repo stringlengths 2 99 | file stringlengths 13 225 | code stringlengths 0 18.3M | file_length int64 0 18.3M | avg_line_length float64 0 1.36M | max_line_length int64 0 4.26M | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
THULAC-Python | THULAC-Python-master/thulac/manage/Punctuation.py | #coding: utf-8
from ..base.Dat import Dat
class Punctuation():
    def __init__(self, filename):
        # Double-array trie holding the punctuation lexicon. A failed/empty
        # load yields a falsy object; adjustSeg/adjustTag guard on that.
        self.__pDat = Dat(filename)
    def adjustSeg(self, sentence):
        # Merge consecutive tokens that together form a punctuation-lexicon
        # entry; mutates ``sentence`` (a list of token strings) in place.
        if(not self.__pDat):
            return
        tmpVec = []
        for i in range(len(sentence)):
            # ``sentence`` shrinks as tokens are merged, so re-check the
            # bound that range() captured up front.
            if(i>=len(sentence)):
                break
            tmp = sentence[i]
            # Non-negative info: no longer lexicon entry can start here.
            if(self.__pDat.getInfo(tmp) >= 0):
                continue
            del tmpVec[:]
            # Grow the candidate while it remains a viable trie prefix.
            for j in range(i+1, len(sentence)):
                tmp += sentence[j]
                if(self.__pDat.getInfo(tmp) >= 0):
                    break
                tmpVec.append(tmp)
            vecSize = len(tmpVec)
            # Commit the longest candidate that is a full lexicon entry.
            for k in range(vecSize-1, -1, -1):
                tmp = tmpVec[k]
                if(self.__pDat.match(tmp) != -1):
                    for j in range(i+1, i+k+2):
                        sentence[i] += sentence[j]
                    del sentence[i+1:i+k+2]
                    break
            del tmpVec[:]
    def adjustTag(self, sentence):
        # Same merging as adjustSeg but for (word, score, tag) tuples; any
        # token matching the punctuation lexicon is (re)tagged 'w'.
        if(not self.__pDat):
            return
        tmpVec = []
        findMulti = False
        for i in range(len(sentence)):
            # List shrinks during merging; re-check the captured bound.
            if (i >= len(sentence)):
                break
            tmp = sentence[i][0]
            if(self.__pDat.getInfo(tmp) >= 0):
                continue
            del tmpVec[:]
            # Grow the candidate while it remains a viable trie prefix.
            for j in range(i+1, len(sentence)):
                tmp += sentence[j][0]
                if(self.__pDat.getInfo(tmp) >= 0):
                    break
                tmpVec.append(tmp)
            vecSize = len(tmpVec)
            findMulti = False
            # Commit the longest full match, tagging the merge as 'w'.
            for k in range(vecSize-1, -1, -1):
                tmp = tmpVec[k]
                if(self.__pDat.match(tmp) != -1):
                    for j in range(i+1, i+k+2):
                        sentence[i] = (sentence[i][0] + sentence[j][0], sentence[i][1], 'w')
                    del sentence[i+1:i+k+2]
                    findMulti = True
                    break
            # No multi-token merge: still retag a lone punctuation token.
            if(not findMulti):
                if(self.__pDat.match(sentence[i][0]) != -1):
                    sentence[i] = (sentence[i][0], sentence[i][1], 'w')
        del tmpVec[:]
THULAC-Python | THULAC-Python-master/thulac/manage/Postprocesser.py | from ..base.Dat import Dat, DATMaker
from ..base.compatibility import decodeGenerator
decode = decodeGenerator()
class Postprocesser():
def __init__(self, filename, tag, isTxt):
if(not filename):
return None
self.tag = tag
if(isTxt):
lexicon = []
f = None
try:
f = open(filename, "r", encoding="utf-8")
except:
f = open(filename, "r")
for i, line in enumerate(f):
line = line.split()
lexicon.append([decode(line[0]), i])
f.close()
dm = DATMaker()
dm.makeDat(lexicon, 0)
dm.shrink()
self.p_dat = Dat(datSize=dm.datSize, oldDat=dm.dat)
else:
self.p_dat = Dat(filename=filename)
def adjustSeg(self, sentence):
if(self.p_dat is None):
return
i = 0
while(i < len(sentence)):
tmp = sentence[i]
if(self.p_dat.getInfo(tmp) >= 0):
i += 1
continue
tmpVec = []
for j in range(i + 1, len(sentence)):
tmp += sentence[j]
if(self.p_dat.getInfo(tmp) >= 0):
break
tmpVec.append(tmp)
vecSize = len(tmpVec)
for k in range(vecSize-1, -1, -1):
tmp = tmpVec[k]
if(self.p_dat.match(tmp) != -1):
sentence[i] = tmp
del sentence[i+1:i+k+2]
break
i += 1
def adjustTag(self, sentence):
# print sentence
if(self.p_dat is None):
return
i = 0
while(i < len(sentence)):
tmp = sentence[i][0]
if(self.p_dat.getInfo(tmp) >= 0):
i+=1
continue
tmpVec = []
for j in range(i+1, len(sentence)):
tmp += sentence[j][0]
if(self.p_dat.getInfo(tmp) >= 0):
break
tmpVec.append(tmp)
vecSize = len(tmpVec)
for k in range(vecSize-1, -1, -1):
tmp = tmpVec[k]
if(self.p_dat.match(tmp) != -1):
sentence[i] = (tmp, '_', self.tag)
del sentence[i+1:i+k+2]
# sentence[i][2] = self.tag;
break
i+=1
if __name__ == '__main__':
pp = Postprocesser("userwords.txt", "uw", True)
| 2,550 | 27.662921 | 63 | py |
THULAC-Python | THULAC-Python-master/thulac/manage/TimeWord.py | #coding: utf-8
class TimeWord():
def __init__(self):
self.__arabicNumSet = set()
self.__timeWordSet = set()
self.__otherSet = set()
timeWord = {24180, 26376, 26085, 21495, 26102, 28857, 20998, 31186}
for i in range(48, 58):
self.__arabicNumSet.add(i)
for i in range(65296, 65306):
self.__arabicNumSet.add(i)
timeWord = {24180, 26376, 26085, 21495, 26102, 28857, 20998, 31186}
self.__timeWordSet = self.__timeWordSet | timeWord
for i in range(65, 91):
self.__otherSet.add(i)
for i in range(97, 123):
self.__otherSet.add(i)
for i in range(48, 58):
self.__otherSet.add(i)
other = {65292, 12290, 65311, 65281, 65306, 65307, 8216, 8217, 8220, 8221, 12304, 12305,
12289, 12298, 12299, 126, 183, 64, 124, 35, 65509, 37, 8230, 38, 42, 65288,
65289, 8212, 45, 43, 61, 44, 46, 60, 62, 63, 47, 33, 59, 58, 39, 34, 123, 125,
91, 93, 92, 124, 35, 36, 37, 94, 38, 42, 40, 41, 95, 45, 43, 61, 9700, 9734, 9733}
self.__otherSet = self.__otherSet | other
def isArabicNum(self, word):
allArabic = True
for i in word:
if(i not in self.__arabicNumSet):
allArabic = False
break
return allArabic
def isTimeWord(self, word):
if(len(word)!= 1):
return False
if(word[0] in self.__timeWordSet):
return True
else:
return False
def isDoubleWord(self, word, postWord):
if(len(word) != 1 or len(postWord) != 1):
return False
else:
wordInt = word[0]
postWordInt = postWord[0]
if(wordInt == postWordInt):
if(wordInt in self.__otherSet):
return True
else:
return False
return False
def adjustSeg(self, sentence):
size = len(sentence)
word = []
hasTimeWord = False
for i in range(size-1, -1, -1):
word = sentence[i]
if(self.isTimeWord(word)):
hasTimeWord = True
else:
if(hasTimeWord):
if(self.isArabicNum(word)):
sentence[i] += sentence[i+1]
del sentence[i+1]
hasTimeWord = False
postWord = []
for i in range(size-2, -1, -1):
word = sentence[i]
postWord = sentence[i+1]
if(self.isDoubleWord(word, postWord)):
sentence[i] += sentence[i+1]
del sentence[i+1]
    def adjustTag(self, sentence):
        # Tag-level fix-ups on (word, score, tag) tuples, in place: merge
        # number+time-word pairs (retagged 't'), tag URL-like tokens 'x',
        # and tag the token following a lone "@" as 'np'.
        size = len(sentence)
        word = []
        hasTimeWord = False
        for i in range(size-1, -1, -1):
            word = sentence[i][0]
            if(self.isTimeWord(word)):
                hasTimeWord = True
            else:
                if(hasTimeWord):
                    if(self.isArabicNum(word)):
                        # Glue the number onto the time unit, retag as time.
                        sentence[i] = (sentence[i][0] + sentence[i+1][0], sentence[i][1], 't')
                        del sentence[i+1]
                    hasTimeWord = False
        size = len(sentence)
        for i in range(size):
            word = sentence[i][0]
            if(self.isHttpWord(word)):
                sentence[i] = (sentence[i][0], sentence[i][1], 'x')
        size = len(sentence)
        preWord = ""
        for i in range(1, size):
            preWord = sentence[i-1][0]
            word = sentence[i][0]
            # NOTE(review): compares an element to the integer 64 ("@");
            # this only matches when tokens are sequences of code points,
            # never for str tokens in Python 3 -- confirm the token
            # representation used by callers.
            if(len(preWord) == 1 and preWord[0] == 64):
                if(len(word) != 1 or word[0] != 64):
                    sentence[i] = (sentence[i][0], sentence[i][1], 'np')
        # del word[:]
def isHttpWord(self, word):
if(len(word) < 5):
return False
else:
if(word[0] == ord('h') and word[1] == ord('t') and word[2] == ord('t') and word[3] == ord('p')):
return True
else:
return False
def adjustDouble(self, sentence):
size = len(sentence)
word = []
hasTimeWord = False
for i in range(size-1, -1, -1):
word = sentence[i].word
if(self.isTimeWord(word)):
hasTimeWord = True
else:
if(hasTimeWord):
if(self.isArabicNum(word)):
sentence[i].word += sentence[i+1].word
del sentence[i+1]
sentence[i].tag = "t"
hasTimeWord = False
size = len(sentence)
postWord = ""
for i in range(size - 2, -1, -1):
word = sentence[i].word
postWord = sentence[i+1].word
if(self.isDoubleWord(word, postWord)):
sentence[i].word += sentence[i+1].word
del sentence[i+1]
size = len(sentence)
for i in range(size):
word = sentence[i].word
if(self.isHttpWord(word)):
sentence[i].tag = 'x'
size = len(sentence)
preWord = ""
for i in range(size):
preWord = sentence[i-1].word
word = sentence[i].word
if(len(preWord) == 1 and preWord[0] == 64):
if(len(word) != 1 or word[0] != 64):
sentence[i].tag = "np"
del word[:]
| 5,475 | 33.225 | 108 | py |
THULAC-Python | THULAC-Python-master/thulac/manage/Preprocesser.py | #coding: utf-8
import os
import struct
from ..base.compatibility import chrGenerator
chr = chrGenerator()
class Preprocesser:
def __init__(self, rm_space=False):
self.otherSet = [65292, 12290, 65311, 65281, 65306, 65307, 8216, \
8217, 8220, 8221, 12304, 12305, \
12289, 12298, 12299, 126, 183, 64, 124, 35, 65509, 37, 8230, 38, 42, 65288, \
65289, 8212, 45, 43, 61, 44, 46, 60, 62, 63, 47, 33, 59, 58, 39, 34, 123, 125, \
91, 93, 92, 124, 35, 36, 37, 94, 38, 42, 40, 41, 95, 45, 43, 61, 9700, 9734, 9733, 32, 12288, \
21543, 32610, 21591, 21877, 30340, 20215, 23478, 21862, 26469, 21819, \
20102, 22046, 21737, 21671, 21679, 21872, 21949, 21527, 22043, 22172, 20040, \
21738, 21602, 21584, 21542, 21621, 21704, 19981, 20846, 33324, 21017, 36830, \
32599, 32473, 22139, 21705, 38463, 21834, 21571, 27448, 21703, 21568, 20063, \
32822, 21727, 27428, 21589, 22114, 21606, 22050
]
self.singlePunSet = [65292, 12290, 65311, 65281, 65306, 65307, 8216, 8217, 8220, 8221, 1230, 12304, \
12305, 12289, 12298, 12299, 64,35, 65288, 65289, 34, 91, 93, 126, 47, 44, 58, \
63, 9700, 9734, 9733, 8230, 39, 33, 42, 43, 62, 40, 41, 59, 61, 32, 12288]
self.httpSet = [47, 46, 58, 35, 34, 95, 45, 61, 43, 38, 36, 59]
self.modalParticleSet = [20040, 21602, 21543, 21834, 21602, 21734]
self.t2s = {}
self.s2t = {}
self.rmSpace = rm_space
self.isOther = self.is_X(self.otherSet)
self.isSinglePun = self.is_X(self.singlePunSet)
self.isHttp = self.is_X(self.httpSet)
self.isModalParticleSet = self.is_X(self.modalParticleSet)
def is_X(self, charType):
def func(c):
if(c in charType):
return True
else:
return False
return func
# def isOther(self, c):
# if(c in self.otherSet):
# return True
# else:
# return False
# def isSinglePun(self, c):
# if(c in self.singlePunSet):
# return True
# else:
# return False
# def isHttp(self, c):
# if(c in self.httpSet):
# return True
# else:
# return False
# def isModalParticleSet(self, c):
# if(c in self.modalParticleSet):
# return True
# else:
# return False
def setT2SMap(self, filename):
file = open(filename, "rb")
self.datSize = int(os.path.getsize(filename) / 8)
tempbytes = file.read(4 * self.datSize)
tra = struct.unpack("<"+str(self.datSize)+"i", tempbytes)
tempbytes = file.read(4 * self.datSize)
sim = struct.unpack("<"+str(self.datSize)+"i", tempbytes)
for i in range(self.datSize):
self.t2s[tra[i]] = sim[i]
self.s2t[sim[i]] = tra[i]
    def clean(self, sentence):
        """Normalise *sentence* and build a per-character constraint list.

        Returns ``(senClean, graph)``: the cleaned text (space runs
        collapsed, optionally removed) and one small integer per kept
        character. NOTE(review): the graph values (1/2/4/7/8/9/15) appear to
        be a bitmask of allowed word-boundary roles consumed by the decoder;
        their semantics are not defined here -- confirm against the
        segmenter.
        """
        senClean = ""
        graph = []
        hasSpace = False
        hasOther = False
        hasSinglePun = False
        # NOTE(review): the http/at/np/tmp variables below are never used in
        # this method -- they look like leftovers from the C++ port.
        hasHttp = False
        hasAt = False
        hasTitle = False
        httpStartVec = []
        httpStart = -1
        httpVec = []
        c = -1
        tmpRaw = []
        npRaw = []
        npStart = -1
        npStartVec = []
        npVec = []
        titleRaw = []
        titleStart = -1
        titleStartVec = []
        titleVec = []
        for i in range(len(sentence)):
            c = ord(sentence[i])
            if(c == 32 or c == 12288):
                # ASCII space or fullwidth space: collapse runs of spaces.
                hasOther = False
                if(hasSpace):
                    continue
                else:
                    if(len(graph) > 0):
                        # Close the previous token at the space boundary.
                        o = graph[-1] & 12
                        graph[-1] = o
                    hasSpace = True
                    if(not self.rmSpace):
                        senClean += sentence[i]
                        graph.append(9)
                continue
            elif(self.isOther(c)):
                # Punctuation / symbol / function-character class.
                if(hasSpace):
                    senClean += sentence[i]
                    if(self.isSinglePun(c) or self.isModalParticleSet(c)):
                        graph.append(8)
                        if(self.isSinglePun(c)):
                            hasSinglePun = True
                    else:
                        graph.append(9)
                        hasSinglePun = False
                    hasSpace = False
                elif(hasOther):
                    if(self.isSinglePun(c) or self.isModalParticleSet(c)):
                        if(len(graph) > 0):
                            o = graph[-1] & 12
                            graph[-1] = o
                        senClean += sentence[i]
                        graph.append(8)
                        if(self.isSinglePun(c)):
                            hasSinglePun = True
                    else:
                        if(hasSinglePun):
                            senClean += sentence[i]
                            graph.append(9)
                        else:
                            if(graph[-1] == 0):
                                graph[-1] = 7
                            senClean += sentence[i]
                            graph.append(2)
                        hasSinglePun = False
                else:
                    senClean += sentence[i]
                    graph.append(9)
                    if(self.isSinglePun(c)):
                        hasSinglePun = True
                    else:
                        hasSinglePun = False
                if(c == 12299):
                    # Closing title bracket: finish a pending title span.
                    if(hasTitle):
                        titleVec.append(titleRaw)
                        titleStartVec.append(titleStart)
                        hasTitle = False
                hasOther = True
            else:
                # Ordinary (word) character.
                if(hasSpace):
                    senClean += sentence[i]
                    graph.append(9)
                elif(hasOther):
                    graph[-1] = graph[-1] & 12
                    if(hasSinglePun):
                        senClean += sentence[i]
                        graph.append(9)
                        hasSinglePun = False
                    else:
                        senClean += sentence[i]
                        graph.append(15)
                else:
                    senClean += sentence[i]
                    graph.append(15)
                hasSpace = False
                hasOther = False
                if(c == 12298):
                    # Opening title bracket: start recording a title span.
                    hasTitle = True
                    titleStart = len(graph)
                    titleRaw = []
                elif(hasTitle):
                    titleRaw.append(c)
        # Force each plausible bracketed title span to segment as one word.
        for i in range(len(titleVec)):
            titleRaw = titleVec[i]
            if(self.isPossibleTitle(titleRaw)):
                start = titleStartVec[i]
                size = len(titleRaw)
                # print sentence + ":Here" + str(titleRaw) + ":" + str(start) + ":" + str(size) + ":" + str(len(graph))
                if(len(titleRaw) == 1):
                    graph[start] = 9
                    continue
                graph[start] = 1
                for j in range(start + 1, start + size - 1):
                    graph[j] = 2
                graph[start + size - 1] = 4
        # Sentence boundaries: first char may only begin, last may only end.
        if(len(graph) != 0):
            graph[0] = graph[0] & 9
            graph[-1] = graph[-1] & 12
            if(graph[0] == 0):
                graph[0] = 9
            if(graph[-1] == 0):
                graph[-1] = 12
        return senClean, graph
def isPossibleTitle(self, titleRaw):
if(len(titleRaw) > 10 or len(titleRaw) == 0):
return False
else:
for i in range(len(titleRaw)):
if(self.isOther(titleRaw[i])):
return False
return True
def getT2S(self, c):
if(ord(c) in self.t2s):
return chr(self.t2s[ord(c)])
else:
return c
def getS2T(self, c):
if(c in self.s2t):
return self.s2t[c]
else:
return c
def T2S(self, sentence):
newSentence = ""
for w in sentence:
newSentence += self.getT2S(w)
return newSentence
def S2T(self, sentence, oriSentence):
count = 0
for w in sentence:
for j in range(len(w.word)):
w.word = w.word[0, j-1]+oriSentence[count] \
+ w.word[j+1:]
count = count + 1
    def cleanSpace(self, sentence, senClean, graph):
        # Build boundary constraints from explicit spaces: a space closes the
        # preceding token (8 = single char, 4 = word end) and the next
        # non-space character opens a new one (1).
        # NOTE(review): the senClean/graph parameters are immediately
        # rebound, so the caller's arguments are ignored and only graph is
        # returned -- confirm the intended signature.
        senClean = ""
        graph = []
        hasSpace = False
        c = -1
        wordLength = 0
        # for(int i=0;i<(int)sentence.length();i++)
        for i in range(len(sentence)):
            # NOTE(review): no ord() here (unlike clean()), so comparing to
            # 32/12288 assumes the elements are already integer code points
            # -- confirm upstream representation.
            c = sentence[i]
            if(c==32 or c==12288):
                if(hasSpace):
                    continue
                else:
                    if(len(graph) > 0):
                        if(wordLength == 1):
                            graph[-1] = 8
                        else:
                            graph[-1] = 4
                    hasSpace = True
                    wordLength = 0
            else:
                if(hasSpace):
                    senClean += sentence[i]
                    graph.append(1)
                    hasSpace = False
                else:
                    senClean += sentence[i]
                    if(len(graph) == 0):
                        graph.append(1)
                    else:
                        graph.append(2)
                wordLength = wordLength + 1
        # Close the final token.
        if(len(graph) > 0):
            if(wordLength == 1):
                graph[-1] = 8
            else:
                graph[-1] = 4
        return graph
| 9,839 | 33.526316 | 119 | py |
THULAC-Python | THULAC-Python-master/thulac/manage/__init__.py | 0 | 0 | 0 | py | |
THULAC-Python | THULAC-Python-master/thulac/manage/Filter.py | from ..base.Dat import Dat
class Filter:
    def __init__(self, xuWordFile, timeWordFile):
        # Tries of filler ("xu") words and time words to be filtered out.
        self.xu_dat = Dat(xuWordFile)
        self.time_dat = Dat(timeWordFile)
        # POS tags kept by adjustTag; tokens with any other tag are removed.
        self.posSet = ["n","np","ns","ni","nz","v","a","id","t","uw"]
        # ASCII digits plus fullwidth digits, and common Chinese numerals.
        self.arabicNumSet = [i for i in range(48, 58)] +[i for i in range(65296, 65306)]
        self.chineseNumSet = [12295,19968,20108,19977,22235,20116,20845,19971,20843,20061]
def adjustSeg(self, sentence):
if((self.xu_dat is None) or (self.time_dat is None)):
return
size = len(sentence)
count = 0
checkArabic = False
checkChinese = False
for i in range(size-1, -1, -1):
word = sentence[i]
if(self.xu_dat.match(word) != -1):
sentence.remove(word)
continue
count = 0
checkArabic = False
checkChinese = False
for j in range(len(word)):
if(ord(word[j]) in self.arabicNumSet):
checkArabic = True
break
if(ord(word[j]) in self.chineseNumSet):
count = count + 1
if(count == 2):
checkChinese = True
break
if(checkArabic or checkChinese or (self.time_dat.match(word) != -1)):
sentence.remove(word)
def adjustTag(self, sentence):
if(self.xu_dat is None or self.time_dat is None):
return
size = len(sentence)
word = ""
tag = ""
count = 0
checkArabic = False
checkChinese = False
for i in range(size-1, -1, -1):
word = sentence[i][0]
tag = sentence[i][2]
if(tag in self.posSet):
if(self.xu_dat.match(word) != -1):
sentence.remove(sentence[i])
continue
if(tag == "t"):
count = 0
checkArabic = False
checkChinese = False
for j in range(len(word)):
if(ord(word[j]) in self.arabicNumSet):
checkArabic = True
break
if(ord(word[j]) in self.chineseNumSet):
count = count + 1
if(count == 2):
checkChinese = True
break
if(checkArabic or checkChinese or (self.time_dat.match(word) != -1)):
sentence.remove(sentence[i])
continue
else:
sentence.remove(sentence[i])
continue
| 2,792 | 35.75 | 90 | py |
THULAC-Python | THULAC-Python-master/thulac/manage/SoExtention.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ctypes import cdll, c_char, c_char_p, cast, POINTER
from ..base.compatibility import fixC_char_p, isPython2
import os.path
import platform
fixCCP = fixC_char_p()
# path = os.path.dirname(os.path.realpath(__file__)) #设置so文件的位置
class SoExtention:
    def __init__(self, model_path, user_dict_path, t2s, just_seg, pre_alloc_size=1024*1024*16):
        """Load the native THULAC shared library and initialise it.

        :param model_path: directory containing the THULAC model files
        :param user_dict_path: user dictionary path (may be empty)
        :param t2s: truthy to convert traditional to simplified Chinese
        :param just_seg: truthy for segmentation without POS tagging
        :param pre_alloc_size: native result-buffer size in bytes
        """
        root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) # locate the .so relative to the package root
        self._lib = cdll.LoadLibrary(root+'/libthulac.so') # load the shared library
        self._lib.init(c_char_p(fixCCP(model_path)), c_char_p(fixCCP(user_dict_path)), int(pre_alloc_size), int(t2s), int(just_seg)) # initialise the native engine
def clear(self):
if self._lib != None: self._lib.deinit()
    def seg(self, data):
        """Segment *data* through the native library and return the result
        as a string (str on Python 3; '%s'-formatted value on Python 2)."""
        r = self._lib.seg(c_char_p(fixCCP(data)))
        assert r > 0
        # The native side returns a C string; set the return type and cast
        # the raw pointer so the bytes can be read before freeing them.
        self._lib.getResult.restype = POINTER(c_char)
        p = self._lib.getResult()
        s = cast(p,c_char_p)
        d = '%s'%s.value
        if(isPython2):
            # On Python 2, s.value is already a byte string usable as-is.
            self._lib.freeResult();
            return d
        s = s.value.decode('utf-8')
        self._lib.freeResult();
        return s
return s
| 1,166 | 35.46875 | 143 | py |
THULAC-Python | THULAC-Python-master/tests/testInitVariables.py | #coding: utf-8
import thulac
import sys
prefix = sys.path[0]
def testSegOnly():
    # Segmentation-only mode returns space-separated words without tags.
    test_text = "我爱北京天安门"
    thu = thulac.thulac(seg_only = True)
    gold = thu.cut(test_text, text = True)
    assert gold == "我 爱 北京 天安门"

# Tag-model initialisation is expensive, so both tagging-related tests are
# grouped into a single function here.
def testTagAndDeli():
    # A custom delimiter separates each word from its POS tag.
    test_text = "我爱北京天安门"
    thu = thulac.thulac(deli = '#')
    gold = thu.cut(test_text, text = True)
    assert gold == "我#r 爱#v 北京#ns 天安门#ns"

def testUserDict():
    # Words listed in the user dictionary are kept as a single token.
    test_text = "我爱北京天安门"
    thu = thulac.thulac(seg_only = True, user_dict = prefix + "/userDict.txt")
    gold = thu.cut(test_text, text = True)
    assert gold == "我爱北京天安门"

def testT2S():
    # Traditional input is converted to simplified before segmentation.
    test_text = "我愛北京天安門"
    thu = thulac.thulac(seg_only = True, T2S = True)
    gold = thu.cut(test_text, text = True)
    print(gold)
    assert gold == "我 爱 北京 天安门"

def testFilt():
    # filt=True removes filler words from the result.
    test_text = "我可以爱北京天安门"
    thu = thulac.thulac(seg_only = True, filt = True)
    gold = thu.cut(test_text, text = True)
    print(gold)
    assert gold == "我 爱 北京 天安门"

def testrmSpace():
    # rm_space=False keeps input spaces as token boundaries in the output.
    test_text1 = "而荔 波 肉又 丧 心 病 狂 的不肯悔改"
    test_text2 = "我爱北京天 安 门"
    thu = thulac.thulac(seg_only = True, rm_space = False)
    gold1 = thu.cut(test_text1, text = True)
    gold2 = thu.cut(test_text2, text = True)
    print(gold1, gold2)
    assert gold1 == "而 荔 波 肉 又 丧 心 病 狂 的 不 肯 悔改"
    assert gold2 == "我 爱 北京 天 安 门"
| 1,261 | 24.24 | 75 | py |
THULAC-Python | THULAC-Python-master/tests/testAllCutMethod.py | #coding: utf-8
import thulac
import sys
prefix = sys.path[0]
thu = thulac.thulac(seg_only = True)
def readFile(file_name):
    """Return the first line of *file_name*, or None when it is empty."""
    with open(file_name) as handle:
        return next(handle, None)
def testCutFile():
    # cut_f reads input.txt and writes the segmented text to output.txt.
    thu.cut_f(prefix +"/textForTest/input.txt", prefix +"/textForTest/output.txt")
    print(readFile(prefix +"/textForTest/output.txt"))
    assert readFile(prefix + "/textForTest/output.txt") == "我 爱 北京 天安门\n"

def testFastCut():
    # fast_cut segments in memory via the native SO extension.
    test_text = "我爱北京天安门"
    gold = thu.fast_cut(test_text, text = True)
    assert gold == "我 爱 北京 天安门"

def testFastCutFile():
    # fast_cut_f is the file-to-file variant of the native segmentation.
    thu.fast_cut_f(prefix +"/textForTest/input.txt", prefix +"/textForTest/output.txt")
    print(readFile(prefix +"/textForTest/output.txt"))
    assert readFile(prefix +"/textForTest/output.txt") == "我 爱 北京 天安门\n"
| 811 | 29.074074 | 87 | py |
SpectralRadex | SpectralRadex-master/setup.py | import setuptools # this is the "magic" import
from numpy.distutils.core import setup, Extension
from numpy.distutils import exec_command
from glob import glob
import os
# Read the long description for PyPI from the README.
with open("README.md", "r") as fh:
    long_description = fh.read()

DATA_DIR="src/spectralradex/radex/data/"

#exec_command.exec_command( "make python", execute_in='src/radex_src/', use_shell=True)

# Skip building the Fortran extension on ReadTheDocs, where no compiler is
# available; the docs build only needs the pure-Python package.
if not os.getenv('READTHEDOCS'):
    radexwrap = Extension(name = 'radexwrap',
        sources = ['src/radex_src/'+x for x in ['types.f90','commondata.f90','slatec.f90',
        'solver.f90','background.f90','io.f90','wrap.f90','radexwrap.pyf']])
    ext_mods=[radexwrap]
    # NOTE(review): glob(DATA_DIR) globs the literal directory path (no
    # wildcard), yielding at most [DATA_DIR]; DATA_DIR + "*" was probably
    # intended -- confirm.
    data_files=[("spectralradex/radex/data",glob(DATA_DIR))]
else:
    ext_mods=[]
    data_files=[]
# Executes version.py to define __version__ without importing the package.
exec(open('src/spectralradex/version.py').read())
setup(
name="spectralradex", # Replace with your own username
version=__version__,
author="Jonathan Holdship",
author_email="jonholdship@gmail.com",
description="A package for RADEX and spectral modelling",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://spectralradex.readthedocs.io",
ext_modules = ext_mods,
package_dir={'': 'src'},
packages=setuptools.find_packages(where='src'),
data_files=data_files,
classifiers=[
'Development Status :: 5 - Production/Stable',
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['pandas','numpy']
) | 1,615 | 34.911111 | 103 | py |
SpectralRadex | SpectralRadex-master/src/radex_src/test.py | import radexwrap | 16 | 16 | 16 | py |
SpectralRadex | SpectralRadex-master/src/spectralradex/version.py | __version__='1.1.5'
| 20 | 9.5 | 19 | py |
SpectralRadex | SpectralRadex-master/src/spectralradex/__init__.py |
from . import radex
from .version import __version__
from pandas import DataFrame,read_csv
import numpy as np
import os
package_directory = os.path.dirname(os.path.abspath(__file__))
light_speed_si=2.99792e5
planck=6.62607e-34
boltzman_si=1.38e-23
light_speed_cgs=c=2.99792458e10
boltzman_cgs=1.380649e-16 #cgs units
def noise_from_spectrum(intensities):
    """
    Estimate the rms noise level of a spectrum.

    Assumes the spectrum is gaussian noise plus positive signal, so the
    median sits at the peak of the noise distribution and every value below
    it is pure noise; the rms is then the root mean square deviation of the
    sub-median values from the median.

    :param intensities: An array of the intensity values representing a spectrum
    :type intensities: float, iterable

    :return: The rms noise value
    :rtype: float
    """
    baseline = np.median(intensities)
    below = intensities[np.where(intensities < baseline)[0]]
    return np.sqrt(np.mean((baseline - below) ** 2.0))
def convert_intensity_to_kelvin(frequencies, intensities, minor_beam, major_beam):
    """
    Convert a spectrum from Jy/beam to kelvin. All spectra produced by
    spectralradex use kelvin, so this function is intended to convert
    observed spectra for fitting. Treatment taken from
    https://science.nrao.edu/facilities/vla/proposing/TBconv

    :param frequencies: An array of the frequency values representing a spectrum, in GHz
    :type frequencies: float, iterable

    :param intensities: An array of the intensity values at each of the frequencies in the frequency array in Jy/beam. Modified in place for ndarray input.
    :type intensities: float, iterable

    :param minor_beam: beamsize along minor axis in arcseconds
    :type minor_beam: float

    :param major_beam: beamsize along major axis in arcseconds
    :type major_beam: float
    """
    # Local cgs constants so the conversion is self-contained.
    speed_of_light = 2.99792458e10  # cm s^-1
    boltzmann = 1.380649e-16  # erg K^-1
    frequencies = frequencies * 1e9  # GHz -> Hz
    # arcsec -> radians
    minor_beam = (minor_beam / 3600.0) * np.pi * 2 / 360.0
    major_beam = (major_beam / 3600.0) * np.pi * 2 / 360.0
    factor = (2.0 * np.log(2) / np.pi) * (speed_of_light ** 2.0) / (boltzmann * (minor_beam * major_beam))
    # BUGFIX: this line previously referenced an undefined name ``freq`` and
    # always raised NameError. 1e-23 converts Jy to erg s^-1 cm^-2 Hz^-1.
    intensities *= factor * 1e-23 / (frequencies * frequencies)
    return intensities
#calculate the optical depth at all our observed frequencies for a given line
#based on velocity relative to line centre
def maxwellian_distribution(v0, delta_v, tau_0, velocities):
    """
    Return the optical depth as a function of velocity, assuming gaussian
    line profiles with a given optical depth at line centre.

    :param v0: Peak velocity of the emission
    :type v0: float

    :param delta_v: FWHM of the peaks, taken from the linewidth parameter of RADEX when called via :func:`model_spectrum`
    :type delta_v: float

    :param tau_0: The optical depth at line centre. Taken from RADEX when called via :func:`model_spectrum`
    :type tau_0: float

    :param velocities: An iterable containing the velocity values at which to calculate tau
    :type velocities: float, iterable

    :return: An array with the tau value at each velocity in velocities
    :rtype: ndarray, float
    """
    exponent = -4.0 * np.log(2.0) * ((velocities - v0) ** 2.0) / (delta_v * delta_v)
    return tau_0 * np.exp(exponent)
#This runs radex to get the excitation temperature and optical depth for every line
def get_radex_taus(params):
    """Run RADEX with *params* and return the per-line frequency, optical
    depth, and excitation temperature for lines with a positive frequency.

    Returns None when the RADEX run fails (mirroring :func:`radex.run`).
    """
    # Removed an unused ``columns`` list that was never referenced.
    output = radex.run(params)
    if output is None:
        return output
    positive = (output["freq"] > 0.0)
    return output.loc[positive, ["freq", "tau", "T_ex"]]
def get_tau_distribution(x,v0,delta_v,frequencies,tau_profile):
    """
    Internal function meant to turn radex output into frequency
    dependent optical depth and radiation temperature.

    ``x`` is one row of the tau dataframe: (line frequency, tau at line
    centre, excitation temperature). Returns (taus, tau-weighted radiation
    temperatures) evaluated at ``frequencies``.
    """
    #unpack tau_df row
    line_freq,tau_0,t_ex=x
    #get the relative velocity of all the emitting frequencies
    velocities=((line_freq/frequencies)-1.0)*light_speed_si
    #warn user if their frequencies are too far apart
    # NOTE(review): with ascending frequencies this difference is negative,
    # so the warning may never trigger -- confirm intended comparison.
    if velocities[1]-velocities[0]>delta_v:
        print("Velocity bins larger than linewidth")
    #calculate optical depth as function of v
    taus=tau_profile(v0,delta_v,tau_0,velocities)
    #calculate tau weighted radiation temperature
    rad_weights=rad_temp(t_ex,frequencies*(1.0+v0/light_speed_si))*taus
    return taus,rad_weights
def rad_temp(t, frequencies):
    """
    Radiation temperature at the given frequencies (GHz) for an excitation
    temperature *t*.
    :meta private:
    """
    photon_energy_ratio = (planck * frequencies * 1.0e9) / boltzman_si
    return photon_energy_ratio / (np.exp(photon_energy_ratio / t) - 1)
def chi_squared(obs_freqs, obs_intensity, error, v0, params):
    """Chi-squared statistic between an observed spectrum and a model.

    :param obs_freqs: observed frequency axis (GHz)
    :param obs_intensity: observed intensities
    :param error: 1-sigma uncertainty on each intensity
    :param v0: source velocity (km/s), passed to :func:`model_spectrum`
    :param params: RADEX parameter dictionary

    NOTE(review): :func:`model_spectrum` returns a DataFrame, so callers may
    need its ``Intensity`` column here -- confirm expected input alignment.
    """
    intensity = model_spectrum(obs_freqs, v0, params)
    residual = obs_intensity - intensity
    residual = (residual * residual) / (error * error)
    # BUGFIX: previously summed ``error`` instead of the chi terms.
    return np.sum(residual)
def model_spectrum(obs_freqs,v0,radex_params,tau_profile=maxwellian_distribution):
    """
    Calculates the brightness temperature as a function of frequency for given input frequencies, :math:`V_{LSR}`
    velocity and RADEX parameters.

    :param obs_freqs: An array of frequency values in GHz at which the brightness temperature should be calculated.
    :type obs_freqs: iterable, float

    :param v0: The :math:`V_{LSR}` velocity of the emitting object to be modelled in km/s
    :type v0: float

    :param radex_params: A dictionary containing the inputs for the RADEX model. See :func:`radex.get_default_parameters` for a list of possible parameters. Note this includes the linewidth in km/s that will be used to set the shape of the emission lines.
    :type radex_params: dict

    :param tau_profile: A function with the same arguments as :func:`maxwellian_distribution` that returns the optical depth as a function of velocity. If not set, spectralradex will assume gaussian line profiles centred on `v0` and a FWHM taken from the RADEX parameters.
    :type tau_profile: function, optional

    :return: DataFrame with ``Frequency`` and ``Intensity`` columns, or None
        when the RADEX run fails.
    """
    #make sure input frequencies are a sorted array
    obs_freqs=np.asarray(obs_freqs)
    obs_freqs.sort()

    #solve the radex model and get all line properties
    tau_0_df=get_radex_taus(radex_params)
    if tau_0_df is None:
        print("RADEX failed so no spectrum produced")
        return None
    # drop lines well above the observed band; they cannot contribute
    tau_0_df=tau_0_df[tau_0_df["freq"]<1.1*obs_freqs.max()]
    delta_v=radex_params["linewidth"]

    #obtain tau as a function of frequency and tau*radiation temperature for each line
    vfunc=lambda x: get_tau_distribution(x,v0,delta_v,obs_freqs,tau_profile)
    solve=np.apply_along_axis(vfunc,axis=1,arr=tau_0_df.values)

    #sum to get total optical depth
    taus=[x[0] for x in solve]
    taus=np.sum(taus,axis=0)

    #sum radiation temperatures that have been multiplied by line taus and divide by total tau
    #this is tau weighted radiation temperature - helps for overlapping lines
    rad_weights=[x[1] for x in solve]
    rad_weights=np.sum(rad_weights,axis=0)/taus

    #finally, calculate observed brightness temperature against the CMB
    taus=(rad_weights-rad_temp(2.73,obs_freqs))*(1.0-np.exp(-taus))

    #we'll return a dataframe of Frequency, Intensity
    new_df=DataFrame({"Frequency":obs_freqs,"Intensity":taus}).fillna(0.0)
    return new_df
SpectralRadex | SpectralRadex-master/src/spectralradex/radex/__init__.py | from radexwrap import *
from pandas import DataFrame, concat
import numpy as np
from functools import partial
import os
_ROOT = os.path.dirname(os.path.abspath(__file__))
PARTNER_LIST={1:"h2",2:"p-h2",3:"o-h2", 4:"e-", 5:"h", 6:"he",7:"h+"}
def run(parameters, output_file=None):
    """
    Run a single RADEX model using a dictionary to set parameters.

    :param parameters: A dictionary containing the RADEX inputs that the user wishes to set,
        all other parameters will use the default values. See :func:`get_default_parameters`
        for a list of possible parameters and :func:`run_params` for descriptions.
    :type parameters: dict

    :param output_file: If not ``None``, the RADEX results are stored to this file in csv format.
    :type output_file: str

    :return: DataFrame of line results restricted to [fmin, fmax], or None
        when the RADEX run fails.
    """
    columns = ['E_UP (K)', 'freq', 'WAVEL (um)', 'T_ex', 'tau',
               'T_R (K)', 'POP UP', 'POP LOW', 'FLUX (K*km/s)', 'FLUX (erg/cm2/s)']
    # Resolve bare datafile names against the bundled data directory.
    # NOTE(review): this mutates the caller's dict in place -- confirm intended.
    parameters["molfile"] = add_data_path(parameters["molfile"])
    success,nlines, qup, qlow, output = from_dict(parameters)
    if success==1:
        # The Fortran wrapper returns fixed-size arrays; keep only nlines.
        output = DataFrame(columns=columns, data=output[:, :nlines].T)
        output["Qup"] = [q.decode("UTF-8").strip() for q in qup][:nlines]
        output["Qlow"] = [q.decode("UTF-8").strip() for q in qlow][:nlines]
        # Restrict to the requested frequency window.
        output=output[output["freq"]>parameters["fmin"]]
        output=output[output["freq"]<parameters["fmax"]]
        if output_file is not None:
            output.to_csv(output_file, index=False)
        return output
    else:
        print("RADEX Failed, check RADEX error messages\nYour parameters were:\n")
        print(parameters)
        return None
def run_params(molfile, tkin, cdmol, nh=0.0, nh2=0.0, op_ratio=3.0, ne=0.0, nhe=0.0, nhx=0.0,
               linewidth=1.0, fmin=0.0, fmax=500.0, tbg=2.73, geometry=1, output_file=None):
    """
    Run a single RADEX model from individual parameters

    :param molfile: Either a full path or a relative path beginning with "." to a datafile in the Lamda database
        format. Alternatively, the filename of a datafile from `list_data_files()`.
    :type molfile: str

    :param tkin: Temperature of the Gas in Kelvin
    :type tkin: float

    :param cdmol: Column density of the emitting species in cm :math:`^{-2}`
    :type cdmol: float

    :param nh: Number density of H atoms
    :type nh: float, optional

    :param nh2: Total number density of H2 molecules, set this to o-H2 + p-H2 if using ortho and para H2 as collisional partners.
    :type nh2: float, optional

    :param op_ratio: Ortho to para ratio for H2. Defaults to statistical limit of 3 and used to set o-H2 and p-H2 densities from nh2.
    :type op_ratio: float, optional

    :param ne: Number density of electron.
    :type ne: float, optional

    :param nhe: Number density of He atoms.
    :type nhe: float, optional

    :param nhx: Number density of H+ ions.
    :type nhx: float, optional

    :param linewidth: FWHM of the line in km s :math:`^{-1}`.
    :type linewidth: float, optional

    :param fmin: Minimum frequency below which a line is not included in the results.
    :type fmin: float, optional

    :param fmax: Maximum frequency above which a line is not included in the results.
    :type fmax: float, optional

    :param tbg: Background temperature, defaults to CMB temperature 2.73 K.
    :type tbg: float, optional

    :param geometry: Choice of geometry of emitting object. 1 for sphere, 2 for LVG, 3 for slab.
    :type geometry: int, optional
    """
    columns = ['E_UP (K)', 'freq', 'WAVEL (um)', 'T_ex', 'tau',
               'T_R (K)', 'POP UP', 'POP LOW', 'FLUX (K*km/s)', 'FLUX (erg/cm2/s)']
    molfile = add_data_path(molfile)
    # Split the total H2 density into ortho/para using the ortho:para ratio.
    ortho = op_ratio / (op_ratio + 1.0)
    para = 1.0 - ortho
    densities = [nh2, nh2 * ortho, nh2 * para, ne, nh, nhe, nhx]
    success, nlines, qup, qlow, output = from_params(molfile, tkin, tbg, cdmol, densities,
                                                     linewidth, fmin, fmax, geometry)
    if success == 1:
        # The Fortran wrapper returns fixed-size arrays; keep only nlines.
        output = DataFrame(columns=columns, data=output[:, :nlines].T)
        output["QN Upper"] = qup.reshape(-1, 6).view('S6')[:nlines]
        output["QN Lower"] = qlow.reshape(-1, 6).view('S6')[:nlines]
        output["Qup"] = output["QN Upper"].map(lambda x: x.decode('UTF-8')).str.strip()
        output["Qlow"] = output["QN Lower"].map(lambda x: x.decode('UTF-8')).str.strip()
        output = output.drop(["QN Upper", "QN Lower"], axis=1)
        # Restrict to the requested frequency window.
        output = output[output["freq"] > fmin]
        output = output[output["freq"] < fmax]
        if output_file is not None:
            output.to_csv(output_file, index=False)
        return output
    else:
        print("RADEX Failed, check RADEX error messages\nYour parameters were:\n")
        # BUGFIX: this branch previously printed an undefined name
        # ``parameters`` (copied from run()), raising NameError and masking
        # the real failure. Report the actual inputs instead.
        print({"molfile": molfile, "tkin": tkin, "cdmol": cdmol,
               "densities": densities, "linewidth": linewidth, "fmin": fmin,
               "fmax": fmax, "tbg": tbg, "geometry": geometry})
        return None
def run_grid(parameters,
             target_value="FLUX (K*km/s)", pool=None):
    """
    Run a grid of RADEX models using all combinations of any iterable items in the
    parameters dictionary whilst keeping other parameters constant. Returns a
    DataFrame with one row per grid point and one column per transition; the grid
    can be parallelized with the ``pool`` parameter.

    :param parameters: A dictionary of parameters as provided by
        :func:`get_default_parameters` or :func:`get_example_grid_parameters`.
        Parameters should take a single value when they are constant over the grid
        and contain an iterable if they are to be varied. ``parameters["molfile"]``
        is either a full path or a relative path beginning with "." to a datafile
        in the LAMDA database format, or the filename of a datafile from
        :func:`list_data_files`.
    :param target_value: RADEX output column to be returned. Select one of
        'T_ex', 'tau', 'T_R (K)', 'POP UP', 'POP LOW', 'FLUX (K*km/s)',
        'FLUX (erg/cm2/s)'.
    :type target_value: str, optional
    :param pool: a Pool object with ``map()``, ``close()``, and ``join()`` methods
        such as multiprocessing.Pool or schwimmbad.MPIPool. If supplied, the grid
        will be calculated in parallel.
    :type pool: Pool, optional
    :raises ValueError: if ``target_value`` is not one of the known output columns.
    """
    # Cleaning up and checking inputs.
    possible_targets = ['T_ex', 'tau', 'T_R (K)', 'POP UP', 'POP LOW',
                        'FLUX (K*km/s)', 'FLUX (erg/cm2/s)']
    if target_value not in possible_targets:
        raise ValueError("target_value must be a string and one of the following options:\n'" + "'\n'".join(possible_targets) + "'")
    # Work on a copy so the caller's dictionary is never mutated (the original
    # code rewrote the caller's "molfile" entry in place).
    parameters = parameters.copy()
    if parameters["molfile"][0] != "/":
        parameters["molfile"] = add_data_path(parameters["molfile"])
    # Names of the parameters that vary over the grid; molfile never varies
    # (it is iterable as a string, so it is excluded explicitly).
    variables = [key for key, value in parameters.items() if is_iter(value) and key != "molfile"]
    parameter_grid = np.array(np.meshgrid(*[parameters[x] for x in variables]))
    parameter_grid = parameter_grid.T.reshape(-1, len(variables))
    # Run the first grid point up front to discover the transition list, which
    # fixes the output column names for every subsequent grid point.
    for i, variable in enumerate(variables):
        parameters[variable] = parameter_grid[0, i]
    parameter_combinations = np.delete(parameter_grid, 0, axis=0)
    radex_output = run(parameters)
    dataframe_columns = variables[:]
    line_count = np.shape(radex_output)[0]
    transition_value = []
    for line in range(line_count):
        transition_name = "(" + radex_output.iloc[line]['Qup'] + ")-(" + \
                          radex_output.iloc[line]['Qlow'] + ")[" + \
                          str(radex_output.iloc[line]['freq']) + " GHz]"
        dataframe_columns += [transition_name]
        transition_value += [radex_output.iloc[line][target_value]]
    output = DataFrame(columns=dataframe_columns,
                       data=[[parameters[x] for x in variables] + transition_value])
    if pool is not None:
        func = partial(format_run_for_grid, line_count, parameters, target_value,
                       dataframe_columns, variables)
        pool_results = pool.map(func, parameter_combinations)
        pool.close()
        pool.join()
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # build the result with a single concat (also safe for an empty grid).
        output = concat([output] + list(pool_results), axis=0, ignore_index=True)
    else:
        grid_frames = [format_run_for_grid(line_count, parameters, target_value,
                                           dataframe_columns, variables,
                                           parameter_combinations[grid_point])
                       for grid_point in range(len(parameter_combinations))]
        output = concat([output] + grid_frames, axis=0, ignore_index=True)
    return output
def get_default_parameters():
    """
    Return the default RADEX inputs as a dictionary; this doubles as a template
    for the input expected by :func:`run`.

    ``molfile`` should be a collisional datafile in the LAMDA database format.
    If using a local file, a full path or a relative path beginning with "." is
    required; otherwise, one of the files listed by :func:`list_data_files` can
    be supplied without a path. ``geometry`` is 1 (uniform sphere), 2 (LVG), or
    3 (slab).
    """
    # Pairs rather than a literal dict, to make the (ordered) schema explicit.
    defaults = (
        ("molfile", "co.dat"),
        ("tkin", 30.0),
        ("tbg", 2.73),
        ("cdmol", 1.0e13),
        ("h2", 1.0e5),
        ("h", 0.0),
        ("e-", 0.0),
        ("p-h2", 0.0),
        ("o-h2", 0.0),
        ("h+", 0.0),
        ("linewidth", 1.0),
        ("fmin", 0.0),
        ("fmax", 1000.0),
        ("geometry", 1),
    )
    return dict(defaults)
def get_example_grid_parameters():
    """
    Return a dictionary of RADEX parameters in which kinetic temperature, column
    density and H2 density hold numpy ranges, giving a ready-made grid for use
    with :func:`run_grid`.
    """
    return {
        "molfile": "co.dat",
        "tkin": np.linspace(10, 300, 5),    # five temperatures, 10-300 K
        "tbg": 2.73,
        "cdmol": np.logspace(14, 18, 5),    # five column densities, 1e14-1e18
        "h2": np.logspace(4, 7, 5),         # five H2 densities, 1e4-1e7
        "h": 0.0,
        "e-": 0.0,
        "p-h2": 0.0,
        "o-h2": 0.0,
        "h+": 0.0,
        "linewidth": 1.0,
        "fmin": 0.0,
        "fmax": 800.0,
        "geometry": 1,
    }
def get_transition_table(molfile):
    """
    Read a collisional data file and return a pandas DataFrame for the molecule
    with one row per transition, containing the Einstein coefficients, the upper
    level energy and the frequency.

    :param molfile: Either the full path to a collisional datafile or the
        filename of one supplied with SpectralRadex
    :type molfile: str
    """
    columns = ["Upper level", "Lower level", "Aij", "Frequency", "E_u"]
    path = add_data_path(molfile)
    with open(path) as f:
        # LAMDA header: comment, molecule name, comment, mass, comment.
        for _ in range(5):
            f.readline()
        levels = int(f.readline())
        f.readline()
        # Skip the energy-level table.
        for _ in range(levels):
            f.readline()
        f.readline()
        n_transitions = int(f.readline())
        f.readline()
        # Each transition row: index, upper, lower, Aij, frequency, E_up.
        rows = [f.readline().split()[1:6] for _ in range(n_transitions)]
    line_df = DataFrame(rows, columns=columns)
    line_df[["Aij", "Frequency", "E_u"]] = line_df[["Aij", "Frequency", "E_u"]].astype(float)
    return line_df
def get_collisional_partners(molfile):
    """
    Reads a collisional data file and returns a dictionary containing the number
    of collisional partners and their names. The partner names match the input
    keys for :func:`run`.

    :param molfile: Either the full path to a collisional datafile or the
        filename of one supplied with SpectralRadex
    :type molfile: str
    """
    partners = []
    molfile = add_data_path(molfile)
    # The parser below walks the fixed LAMDA file layout by counting lines;
    # any deviation from the standard format will misalign the reads.
    with open(molfile) as f:
        # Header: comment, molecule, comment, mass, comment.
        for i in range(5):
            f.readline()
        levels = int(f.readline())
        # Skip the level table plus its surrounding comment/count lines.
        for i in range(levels + 2):
            f.readline()
        n_transitions = int(f.readline())
        # Skip the radiative-transition table plus two framing lines.
        for i in range(n_transitions + 2):
            f.readline()
        n_partners = int(f.readline())
        for i in range(n_partners):
            f.readline()
            # Partner line starts with the LAMDA partner id (1=H2, 2=p-H2, ...).
            partner = f.readline()
            partner = int(partner.split()[0])
            f.readline()
            transitions = int(f.readline())
            # Skip this partner's collision-rate block (5 header lines + rates).
            for j in range(5 + transitions):
                f.readline()
            # PARTNER_LIST (module-level) maps the LAMDA id to a run() key.
            partners.append(PARTNER_LIST[partner])
    result = {"Number": n_partners, "Partners": partners}
    return result
def thermal_h2_op_ratio(tkin):
    """
    If your data file has collisions with p-h2 and o-h2, you may want to use the
    thermal ratio to split your total H2 density. You can check that value for
    any given temperature with this function. Returns the ortho:para ratio,
    capped at the statistical high-temperature limit of 3.

    :param tkin: Gas kinetic temperature in K
    :type tkin: float
    """
    # np.min(a, b) treats the second argument as the *axis* parameter and
    # raises at runtime; np.minimum is the elementwise two-argument minimum
    # (and also works for array-valued tkin).
    return np.minimum(3.0, 9.0 * np.exp(-170.6 / tkin))
def add_data_path(filename):
    # Map a bare datafile name onto the packaged "data" directory. Absolute
    # paths and explicitly relative paths (leading ".") pass through untouched
    # so callers can point at their own files.
    keep_as_is = os.path.isabs(filename) or filename[0] == "."
    return filename if keep_as_is else os.path.join(_ROOT, "data", filename)
def list_data_files():
    """
    SpectralRadex is packaged with a selection of LAMDA collisional datafiles.
    This function prints the list of available files. To use a file not packaged
    with SpectralRadex, provide its full path in the parameter dictionary.
    """
    data_dir = os.path.join(_ROOT, "data")
    print(sorted(os.listdir(data_dir)))
def is_iter(x):
    # True when x exposes the iterator protocol (lists, arrays, strings, ...).
    return getattr(x, '__iter__', None) is not None
def format_run_for_grid(line_count, parameters, target_value, columns, grid_variables, grid_parameters):
    # Helper for run_grid: evaluate one grid point and shape the result as a
    # single-row DataFrame. NOTE: updates `parameters` in place with the grid
    # point's values before calling run().
    for variable, value in zip(grid_variables, grid_parameters):
        parameters[variable] = value
    radex_output = run(parameters)
    if radex_output is None:
        # RADEX failed for this grid point; pad the row with NaNs.
        transition_value = [np.nan] * line_count
    else:
        transition_value = radex_output.iloc[:line_count][target_value].to_list()
    row = [parameters[x] for x in grid_variables] + transition_value
    return DataFrame([row], columns=columns)
| 13,556 | 39.109467 | 262 | py |
SpectralRadex | SpectralRadex-master/tests/test_sgeirFails.py | from spectralradex import radex
# Regression input that previously made RADEX fail: an SO/p-H2 model with an
# extreme column density, linewidth and frequency ceiling. run() should return
# None (with RADEX error messages) rather than crash.
params = {'molfile': 'SO-pH2.dat', 'tkin': 240.23288848051274, 'tbg': 2.73, 'cdmol': 2.7117385194476626e+19, 'h2': 1477400.1189838066, 'h': 0.0, 'e-': 0.0, 'p-h2': 369350.02974595164, 'o-h2': 1108050.0892378548, 'h+': 0.0, 'linewidth': 125.49076959987242, 'fmin': 0.0, 'fmax': 30000000.0}
result = radex.run(params)
print("hi")
print(result)
SpectralRadex | SpectralRadex-master/tests/new_subroutine.py | from spectralradex import radex
import numpy as np
from time import perf_counter
# Shared input for the timing runs below: the stock RADEX parameter set.
params = radex.get_default_parameters()
def new():
    # Time one call of the keyword-argument entry point and return the elapsed
    # wall-clock seconds.
    t0 = perf_counter()
    radex.new_run("co.dat", 30.0, cdmol=1e16, nh=0.0, nh2=1e5, op_ratio=3.0,
                  ne=0.0, nhe=0.0, nhx=0.0, linewidth=1.0, fmin=0.0,
                  fmax=500.0, tbg=2.73, geometry=1)
    return perf_counter() - t0
def old():
    # Time one call of the dictionary-based radex.run entry point and return
    # the elapsed wall-clock seconds.
    t0 = perf_counter()
    radex.run(params)
    return perf_counter() - t0
# Benchmark both entry points over 100 runs and report the mean wall time.
new_times = [new() for it in range(100)]
old_times = [old() for it in range(100)]
print(f"new runs in {np.mean(new_times)}")
print(f"old runs in {np.mean(old_times)}")
SpectralRadex | SpectralRadex-master/tests/datafile_reading.py | from spectralradex import radex
# Smoke-test the datafile parser against every packaged LAMDA collisional file.
for data_file in ['hcn.dat', 'o-nh3.dat', 'p-h3o+.dat', 'hc3n.dat', 'catom.dat', 'sio.dat', 'ch2_h2_para.dat', 'hnc.dat', 'hcl.dat', 'ch2_h2_ortho.dat', 'co.dat', 'hco+.dat', 'oh2s.dat', 'hd.dat', 'oh.dat', 'oh@hfs.dat', 'oh2cs.dat', 'n+.dat', 'hcl@hfs.dat', 'hcn@hfs.dat', 'oatom.dat', 'SO-pH2.dat', 'o-h3o+.dat', 'ph2cs.dat', 'o-c3h2.dat', 'c+.dat', 'ph2s.dat', 'p-c3h2.dat', 'o-sic2.dat', 'so2@lowT.dat', 'o2.dat', 'p-nh3.dat', 'oh+.dat']:
    print(data_file)
    coll_dict = radex.get_collisional_partners(data_file)
    print(coll_dict)
# Also exercise the transition-table reader on a datafile outside the package
# (note the hard-coded local path — this line only works on the author's setup).
table = radex.get_transition_table("../HITs/data/collisional/cn.dat")
print(table)
SpectralRadex | SpectralRadex-master/tests/radex.py | from spectralradex import radex
from multiprocessing import Pool
import time
# Single run using just the basic run() method from Spectral Radex
params = radex.get_default_parameters()
# params["molfile"] = "co.dat"
# output = radex.run(params)
# print(output)
# params = radex.get_default_parameters()
# #try to exceed temperature limit
# params["tkin"]=1e5
# output=radex.run(params)
# print(output)
# # Try with no collisional partners
# params["tkin"]=20.0
# params["h2"]=0.0
# params["e-"]=100
# output=radex.run(params)
# print(output)
# # check run_grid() method and how to use or not use multiprocessing with it.
# Vary the kinetic temperature over two values and run the grid in parallel.
params["tkin"] = [10, 50]
res = radex.run_grid(params,
                     target_value="FLUX (K*km/s)", pool=Pool(4))
print(res)
# A non-string target_value should raise ValueError; it is swallowed here on
# purpose — this call exists only to exercise the validation path.
try:
    res = radex.run_grid(params,
                         target_value=1, pool=Pool(4))
except:
    pass
# NOTE(review): "FLUXy (K*km/s)" is not a valid output column, so this call is
# expected to raise ValueError uncaught — confirm that is the intended ending.
res = radex.run_grid(params, target_value="FLUXy (K*km/s)", pool=Pool(4))
print(res)
SpectralRadex | SpectralRadex-master/docs/make_tutorials.py | import subprocess
import glob
import os
# Convert the tutorials
# Convert every example notebook to an .rst tutorial page.
for fn in glob.glob("../examples/*.ipynb"):
    name = os.path.splitext(os.path.split(fn)[1])[0]
    print("Building {0}...".format(name))
    # Use an argument list without a shell so notebook paths containing spaces
    # or shell metacharacters are passed through safely (the previous string
    # concatenation with shell=True broke on such paths).
    subprocess.check_call(
        [
            "jupyter", "nbconvert",
            "--template", "_templates/tutorial_rst.tpl",
            "--to", "rst",
            fn,
            "--output-dir", "tutorials",
        ]
    )
SpectralRadex | SpectralRadex-master/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('../src'))
import mock
# Mock out heavy runtime dependencies so autodoc can import spectralradex on
# Read the Docs, where the compiled radexwrap extension is unavailable.
MOCK_MODULES = ['numpy', 'scipy', 'pandas', 'radexwrap', 'seaborn', 'matplotlib']
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()
# Give the mocked numpy a concrete pi so module-level code using np.pi imports.
# NOTE(review): 3.1 is a rough placeholder — confirm no documented value
# depends on its precision.
sys.modules["numpy"].pi = 3.1
# -- Project information -----------------------------------------------------
project = 'SpectralRadex'
copyright = '2020, Jonathan Holdship'
author = 'Jonathan Holdship'
# The full version, including alpha/beta/rc tags
release = '0.1.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['recommonmark', 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon',
              "sphinx_rtd_theme"]
autodoc_mock_imports = ['numpy', 'scipy', 'pandas', 'radexwrap', 'seaborn', 'matplotlib']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
autodoc_member_order = 'bysource'
StyleMask | StyleMask-master/run_inference.py | import os
import datetime
import random
import sys
import argparse
from argparse import Namespace
import torch
from torch import nn
import numpy as np
import warnings
from tqdm import tqdm
warnings.filterwarnings("ignore")
sys.dont_write_bytecode = True
seed = 0
random.seed(seed)
import face_alignment
from libs.models.StyleGAN2.model import Generator as StyleGAN2Generator
from libs.models.mask_predictor import MaskPredictor
from libs.utilities.utils import make_noise, generate_image, generate_new_stylespace, save_image, save_grid, get_files_frompath
from libs.utilities.stylespace_utils import decoder
from libs.configs.config_models import stylegan2_ffhq_1024
from libs.utilities.utils_inference import preprocess_image, invert_image
from libs.utilities.image_utils import image_to_tensor
from libs.models.inversion.psp import pSp
class Inference_demo():
    """Face-reenactment inference.

    Mixes the StyleSpace codes of a source and a target face with the
    pretrained mask network and decodes the reenacted image with a pretrained
    StyleGAN2 generator. Inputs may be real images (inverted with e4e) or
    synthetic latent codes.
    """

    def __init__(self, args):
        # Unpack the argument dictionary into attributes and create the
        # output directory.
        self.args = args
        self.device = 'cuda'
        self.output_path = args['output_path']
        # NOTE(review): computed but never used — presumably intended for
        # saving the run arguments; confirm or remove.
        arguments_json = os.path.join(self.output_path, 'arguments.json')
        self.masknet_path = args['masknet_path']
        self.image_resolution = args['image_resolution']
        self.dataset = args['dataset']
        self.source_path = args['source_path']
        self.target_path = args['target_path']
        self.num_pairs = args['num_pairs']
        self.save_grid = args['save_grid']
        self.save_image = args['save_image']
        self.resize_image = args['resize_image']
        if not os.path.exists(self.output_path):
            os.makedirs(self.output_path, exist_ok=True)

    def load_models(self, inversion):
        """Load the StyleGAN2 generator and the mask network; when `inversion`
        is True also load the face aligner and the e4e encoder."""
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        if self.dataset == 'ffhq' and self.image_resolution == 1024:
            self.generator_path = stylegan2_ffhq_1024['gan_weights']
            self.channel_multiplier = stylegan2_ffhq_1024['channel_multiplier']
            self.split_sections = stylegan2_ffhq_1024['split_sections']
            self.stylespace_dim = stylegan2_ffhq_1024['stylespace_dim']
        else:
            # NOTE(review): only warns; self.generator_path stays unset so the
            # os.path.exists check below would raise — confirm intended.
            print('Incorect dataset type {} and image resolution {}'.format(self.dataset, self.image_resolution))
        if os.path.exists(self.generator_path):
            print('----- Load generator from {} -----'.format(self.generator_path))
            self.G = StyleGAN2Generator(self.image_resolution, 512, 8, channel_multiplier = self.channel_multiplier)
            self.G.load_state_dict(torch.load(self.generator_path)['g_ema'], strict = True)
            self.G.cuda().eval()
            # use truncation
            self.truncation = 0.7
            self.trunc = self.G.mean_latent(4096).detach().clone()
        else:
            print('Please download the pretrained model for StyleGAN2 generator and save it into ./pretrained_models path')
            exit()
        if os.path.exists(self.masknet_path):
            print('----- Load mask network from {} -----'.format(self.masknet_path))
            ckpt = torch.load(self.masknet_path, map_location=torch.device('cpu'))
            self.num_layers_control = ckpt['num_layers_control']
            # One small MaskPredictor per controlled StyleSpace layer, keyed by
            # 'network_XX' to match the names in the checkpoint state dict.
            self.mask_net = nn.ModuleDict({})
            for layer_idx in range(self.num_layers_control):
                network_name_str = 'network_{:02d}'.format(layer_idx)
                # Net info
                stylespace_dim_layer = self.split_sections[layer_idx]
                input_dim = stylespace_dim_layer
                output_dim = stylespace_dim_layer
                inner_dim = stylespace_dim_layer
                network_module = MaskPredictor(input_dim, output_dim, inner_dim = inner_dim)
                self.mask_net.update({network_name_str: network_module})
            self.mask_net.load_state_dict(ckpt['mask_net'])
            self.mask_net.cuda().eval()
        else:
            print('Please download the pretrained model for Mask network and save it into ./pretrained_models path')
            exit()
        if inversion:
            self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cuda')
            ### Load inversion model only when the input is image. ###
            self.encoder_path = stylegan2_ffhq_1024['e4e_inversion_model']
            print('----- Load e4e encoder from {} -----'.format(self.encoder_path))
            ckpt = torch.load(self.encoder_path, map_location='cpu')
            opts = ckpt['opts']
            opts['output_size'] = self.image_resolution
            opts['checkpoint_path'] = self.encoder_path
            opts['device'] = 'cuda'
            opts['channel_multiplier'] = self.channel_multiplier
            opts['dataset'] = self.dataset
            opts = Namespace(**opts)
            self.encoder = pSp(opts)
            self.encoder.cuda().eval()

    def load_samples(self, filepath):
        """Resolve `filepath` into a list of samples.

        Returns (samples, inversion): samples is a list of image paths or a
        tensor of latent codes; inversion is True when the samples are real
        images that must be inverted with e4e.
        """
        inversion = False
        if filepath is None:
            # Generate random latent code
            files_grabbed = []
            for i in range(self.num_pairs):
                files_grabbed.append(make_noise(1, 512))
        else:
            if os.path.isdir(filepath):
                ## Check if files inside directory are images. Else check if latent codes
                files_grabbed = get_files_frompath(filepath, ['*.png', '*.jpg'])
                if len(files_grabbed) == 0:
                    files_grabbed = get_files_frompath(filepath, ['*.npy'])
                    if len(files_grabbed) == 0:
                        print('Please specify correct path: folder with images (.png, .jpg) or latent codes (.npy)')
                        exit()
                    # Stack all saved latent codes into one tensor.
                    z_codes = []
                    for file_ in files_grabbed:
                        z_codes.append(torch.from_numpy(np.load(file_)).cuda())
                    z_codes = torch.cat(z_codes).unsqueeze(0)
                    files_grabbed = z_codes
                else:
                    inversion = True # invert real images
            elif os.path.isfile(filepath):
                head, tail = os.path.split(filepath)
                ext = tail.split('.')[-1]
                # Check if file is image
                if ext == 'png' or ext == 'jpg':
                    files_grabbed = [filepath]
                    inversion = True
                elif ext == 'npy':
                    z_codes = torch.from_numpy(np.load(filepath)).unsqueeze(1)
                    files_grabbed = z_codes
                else:
                    print('Wrong path. Expected file image (.png, .jpg) or latent code (.npy)')
                    exit()
            else:
                print('Wrong path. Expected file image (.png, .jpg) or latent code (.npy)')
                exit()
        return files_grabbed, inversion

    def reenact_pair(self, source_code, target_code):
        """Reenact one pair: keep the source identity, transfer the target's
        pose/expression via the predicted StyleSpace masks.

        Returns (source_img, target_img, reenacted_img)."""
        with torch.no_grad():
            # Get source style space
            source_img, style_source, w_source, noise_source = generate_image(self.G, source_code, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                                    input_is_latent = self.input_is_latent, return_latents= True, resize_image = self.resize_image)
            # Get target style space
            target_img, style_target, w_target, noise_target = generate_image(self.G, target_code, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                                    input_is_latent = self.input_is_latent, return_latents= True, resize_image = self.resize_image)
            # Get reenacted image: each per-layer mask is predicted from the
            # difference of the source and target style codes for that layer.
            masks_per_layer = []
            for layer_idx in range(self.num_layers_control):
                network_name_str = 'network_{:02d}'.format(layer_idx)
                style_source_idx = style_source[layer_idx]
                style_target_idx = style_target[layer_idx]
                styles = style_source_idx - style_target_idx
                mask_idx = self.mask_net[network_name_str](styles)
                masks_per_layer.append(mask_idx)
            mask = torch.cat(masks_per_layer, dim=1)
            style_source = torch.cat(style_source, dim=1)
            style_target = torch.cat(style_target, dim=1)
            # Blend the two style spaces with the mask and decode with the
            # source's w / noise so identity-related inputs stay fixed.
            new_style_space = generate_new_stylespace(style_source, style_target, mask, num_layers_control = self.num_layers_control)
            new_style_space = list(torch.split(tensor=new_style_space, split_size_or_sections=self.split_sections, dim=1))
            reenacted_img = decoder(self.G, new_style_space, w_source, noise_source, resize_image = self.resize_image)
            return source_img, target_img, reenacted_img

    def check_paths(self):
        """Assert that source and target inputs are of the same kind
        (both None, both files, or both directories)."""
        assert type(self.target_path) == type(self.source_path), \
            "Source path and target path should have the same type, None, files (.png, .jpg or .npy) or directories with files of type .png, .jpg or .npy"
        if self.source_path is not None and self.target_path is not None:
            if os.path.isdir(self.source_path):
                assert os.path.isdir(self.target_path), \
                    "Source path and target path should have the same type, None, files (.png, .jpg or .npy) or directories with files of type .png, .jpg or .npy"
            if os.path.isfile(self.source_path):
                assert os.path.isfile(self.target_path), \
                    "Source path and target path should have the same type, None, files (.png, .jpg or .npy) or directories with files of type .png, .jpg or .npy"

    def run(self):
        """Load inputs and models, then reenact and save every pair."""
        self.check_paths()
        source_samples, inversion = self.load_samples(self.source_path)
        # NOTE(review): the `inversion` flag from the source call is overwritten
        # by the target call; both inputs are assumed to be of the same kind
        # (check_paths enforces this for files/dirs).
        target_samples, inversion = self.load_samples(self.target_path)
        assert len(source_samples) == len(target_samples), "Number of source samples should be the same with target samples"
        self.load_models(inversion)
        self.num_pairs = len(source_samples)
        print('Reenact {} pairs'.format(self.num_pairs))
        for i in tqdm(range(self.num_pairs)):
            if inversion: # Real image
                # Preprocess and invert real images into the W+ latent space using Encoder4Editing method
                cropped_image = preprocess_image(source_samples[i], self.fa, save_filename = None)
                source_img = image_to_tensor(cropped_image).unsqueeze(0).cuda()
                inv_image, source_code = invert_image(source_img, self.encoder, self.G, self.truncation, self.trunc)
                cropped_image = preprocess_image(target_samples[i], self.fa)
                target_img = image_to_tensor(cropped_image).unsqueeze(0).cuda()
                inv_image, target_code = invert_image(target_img, self.encoder, self.G, self.truncation, self.trunc)
                self.input_is_latent = True
            else: # synthetic latent code
                if self.source_path is not None:
                    source_code = source_samples[i].cuda()
                    target_code = target_samples[i].cuda()
                    if source_code.ndim == 2:
                        self.input_is_latent = False # Z space 1 X 512
                    elif source_code.ndim == 3:
                        # W+ codes are already truncated; disable truncation.
                        self.truncation = 1.0
                        self.trunc = None
                        self.input_is_latent = True # W space 1 X 18 X 512
                else:
                    source_code = source_samples[i].cuda()
                    target_code = target_samples[i].cuda()
                    self.input_is_latent = False # Z space
            source_img, target_img, reenacted_img = self.reenact_pair(source_code, target_code)
            if self.save_grid:
                save_grid(source_img, target_img, reenacted_img, os.path.join(self.output_path, 'grid_{:04d}.png').format(i))
            if self.save_image:
                save_image(reenacted_img, os.path.join(self.output_path, '{:04d}.png').format(i))
def main():
    """
    Inference script entry point: parses command-line arguments and runs
    Inference_demo over the requested source/target pairs.

    Options:
        ######### General ###########
        --output_path               : path to save output images
        --source_path               : an image file, a latent code (.npy), a directory
                                      of images or latent codes, or None.
                                      If None, random latent codes are generated.
        --target_path               : same accepted forms as --source_path.
        --masknet_path              : path to pretrained model for mask network
        --dataset                   : dataset (ffhq)
        --image_resolution          : image resolution (1024)
        --num_pairs                 : number of pairs to reenact

        ########## Visualization ##########
        --save_grid                 : generate a figure with source, target and reenacted image
        --save_image                : save only the reenacted image
        --resize_image              : resize image from 1024 to 256

    Example:
        python run_inference.py --output_path ./results --save_grid
    """
    parser = argparse.ArgumentParser(description="training script")

    ######### General #########
    parser.add_argument('--output_path', type=str, required = True, help="path to save output images")
    parser.add_argument('--source_path', type=str, default = None, help='path to source samples (latent codes or images)')
    parser.add_argument('--target_path', type=str, default = None, help='path to target samples (latent codes or images)')
    parser.add_argument('--masknet_path', type=str, default = './pretrained_models/mask_network_1024.pt', help="path to pretrained model for mask network")
    parser.add_argument('--dataset', type=str, default = 'ffhq', help="dataset")
    parser.add_argument('--image_resolution', type=int, default = 1024, help="image resolution")
    parser.add_argument('--num_pairs', type=int, default = 4, help="number of random pairs to reenact")

    # Visualization flags (all off by default).
    parser.add_argument('--save_grid', dest='save_grid', action='store_true', help="Generate figure with source, target and reenacted image")
    parser.set_defaults(save_grid=False)
    parser.add_argument('--save_image', dest='save_image', action='store_true', help="Save only the reenacted image")
    parser.set_defaults(save_image=False)
    parser.add_argument('--resize_image', dest='resize_image', action='store_true', help="Resize image from 1024 to 256")
    parser.set_defaults(resize_image=False)

    # Parse given arguments
    args = parser.parse_args()
    args = vars(args) # convert to dictionary
    inf = Inference_demo(args)
    inf.run()

if __name__ == '__main__':
    main()
| 12,669 | 39.479233 | 162 | py |
StyleMask | StyleMask-master/extract_statistics.py | """
Script to extract the npy file with the min, max values of facial pose parameters (yaw, pitch, roll, jaw and expressions)
1. Generate a set of random synthetic images
2. Use DECA model to extract the facial shape and the corresponding parameters
3. Calculate min, max values
"""
import os
import glob
import numpy as np
from PIL import Image
import torch
from torch.nn import functional as F
import matplotlib.pyplot as plt
import json
import cv2
from tqdm import tqdm
import argparse
from torchvision import utils as torch_utils
import warnings
warnings.filterwarnings("ignore")
from libs.configs.config_models import *
from libs.utilities.utils import make_noise, make_path, calculate_shapemodel
from libs.DECA.estimate_DECA import DECA_model
from libs.models.StyleGAN2.model import Generator as StyleGAN2Generator
def extract_stats(statistics):
    """Print and return the [min, max] range of every pose/expression column.

    `statistics` is a (num_samples, num_params) array whose columns are yaw,
    pitch, roll, jaw and then the expression coefficients, in that order.
    Returns a list of [min, max] pairs, one per column.
    """
    named = {0: 'yaw', 1: 'pitch', 2: 'roll', 3: 'jaw'}
    ranges = []
    # Transpose so each iteration sees one parameter across all samples.
    for idx, column in enumerate(np.transpose(statistics, (1, 0))):
        lo = np.amin(column)
        hi = np.amax(column)
        label = named.get(idx, 'exp_{:02d}'.format(idx))
        print('{}/{} Min {:.2f} Max {:.2f}'.format(label, idx, lo, hi))
        ranges.append([lo, hi])
    return ranges
if __name__ == '__main__':
    # Sample random synthetic faces from StyleGAN2, extract DECA pose and
    # expression parameters for each, then save the raw statistics and the
    # per-parameter [min, max] ranges.
    num_images = 2000
    image_resolution = 1024
    dataset = 'FFHQ'
    output_path = './{}_stats'.format(dataset)
    make_path(output_path)

    gan_weights = stylegan2_ffhq_1024['gan_weights']
    channel_multiplier = stylegan2_ffhq_1024['channel_multiplier']
    print('----- Load generator from {} -----'.format(gan_weights))
    truncation = 0.7
    generator = StyleGAN2Generator(image_resolution, 512, 8, channel_multiplier= channel_multiplier)
    generator.load_state_dict(torch.load(gan_weights)['g_ema'], strict = True)
    generator.cuda().eval()
    trunc = generator.mean_latent(4096).detach().clone()

    shape_model = DECA_model('cuda')
    # DECA expects 256x256 input; pool the 1024x1024 generator output down.
    face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))

    statistics = []
    with torch.no_grad():
        for i in tqdm(range(num_images)):
            z = make_noise(1, 512).cuda()
            source_img = generator([z], return_latents = False, truncation = truncation, truncation_latent = trunc, input_is_latent = False)[0]
            source_img = face_pool(source_img)
            params_source, angles_source = calculate_shapemodel(shape_model, source_img)
            yaw = angles_source[:,0][0].detach().cpu().numpy()
            pitch = angles_source[:,1][0].detach().cpu().numpy()
            roll = angles_source[:, 2][0].detach().cpu().numpy()
            exp = params_source['alpha_exp'][0].detach().cpu().numpy()
            jaw = params_source['pose'][0, 3].detach().cpu().numpy()
            # One 54-vector per image: yaw, pitch, roll, jaw, 50 expressions.
            tmp = np.zeros(54)
            tmp[0] = yaw
            tmp[1] = pitch
            tmp[2] = roll
            tmp[3] = jaw
            tmp[4:] = exp
            # np.save(os.path.join(output_path, '{:07d}.npy'.format(i)), tmp)
            statistics.append(tmp)

    statistics = np.asarray(statistics)
    np.save(os.path.join(output_path, 'stats_all.npy'), statistics)
    ranges = extract_stats(statistics)
    np.save(os.path.join(output_path, 'ranges_{}.npy'.format(dataset)), ranges)
| 3,152 | 26.181034 | 134 | py |
StyleMask | StyleMask-master/run_trainer.py | import os
import datetime
import random
import sys
import json
import argparse
import warnings
warnings.filterwarnings("ignore")
sys.dont_write_bytecode = True
from libs.trainer import Trainer
def main():
    """
    Training script entry point: parses command-line arguments, derives the
    experiment path and launches Trainer.train().

    Options:
        ######### General ###########
        --experiment_path           : path to save experiment
        --use_wandb                 : use wandb to log losses and evaluation metrics
        --log_images_wandb          : if True log images on wandb
        --project_wandb             : project name for wandb
        --resume_training_model     : path to model to continue training or None

        ######### Generator #########
        --dataset_type              : voxceleb or ffhq
        --image_resolution          : image resolution of pre-trained GAN model
                                      (256 for voxceleb)

        ######### Dataset #########
        --synthetic_dataset_path    : npy file with random synthetic latent codes
                                      for evaluation

        ######### Training #########
        --lr                        : learning rate of the direction matrix model
        --num_layers_control        : number of layers to apply the mask
        --max_iter                  : maximum number of training iterations
        --batch_size                : training batch size
        --lambda_identity           : identity loss weight
        --lambda_perceptual         : perceptual loss weight
        --lambda_shape              : shape loss weight
        --use_recurrent_cycle_loss  : use recurrent cycle loss
        --steps_per_log             : iterations per log line
        --steps_per_save_models     : iterations per model checkpoint
        --steps_per_evaluation      : iterations per evaluation during training
        --validation_pairs          : number of validation pairs for evaluation
        --num_pairs_log             : number of pairs to visualize during evaluation

    Example:
        python run_trainer.py --experiment_path ./training_attempts/exp_v00
    """
    parser = argparse.ArgumentParser(description="training script")

    ######### General ###########
    parser.add_argument('--experiment_path', type=str, required = True, help="path to save the experiment")
    parser.add_argument('--use_wandb', dest='use_wandb', action='store_true', help="use wandb to log losses and evaluation metrics")
    parser.set_defaults(use_wandb=False)
    parser.add_argument('--log_images_wandb', dest='log_images_wandb', action='store_true', help="if True log images on wandb")
    parser.set_defaults(log_images_wandb=False)
    parser.add_argument('--project_wandb', type=str, default = 'stylespace', help="Project name for wandb")
    parser.add_argument('--resume_training_model', type=str, default = None, help="Path to model or None")

    ######### Generator #########
    parser.add_argument('--image_resolution', type=int, default=1024, choices=(256, 1024), help="image resolution of pre-trained GAN model")
    parser.add_argument('--dataset_type', type=str, default='ffhq', help="set dataset name")

    ######### Dataset #########
    parser.add_argument('--synthetic_dataset_path', type=str, default=None, help="set synthetic dataset path for evaluation")

    ######### Training #########
    parser.add_argument('--lr', type=float, default=0.0001, help="set the learning rate of direction matrix model")
    parser.add_argument('--num_layers_control', type=int, default=12, help="set number of layers to apply the mask")
    parser.add_argument('--max_iter', type=int, default=100000, help="set maximum number of training iterations")
    parser.add_argument('--batch_size', type=int, default=12, help="set training batch size")
    parser.add_argument('--test_batch_size', type=int, default=2, help="set test batch size")
    parser.add_argument('--workers', type=int, default=1, help="set workers")

    # Loss weights.
    parser.add_argument('--lambda_identity', type=float, default=10.0, help="")
    parser.add_argument('--lambda_perceptual', type=float, default=0.0, help="")
    parser.add_argument('--lambda_shape', type=float, default=1.0, help="")
    # NOTE(review): store_false means passing this flag *disables* the loss;
    # the default (flag absent) is True, as the help text says.
    parser.add_argument('--use_recurrent_cycle_loss', dest='use_recurrent_cycle_loss', action='store_false', help="Use recurrent cycle loss. Default is True!")

    parser.add_argument('--steps_per_log', type=int, default=10, help="print log")
    parser.add_argument('--steps_per_save_models', type=int, default=1000, help="steps per save model")
    parser.add_argument('--steps_per_evaluation', type=int, default=1000, help="steps per evaluation during training")
    parser.add_argument('--validation_pairs', type=int, default=100, help="number of pairs for evaluation")
    parser.add_argument('--num_pairs_log', type=int, default=4, help="how many pairs on the reenactment figure")

    # Parse given arguments
    args = parser.parse_args()
    args = vars(args) # convert to dictionary

    # Create output dir and save current arguments; the experiment directory is
    # suffixed with dataset type and resolution for easy identification.
    experiment_path = args['experiment_path']
    experiment_path = experiment_path + '_{}_{}'.format(args['dataset_type'], args['image_resolution'])
    args['experiment_path'] = experiment_path

    # Set up trainer
    print("#. Experiment: {}".format(experiment_path))
    trainer = Trainer(args)
    trainer.train()

if __name__ == '__main__':
    main()
| 5,104 | 44.176991 | 157 | py |
StyleMask | StyleMask-master/libs/trainer.py | """
"""
import os
import json
import torch
import time
import numpy as np
import pdb
import cv2
import wandb
from torch import autograd
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from libs.utilities.utils import *
from libs.utilities.image_utils import *
from libs.DECA.estimate_DECA import DECA_model
from libs.models.StyleGAN2.model import Generator as StyleGAN2Generator
from libs.models.mask_predictor import MaskPredictor
from libs.utilities.stylespace_utils import decoder
from libs.configs.config_models import stylegan2_ffhq_1024
from libs.criteria.losses import Losses
from libs.criteria import id_loss
from libs.criteria.lpips.lpips import LPIPS
from libs.utilities.utils_inference import generate_grid_image, calculate_evaluation_metrics
from libs.utilities.dataloader import CustomDataset_validation
class Trainer(object):
    """Trains the StyleMask mask-prediction networks for face reenactment.

    A frozen StyleGAN2 generator synthesizes source/target image pairs; one
    MaskPredictor MLP per controlled style-space layer learns a soft mask that
    blends source and target style codes, so the decoded image keeps the
    source identity with the target's head pose/expression. Supervision comes
    from DECA shape/landmark losses plus identity (ArcFace) and LPIPS losses.
    """
    def __init__(self, args):
        # args: dictionary of command-line arguments (already vars()-converted).
        self.args = args
        self.initialize_arguments(args)
        ################# Initialize output paths #################
        make_path(self.output_path)
        self.log_dir = os.path.join(self.output_path, 'logs')
        make_path(self.log_dir)
        self.models_dir = os.path.join(self.output_path, 'models')
        make_path(self.models_dir)
        ####################################################################
        # save arguments file with params
        save_arguments_json(args, self.output_path, 'arguments.json')

    def initialize_arguments(self, args):
        """Copy the entries of the argument dictionary onto instance attributes."""
        self.output_path = args['experiment_path']
        self.use_wandb = args['use_wandb']
        self.log_images_wandb = args['log_images_wandb']
        self.project_wandb = args['project_wandb']
        self.resume_training_model = args['resume_training_model']
        self.image_resolution = args['image_resolution']
        self.dataset_type = args['dataset_type']
        self.synthetic_dataset_path = args['synthetic_dataset_path']
        self.lr = args['lr']
        self.num_layers_control = args['num_layers_control']
        self.max_iter = args['max_iter']
        self.batch_size = args['batch_size']
        self.test_batch_size = args['test_batch_size']
        self.workers = args['workers']
        # Loss weights
        self.lambda_identity = args['lambda_identity']
        self.lambda_perceptual = args['lambda_perceptual']
        self.lambda_shape = args['lambda_shape']
        self.use_recurrent_cycle_loss = args['use_recurrent_cycle_loss']
        self.steps_per_log = args['steps_per_log']
        self.steps_per_save_models = args['steps_per_save_models']
        self.steps_per_evaluation = args['steps_per_evaluation']
        self.validation_pairs = args['validation_pairs']
        self.num_pairs_log = args['num_pairs_log']
        # if self.num_pairs_log > self.validation_pairs:
        #     self.num_pairs_log = self.validation_pairs

    def load_models(self):
        """Instantiate DECA, the loss modules, the frozen StyleGAN2 generator
        and one MaskPredictor per controlled style-space layer."""
        ################## Initialize models #################
        print('-- Load DECA model ')
        self.deca = DECA_model('cuda')
        self.id_loss_ = id_loss.IDLoss().cuda().eval()
        self.lpips_loss = LPIPS(net_type='alex').cuda().eval()
        self.losses = Losses()
        ####################################################################
        self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        # Only the FFHQ-1024 configuration is wired up here.
        if self.dataset_type == 'ffhq' and self.image_resolution == 1024:
            self.generator_path = stylegan2_ffhq_1024['gan_weights']
            self.channel_multiplier = stylegan2_ffhq_1024['channel_multiplier']
            self.split_sections = stylegan2_ffhq_1024['split_sections']
            self.stylespace_dim = stylegan2_ffhq_1024['stylespace_dim']
            self.exp_ranges = np.load(stylegan2_ffhq_1024['expression_ranges'])
        else:
            # NOTE(review): this only prints; the attributes above stay unset
            # and later accesses will fail for unsupported configurations.
            print('Incorect dataset type {} and image resolution {}'.format(self.dataset_type, self.image_resolution))
        if self.num_layers_control is not None:
            self.num_nets = self.num_layers_control
        else:
            self.num_nets = len(self.split_sections)
        print('-- Load generator from {} '.format(self.generator_path))
        self.G = StyleGAN2Generator(self.image_resolution, 512, 8, channel_multiplier= self.channel_multiplier)
        if self.image_resolution == 256:
            self.G.load_state_dict(torch.load(self.generator_path)['g_ema'], strict = False)
        else:
            self.G.load_state_dict(torch.load(self.generator_path)['g_ema'], strict = True)
        self.G.cuda().eval()
        self.truncation = 0.7
        self.trunc = self.G.mean_latent(4096).detach().clone()
        print('-- Initialize mask predictor.')
        # One mask-prediction MLP per controlled style-space layer; each layer's
        # net has input/output/hidden size equal to that layer's channel count.
        self.mask_net = nn.ModuleDict({})
        for layer_idx in range(self.num_nets):
            network_name_str = 'network_{:02d}'.format(layer_idx)
            # Net info
            stylespace_dim_layer = self.split_sections[layer_idx]
            input_dim = stylespace_dim_layer
            output_dim = stylespace_dim_layer
            inner_dim = stylespace_dim_layer
            network_module = MaskPredictor(input_dim, output_dim, inner_dim = inner_dim)
            self.mask_net.update({network_name_str: network_module})
            out_text = 'Network {}: ----> input_dim:{} - output_dim:{}'.format(layer_idx, input_dim, output_dim)
            print(out_text)

    def configure_dataset(self):
        """Build the synthetic validation-pair dataset and its dataloader."""
        self.test_dataset = CustomDataset_validation(synthetic_dataset_path = self.synthetic_dataset_path, validation_pairs = self.validation_pairs)
        self.test_dataloader = DataLoader(self.test_dataset,
                                          batch_size=self.test_batch_size ,
                                          shuffle=False,
                                          num_workers=int(self.workers),
                                          drop_last=True)

    def start_from_checkpoint(self):
        """Restore mask_net weights from a checkpoint if one was requested.

        Returns the step stored in the checkpoint, or 0 when starting fresh.
        """
        step = 0
        if self.resume_training_model is not None:
            if os.path.isfile(self.resume_training_model):
                print('Resuming training from {}'.format(self.resume_training_model))
                state_dict = torch.load(self.resume_training_model)
                if 'step' in state_dict:
                    step = state_dict['step']
                if 'mask_net' in state_dict:
                    self.mask_net.load_state_dict(state_dict['mask_net'])
        return step

    def get_shifted_image(self, style_source, style_target, w, noise):
        """Blend source/target style codes via predicted masks and decode.

        style_source/style_target are per-layer lists of style tensors; w and
        noise come from the source forward pass. Returns the decoded (resized)
        reenacted images and the blended style space as a per-layer list.
        """
        # Generate shift
        masks_per_layer = []
        for layer_idx in range(self.num_nets):
            network_name_str = 'network_{:02d}'.format(layer_idx)
            style_source_idx = style_source[layer_idx]
            style_target_idx = style_target[layer_idx]
            # The mask is predicted from the source-minus-target style difference.
            styles = style_source_idx - style_target_idx
            mask_idx = self.mask_net[network_name_str](styles)
            masks_per_layer.append(mask_idx)
        style_source = torch.cat(style_source, dim=1)
        style_target = torch.cat(style_target, dim=1)
        mask = torch.cat(masks_per_layer, dim=1)
        new_style_space = generate_new_stylespace(style_source, style_target, mask, self.num_layers_control)
        new_style_space = list(torch.split(tensor=new_style_space, split_size_or_sections=self.split_sections, dim=1))
        imgs_shifted = decoder(self.G, new_style_space, w, noise, resize_image = True)
        return imgs_shifted, new_style_space

    def train(self):
        """Main training loop over randomly sampled source/target latent pairs."""
        self.load_models()
        if self.use_wandb:
            #########################
            config = self.args
            wandb.init(
                project= self.project_wandb,
                notes="",
                tags=["debug"],
                config=config,
            )
            name = self.output_path.split('/')[-1]
            wandb.run.name = name
            wandb.watch(self.mask_net, log="all", log_freq=500)
            #######################
        self.configure_dataset()
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.G.cuda().eval()
        self.mask_net.train().cuda()
        # Only the mask networks are optimized; the generator stays frozen.
        optimizer = torch.optim.Adam(self.mask_net.parameters(), lr=self.lr, weight_decay=5e-4)
        self.truncation = 0.7
        latent_in = torch.randn(4096, 512).cuda()
        self.trunc = self.G.style(latent_in).mean(0, keepdim=True)
        input_is_latent = False
        recovered_step = self.start_from_checkpoint()
        if recovered_step != 0:
            print('Resume training from {}'.format(recovered_step))
        list_loss = []
        for step in range(recovered_step, self.max_iter):
            loss_dict = {}
            self.G.zero_grad()
            source_z = make_noise(self.batch_size, 512, None).cuda()
            target_z = make_noise(self.batch_size, 512, None).cuda()
            # Image synthesis and DECA fitting carry no gradient; gradients
            # flow only through the mask prediction / blending path below.
            with torch.no_grad():
                ######## Source images ########
                imgs_source, style_source, w_source, noise_source = generate_image(self.G, source_z, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                    input_is_latent = input_is_latent, return_latents= True, resize_image = True)
                params_source, angles_source = calculate_shapemodel(self.deca, imgs_source)
                ######## Target images ########
                imgs_target, style_target, w_target, noise_target = generate_image(self.G, target_z, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                    input_is_latent = input_is_latent, return_latents= True, resize_image = True)
                params_target, angles_target = calculate_shapemodel(self.deca, imgs_target)
            ######## Generate reenacted image between source and target images ########
            imgs_shifted, new_style_space = self.get_shifted_image(style_source, style_target, w_source, noise_source)
            params_shifted, angles_shifted = calculate_shapemodel(self.deca, imgs_shifted)
            loss, loss_dict = self.calculate_loss(params_source, params_shifted, params_target, imgs_source, imgs_shifted)
            if self.use_recurrent_cycle_loss:
                ########## Recurrent Cycle loss ##########
                with torch.no_grad():
                    ### Generate a new random target image ###
                    target_z_cycle = make_noise(self.batch_size, 512, None).cuda()
                    imgs_target_cycle, style_target_cycle, w_target_cycle, noise_target_cycle = generate_image(self.G, target_z_cycle, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                        input_is_latent = input_is_latent, return_latents= True, resize_image = True)
                    params_target_cycle, angles_target_cycle = calculate_shapemodel(self.deca, imgs_target_cycle)
                #### Reenact source image into the facial pose of target_z_cycle ####
                imgs_shifted_hat, new_style_space_hat = self.get_shifted_image(style_source, style_target_cycle, w_source, noise_source)
                params_shifted_hat, angles_shifted_hat = calculate_shapemodel(self.deca, imgs_shifted_hat)
                #####################################################################
                #### Reenact initial shifted image into the facial pose of target_z_cycle ####
                imgs_shifted_hat_2, new_style_space_hat_2 = self.get_shifted_image(new_style_space, style_target_cycle, w_source, noise_source)
                params_shifted_hat_2, angles_shifted_hat_2 = calculate_shapemodel(self.deca, imgs_shifted_hat_2)
                loss_cycle, loss_dict = self.calculate_recurrent_loss(params_source, params_target_cycle, params_shifted_hat,
                    params_shifted_hat_2, imgs_source, imgs_shifted_hat, imgs_shifted_hat_2, loss_dict)
                loss += loss_cycle
            ############## Total loss ##############
            list_loss.append(loss.data.item())
            self.mask_net.zero_grad()
            loss.backward()
            optimizer.step()
            ######### Evaluate #########
            if step % self.steps_per_log == 0:
                out_text = '[step {}]'.format(step)
                for key, value in loss_dict.items():
                    out_text += (' | {}: {:.2f}'.format(key, value))
                out_text += '| Mean Loss {:.2f}'.format(np.mean(np.array(list_loss)))
                print(out_text)
            if step % self.steps_per_save_models == 0 and step > 0:
                self.save_model(step)
            if step % self.steps_per_evaluation == 0:
                self.evaluate_model_reenactment(step)
            # Reset the running-mean loss window every 500 steps.
            if step % 500 == 0 and step > 0:
                list_loss = []
            if self.use_wandb:
                wandb.log({
                    'step': step,
                })
                wandb.log(loss_dict)

    def calculate_loss(self, params_source, params_shifted, params_target, imgs_source, imgs_shifted):
        """Compute the reenactment loss for one batch.

        Shape/mouth/eye losses compare DECA geometry of the reenacted image
        (target pose+expression, source identity shape, fixed frontal camera)
        against the reenacted image's own fitted geometry; identity and LPIPS
        losses compare the reenacted image to the source image.
        Returns (total_loss, dict of per-term scalar values).
        """
        loss_dict = {}
        loss = 0
        ############## Shape Loss ##############
        if self.lambda_shape !=0:
            # Ground truth: target pose/expression with the source's identity
            # shape, rendered with a fixed camera (cam zeroed, scale 8).
            coefficients_gt = {}
            coefficients_gt['pose'] = params_target['pose']
            coefficients_gt['exp'] = params_target['alpha_exp']
            coefficients_gt['cam'] = params_source['cam']
            coefficients_gt['cam'][:,:] = 0.
            coefficients_gt['cam'][:,0] = 8
            coefficients_gt['shape'] = params_source['alpha_shp']
            landmarks2d_gt, landmarks3d_gt, shape_gt = self.deca.calculate_shape(coefficients_gt)
            coefficients_reen = {}
            coefficients_reen['pose'] = params_shifted['pose']
            coefficients_reen['shape'] = params_shifted['alpha_shp']
            coefficients_reen['exp'] = params_shifted['alpha_exp']
            coefficients_reen['cam'] = params_shifted['cam']
            coefficients_reen['cam'][:,:] = 0.
            coefficients_reen['cam'][:,0] = 8
            landmarks2d_reenacted, landmarks3d_reenacted, shape_reenacted = self.deca.calculate_shape(coefficients_reen)
            loss_shape = self.lambda_shape * self.losses.calculate_shape_loss(shape_gt, shape_reenacted, normalize = False)
            loss_mouth = self.lambda_shape * self.losses.calculate_mouth_loss(landmarks2d_gt, landmarks2d_reenacted)
            loss_eye = self.lambda_shape * self.losses.calculate_eye_loss(landmarks2d_gt, landmarks2d_reenacted)
            loss_dict['loss_shape'] = loss_shape.data.item()
            loss_dict['loss_eye'] = loss_eye.data.item()
            loss_dict['loss_mouth'] = loss_mouth.data.item()
            loss += loss_mouth
            loss += loss_shape
            loss += loss_eye
        ####################################################
        ############## Identity losses ##############
        if self.lambda_identity != 0:
            loss_identity = self.lambda_identity * self.id_loss_(imgs_shifted, imgs_source.detach())
            loss_dict['loss_identity'] = loss_identity.data.item()
            loss += loss_identity
        if self.lambda_perceptual != 0:
            # LPIPS expects images in [0, 255] here.
            imgs_source_255 = tensor_to_255(imgs_source)
            imgs_shifted_255 = tensor_to_255(imgs_shifted)
            loss_perceptual = self.lambda_perceptual * self.lpips_loss(imgs_shifted_255, imgs_source_255.detach())
            loss_dict['loss_perceptual'] = loss_perceptual.data.item()
            loss += loss_perceptual
        loss_dict['loss'] = loss.data.item()
        return loss, loss_dict

    def calculate_recurrent_loss(self, params_source, params_target_cycle, params_shifted_hat, params_shifted_hat_2, imgs_source, imgs_shifted_hat, imgs_shifted_hat_2, loss_dict):
        """Compute the recurrent-cycle loss.

        Both one-hop (source -> cycle target) and two-hop (shifted -> cycle
        target) reenactments must match the same ground-truth geometry and
        preserve the source identity; the two reenactments must also agree
        perceptually with each other. Extends loss_dict in place with the
        *_cycle terms and returns (cycle_loss, loss_dict).
        """
        loss = 0
        ############## Shape Loss ##############
        if self.lambda_shape > 0:
            # 1: one-hop reenactment vs ground-truth geometry
            coefficients_gt = {}
            coefficients_gt['pose'] = params_target_cycle['pose']
            coefficients_gt['exp'] = params_target_cycle['alpha_exp']
            coefficients_gt['cam'] = params_source['cam']
            coefficients_gt['cam'][:,:] = 0.
            coefficients_gt['cam'][:,0] = 8
            coefficients_gt['shape'] = params_source['alpha_shp']
            landmarks2d_gt, landmarks3d_gt, shape_gt = self.deca.calculate_shape(coefficients_gt)
            coefficients_reen = {}
            coefficients_reen['pose'] = params_shifted_hat['pose']
            coefficients_reen['shape'] = params_shifted_hat['alpha_shp']
            coefficients_reen['exp'] = params_shifted_hat['alpha_exp']
            coefficients_reen['cam'] = params_shifted_hat['cam']
            coefficients_reen['cam'][:,:] = 0.
            coefficients_reen['cam'][:,0] = 8
            landmarks2d_reenacted, landmarks3d_reenacted, shape_reenacted = self.deca.calculate_shape(coefficients_reen)
            loss_shape = self.lambda_shape * self.losses.calculate_shape_loss(shape_gt, shape_reenacted, normalize = False)
            loss_mouth = self.lambda_shape * self.losses.calculate_mouth_loss(landmarks2d_gt, landmarks2d_reenacted)
            loss_eye = self.lambda_shape * self.losses.calculate_eye_loss(landmarks2d_gt, landmarks2d_reenacted)
            # 2: two-hop reenactment vs the same ground-truth geometry
            coefficients_gt = {}
            coefficients_gt['pose'] = params_target_cycle['pose']
            coefficients_gt['exp'] = params_target_cycle['alpha_exp']
            coefficients_gt['cam'] = params_source['cam']
            coefficients_gt['cam'][:,:] = 0.
            coefficients_gt['cam'][:,0] = 8
            coefficients_gt['shape'] = params_source['alpha_shp']
            landmarks2d_gt, landmarks3d_gt, shape_gt = self.deca.calculate_shape(coefficients_gt)
            coefficients_reen = {}
            coefficients_reen['pose'] = params_shifted_hat_2['pose']
            coefficients_reen['shape'] = params_shifted_hat_2['alpha_shp']
            coefficients_reen['exp'] = params_shifted_hat_2['alpha_exp']
            coefficients_reen['cam'] = params_shifted_hat_2['cam']
            coefficients_reen['cam'][:,:] = 0.
            coefficients_reen['cam'][:,0] = 8
            landmarks2d_reenacted, landmarks3d_reenacted, shape_reenacted = self.deca.calculate_shape(coefficients_reen)
            loss_shape += self.lambda_shape * self.losses.calculate_shape_loss(shape_gt, shape_reenacted, normalize = False)
            loss_mouth += self.lambda_shape * self.losses.calculate_mouth_loss(landmarks2d_gt, landmarks2d_reenacted)
            loss_eye += self.lambda_shape * self.losses.calculate_eye_loss(landmarks2d_gt, landmarks2d_reenacted)
            loss_dict['loss_shape_cycle'] = loss_shape.data.item()
            loss_dict['loss_eye_cycle'] = loss_eye.data.item()
            loss_dict['loss_mouth_cycle'] = loss_mouth.data.item()
            loss += loss_mouth
            loss += loss_shape
            loss += loss_eye
        ############## Identity losses ##############
        if self.lambda_identity != 0:
            loss_identity = self.lambda_identity * self.id_loss_(imgs_shifted_hat, imgs_source.detach())
            loss_identity += self.lambda_identity* self.id_loss_(imgs_shifted_hat_2, imgs_source.detach())
            loss_dict['loss_identity_cycle'] = loss_identity.data.item()
            loss += loss_identity
        if self.lambda_perceptual != 0:
            imgs_shifted_hat_255 = tensor_to_255(imgs_shifted_hat)
            imgs_shifted_hat_2_255 = tensor_to_255(imgs_shifted_hat_2)
            loss_perceptual = self.lambda_perceptual * self.lpips_loss(imgs_shifted_hat_255, imgs_shifted_hat_2_255)
            loss_dict['loss_perceptual_cycle'] = loss_perceptual.data.item()
            loss += loss_perceptual
        loss_dict['loss_cycle'] = loss.data.item()
        return loss, loss_dict

    def save_model(self, step):
        """Checkpoint the mask networks (plus step and layer count) to models_dir."""
        state_dict = {
            'step': step,
            'mask_net': self.mask_net.state_dict(),
            'num_layers_control': self.num_layers_control
        }
        checkpoint_path = os.path.join(self.models_dir, 'mask_net_{:06d}.pt'.format(step))
        torch.save(state_dict, checkpoint_path)

    # Evaluate models for face reenactment and save reenactment figure
    def evaluate_model_reenactment(self, step):
        """Run the validation pairs, report CSIM / pose / expression metrics,
        and save (and optionally wandb-log) a grid of example reenactments."""
        input_is_latent = False
        self.mask_net.eval()
        exp_error = 0; pose_error = 0; csim_total = 0; count = 0
        counter_logs = 0
        source_images = torch.zeros((self.num_pairs_log, 3, 256, 256))
        target_images = torch.zeros((self.num_pairs_log, 3, 256, 256))
        reenacted_images = torch.zeros((self.num_pairs_log, 3, 256, 256))
        for batch_idx, batch in enumerate(tqdm(self.test_dataloader)):
            with torch.no_grad():
                sample_batch = batch
                source_w = sample_batch['source_w'].cuda()
                target_w = sample_batch['target_w'].cuda()
                imgs_source, style_source, w_source, noise_source = generate_image(self.G, source_w, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                    input_is_latent = input_is_latent, return_latents= True, resize_image = True)
                params_source, angles_source = calculate_shapemodel(self.deca, imgs_source)
                imgs_target, style_target, w_target, noise_target = generate_image(self.G, target_w, self.truncation, self.trunc, self.image_resolution, self.split_sections,
                    input_is_latent = input_is_latent, return_latents= True, resize_image = True)
                params_target, angles_target = calculate_shapemodel(self.deca, imgs_target)
                imgs_shifted, new_style_space = self.get_shifted_image(style_source, style_target, w_source, noise_source)
                params_shifted, angles_shifted = calculate_shapemodel(self.deca, imgs_shifted)
                csim, pose, exp = calculate_evaluation_metrics(params_shifted, params_target, angles_shifted, angles_target, imgs_shifted, imgs_source, self.id_loss_, self.exp_ranges)
                exp_error += exp
                csim_total += csim
                pose_error += pose
                count += 1
                # Keep the first num_pairs_log triplets for the example figure.
                if counter_logs < self.num_pairs_log:
                    if (self.num_pairs_log - counter_logs) % source_w.shape[0] == 0:
                        source_images[counter_logs:counter_logs+source_w.shape[0]] = imgs_source.detach().cpu()
                        target_images[counter_logs:counter_logs+source_w.shape[0]] = imgs_target.detach().cpu()
                        reenacted_images[counter_logs:counter_logs+source_w.shape[0]] = imgs_shifted.detach().cpu()
                    else:
                        # Partial batch: take only as many as still fit.
                        num = self.num_pairs_log - counter_logs
                        source_images[counter_logs:counter_logs+num] = imgs_source[:num].detach().cpu()
                        target_images[counter_logs:counter_logs+num] = imgs_target[:num].detach().cpu()
                        reenacted_images[counter_logs:counter_logs+num] = imgs_shifted[:num].detach().cpu()
                    counter_logs += source_w.shape[0]
        sample = generate_grid_image(source_images, target_images, reenacted_images)
        save_image(sample, os.path.join(self.log_dir, '{:06d}.png'.format(step)))
        if self.use_wandb and self.log_images_wandb:
            image_array = sample.detach().cpu().numpy()
            image_array = np.transpose(image_array, (1, 2, 0))
            images = wandb.Image(image_array)
            wandb.log({"images": images})
        print('*************** Validation ***************')
        print('Expression Error: {:.4f}\t Pose Error: {:.2f}\t CSIM: {:.2f}'.format(exp_error/count, pose_error/count, csim_total/count))
        print('*************** Validation ***************')
        if self.use_wandb:
            wandb.log({
                'expression_error': exp_error/count,
                'pose_error': pose_error/count,
                'csim': csim_total/count,
            })
        # Restore training mode for the mask networks.
        self.mask_net.train()
| 20,832 | 40.5 | 192 | py |
StyleMask | StyleMask-master/libs/models/mask_predictor.py | import torch
from torch import nn
class MaskPredictor(nn.Module):
    """Two-layer MLP that predicts a per-channel soft mask in (0, 1).

    Given a style-difference vector, produces a mask of the same (or
    configured) dimensionality via Linear -> ReLU -> Linear -> sigmoid.
    """
    def __init__(self, input_dim, output_dim, inner_dim=1024):
        super(MaskPredictor, self).__init__()
        # Module construction order is kept identical to the reference
        # implementation so RNG-dependent weight init matches under a seed.
        self.masknet = nn.Sequential(nn.Linear(input_dim, inner_dim, bias=True),
                                     nn.ReLU(),
                                     nn.Linear(inner_dim, output_dim, bias=True),
                                     )
        self.initilization()

    def initilization(self):
        """Re-initialize both linear layers with small Gaussian weights (std 0.01)."""
        for layer_index in (0, 2):
            torch.nn.init.normal_(self.masknet[layer_index].weight, mean=0.0, std=0.01)

    def forward(self, input):
        """Map a style-difference vector to a sigmoid mask of shape (..., output_dim)."""
        logits = self.masknet(input)
        return torch.sigmoid(logits)
| 628 | 21.464286 | 74 | py |
StyleMask | StyleMask-master/libs/models/inversion/psp.py | """
This file defines the core research contribution
"""
import math
import matplotlib
matplotlib.use('Agg')
import torch
from torch import nn
import torchvision.transforms as transforms
import os
from libs.models.inversion import psp_encoders
def get_keys(d, name):
    """Extract the sub-state-dict belonging to submodule `name`.

    Unwraps a checkpoint's 'state_dict' entry if present, then returns every
    entry whose key starts with `name`, with the `name` prefix and its
    following separator character stripped.
    """
    if 'state_dict' in d:
        d = d['state_dict']
    prefix_len = len(name) + 1  # prefix plus the '.' separator
    return {key[prefix_len:]: value for key, value in d.items() if key.startswith(name)}
class pSp(nn.Module):
    """e4e/pSp inversion wrapper: an Encoder4Editing that maps a real image to
    per-layer latent codes, offset by the checkpoint's average latent."""
    def __init__(self, opts):
        super(pSp, self).__init__()
        self.opts = opts
        # compute number of style inputs based on the output resolution
        self.opts.n_styles = int(math.log(self.opts.output_size, 2)) * 2 - 2
        self.n_styles = self.opts.n_styles
        # Define architecture
        self.encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts)
        # Load weights if needed
        ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu')
        self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True)
        self.__load_latent_avg(ckpt)

    def forward(self, real_image, randomize_noise=False, inject_latent=None, return_latents=False, alpha=None, average_code=False, input_is_full=False):
        """Encode `real_image` to latent codes and add the average latent.

        NOTE(review): the extra keyword arguments are accepted for API
        compatibility but are unused in this implementation.
        """
        codes = self.encoder(real_image)
        if self.latent_avg is not None:
            # 2-D codes get a single-latent offset; 3-D (W+) codes get the
            # average repeated across all style layers.
            if codes.ndim == 2:
                codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :]
            else:
                codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)
        return codes

    def __load_latent_avg(self, ckpt, repeat=None):
        # Pull the generator's average latent from the checkpoint; None if absent.
        if 'latent_avg' in ckpt:
            self.latent_avg = ckpt['latent_avg'].to(self.opts.device)
            if repeat is not None:
                self.latent_avg = self.latent_avg.repeat(repeat, 1)
        else:
            self.latent_avg = None
StyleMask | StyleMask-master/libs/models/inversion/psp_encoders.py | from enum import Enum
import math
import numpy as np
import torch
from torch import nn
from torch.nn import Conv2d, BatchNorm2d, PReLU, Sequential, Module
from libs.models.inversion.helpers import get_blocks, bottleneck_IR, bottleneck_IR_SE, _upsample_add
from libs.models.StyleGAN2.model import EqualLinear, ScaledLeakyReLU, EqualConv2d
class ProgressiveStage(Enum):
    """Progressive-training stages for the e4e encoder.

    Stage k activates delta branches 1..k on top of the base W code;
    `Inference` (18) enables every delta branch.
    """
    WTraining = 0
    Delta1Training = 1
    Delta2Training = 2
    Delta3Training = 3
    Delta4Training = 4
    Delta5Training = 5
    Delta6Training = 6
    Delta7Training = 7
    Delta8Training = 8
    Delta9Training = 9
    Delta10Training = 10
    Delta11Training = 11
    Delta12Training = 12
    Delta13Training = 13
    Delta14Training = 14
    Delta15Training = 15
    Delta16Training = 16
    Delta17Training = 17
    Inference = 18
class GradualStyleBlock(Module):
    """Maps an (in_c, spatial, spatial) feature map to one out_c-dim style
    vector via log2(spatial) stride-2 convs followed by an EqualLinear."""
    def __init__(self, in_c, out_c, spatial):
        super(GradualStyleBlock, self).__init__()
        self.out_c = out_c
        self.spatial = spatial
        # Number of stride-2 convs needed to reduce the spatial dims to 1x1.
        num_pools = int(np.log2(spatial))
        modules = []
        modules += [Conv2d(in_c, out_c, kernel_size=3, stride=2, padding=1),
                    nn.LeakyReLU()]
        for i in range(num_pools - 1):
            modules += [
                Conv2d(out_c, out_c, kernel_size=3, stride=2, padding=1),
                nn.LeakyReLU()
            ]
        self.convs = nn.Sequential(*modules)
        self.linear = EqualLinear(out_c, out_c, lr_mul=1)

    def forward(self, x):
        x = self.convs(x)
        x = x.view(-1, self.out_c)  # flatten the 1x1 spatial dims
        x = self.linear(x)
        return x
class GradualStyleEncoder(Module):
    """pSp 'gradual' encoder: an IR(-SE) ResNet backbone plus an FPN that
    predicts one 512-d style vector per StyleGAN layer, split into
    coarse / middle / fine groups taken from different pyramid levels."""
    def __init__(self, num_layers, mode='ir', opts=None):
        super(GradualStyleEncoder, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
        self.styles = nn.ModuleList()
        log_size = int(math.log(opts.output_size, 2))
        self.style_count = 2 * log_size - 2  # number of StyleGAN style inputs
        self.coarse_ind = 3   # styles [0, 3) from the deepest features
        self.middle_ind = 7   # styles [3, 7) from mid features; rest from fine
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        # 1x1 lateral convs for the FPN top-down pathway.
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        x = self.input_layer(x)
        latents = []
        modulelist = list(self.body._modules.values())
        # Tap intermediate feature maps at fixed backbone depths.
        # NOTE(review): indices (6, 20, 23) match the 50-layer layout; confirm
        # before relying on num_layers 100/152.
        for i, l in enumerate(modulelist):
            x = l(x)
            if i == 6:
                c1 = x
            elif i == 20:
                c2 = x
            elif i == 23:
                c3 = x
        for j in range(self.coarse_ind):
            latents.append(self.styles[j](c3))
        p2 = _upsample_add(c3, self.latlayer1(c2))
        for j in range(self.coarse_ind, self.middle_ind):
            latents.append(self.styles[j](p2))
        p1 = _upsample_add(p2, self.latlayer2(c1))
        for j in range(self.middle_ind, self.style_count):
            latents.append(self.styles[j](p1))
        out = torch.stack(latents, dim=1)
        return out
class Encoder4Editing(Module):
    """e4e encoder: predicts a base W code plus progressively-enabled
    per-layer deltas (W+ offsets) from an IR(-SE) ResNet backbone with FPN."""
    def __init__(self, num_layers, mode='ir', opts=None):
        super(Encoder4Editing, self).__init__()
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
        self.styles = nn.ModuleList()
        log_size = int(math.log(opts.output_size, 2))
        self.style_count = 2 * log_size - 2  # number of StyleGAN style inputs
        self.coarse_ind = 3
        self.middle_ind = 7
        for i in range(self.style_count):
            if i < self.coarse_ind:
                style = GradualStyleBlock(512, 512, 16)
            elif i < self.middle_ind:
                style = GradualStyleBlock(512, 512, 32)
            else:
                style = GradualStyleBlock(512, 512, 64)
            self.styles.append(style)
        # 1x1 lateral convs for the FPN top-down pathway.
        self.latlayer1 = nn.Conv2d(256, 512, kernel_size=1, stride=1, padding=0)
        self.latlayer2 = nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0)
        self.progressive_stage = ProgressiveStage.Inference  # all deltas active

    def get_deltas_starting_dimensions(self):
        ''' Get a list of the initial dimension of every delta from which it is applied '''
        return list(range(self.style_count))  # Each dimension has a delta applied to it

    def set_progressive_stage(self, new_stage: ProgressiveStage):
        # Switch how many delta branches are active during progressive training.
        self.progressive_stage = new_stage
        print('Changed progressive stage to: ', new_stage)

    def forward(self, x):
        x = self.input_layer(x)
        modulelist = list(self.body._modules.values())
        # Tap intermediate feature maps at fixed backbone depths.
        for i, l in enumerate(modulelist):
            x = l(x)
            if i == 6:
                c1 = x
            elif i == 20:
                c2 = x
            elif i == 23:
                c3 = x
        # Infer main W and duplicate it
        w0 = self.styles[0](c3)
        w = w0.repeat(self.style_count, 1, 1).permute(1, 0, 2)
        stage = self.progressive_stage.value
        features = c3
        for i in range(1, min(stage + 1, self.style_count)):  # Infer additional deltas
            if i == self.coarse_ind:
                p2 = _upsample_add(c3, self.latlayer1(c2))  # FPN's middle features
                features = p2
            elif i == self.middle_ind:
                p1 = _upsample_add(p2, self.latlayer2(c1))  # FPN's fine features
                features = p1
            delta_i = self.styles[i](features)
            w[:, i] += delta_i
        return w
class BackboneEncoderUsingLastLayerIntoW(Module):
    """Simpler pSp encoder: pools the backbone's last feature map into a
    single 512-d W code and repeats it for every style layer (W, not W+)."""
    def __init__(self, num_layers, mode='ir', opts=None):
        super(BackboneEncoderUsingLastLayerIntoW, self).__init__()
        print('Using BackboneEncoderUsingLastLayerIntoW')
        assert num_layers in [50, 100, 152], 'num_layers should be 50,100, or 152'
        assert mode in ['ir', 'ir_se'], 'mode should be ir or ir_se'
        blocks = get_blocks(num_layers)
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        self.output_pool = torch.nn.AdaptiveAvgPool2d((1, 1))
        self.linear = EqualLinear(512, 512, lr_mul=1)
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
        log_size = int(math.log(opts.output_size, 2))
        self.style_count = 2 * log_size - 2  # number of style layers to fill

    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_pool(x)
        x = x.view(-1, 512)
        x = self.linear(x)
        # Broadcast the single W code across all style layers: (B, style_count, 512).
        return x.repeat(self.style_count, 1, 1).permute(1, 0, 2)
# Consultation encoder
class ResidualEncoder(Module):
    """Consultation encoder: maps an image to per-pixel scale/shift condition
    maps (each 512 x 64 x 64) used to modulate generator features."""
    def __init__(self, opts=None):
        super(ResidualEncoder, self).__init__()
        self.conv_layer1 = Sequential(Conv2d(3, 32, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(32),
                                      PReLU(32))
        self.conv_layer2 = Sequential(*[bottleneck_IR(32,48,2), bottleneck_IR(48,48,1), bottleneck_IR(48,48,1)])
        self.conv_layer3 = Sequential(*[bottleneck_IR(48,64,2), bottleneck_IR(64,64,1), bottleneck_IR(64,64,1)])
        # Two heads: one for the multiplicative scale, one for the additive shift.
        self.condition_scale3 = nn.Sequential(
            EqualConv2d(64, 512, 3, stride=1, padding=1, bias=True ),
            ScaledLeakyReLU(0.2),
            EqualConv2d(512, 512, 3, stride=1, padding=1, bias=True ))
        self.condition_shift3 = nn.Sequential(
            EqualConv2d(64, 512, 3, stride=1, padding=1, bias=True ),
            ScaledLeakyReLU(0.2),
            EqualConv2d(512, 512, 3, stride=1, padding=1, bias=True ))

    def get_deltas_starting_dimensions(self):
        ''' Get a list of the initial dimension of every delta from which it is applied '''
        # NOTE(review): self.style_count is never assigned in this class, so
        # calling this method would raise AttributeError — confirm intent.
        return list(range(self.style_count))  # Each dimension has a delta applied to it

    def forward(self, x):
        """Return [scale, shift] condition maps, each resized to 64x64."""
        conditions = []
        feat1 = self.conv_layer1(x)
        feat2 = self.conv_layer2(feat1)
        feat3 = self.conv_layer3(feat2)
        scale = self.condition_scale3(feat3)
        scale = torch.nn.functional.interpolate(scale, size=(64,64) , mode='bilinear')
        conditions.append(scale.clone())
        shift = self.condition_shift3(feat3)
        shift = torch.nn.functional.interpolate(shift, size=(64,64) , mode='bilinear')
        conditions.append(shift.clone())
        return conditions
# ADA
class ResidualAligner(Module):
    """ADA aligner: a small encoder/decoder with skip connections that maps a
    6-channel input (two stacked RGB images) to a 3-channel aligned residual."""
    def __init__(self, opts=None):
        super(ResidualAligner, self).__init__()
        # Encoder: progressively downsamples 6 -> 16 -> 32 -> 48 -> 64 channels.
        self.conv_layer1 = Sequential(Conv2d(6, 16, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(16),
                                      PReLU(16))
        self.conv_layer2 = Sequential(*[bottleneck_IR(16,32,2), bottleneck_IR(32,32,1), bottleneck_IR(32,32,1)])
        self.conv_layer3 = Sequential(*[bottleneck_IR(32,48,2), bottleneck_IR(48,48,1), bottleneck_IR(48,48,1)])
        self.conv_layer4 = Sequential(*[bottleneck_IR(48,64,2), bottleneck_IR(64,64,1), bottleneck_IR(64,64,1)])
        # Decoder: upsamples and fuses the matching encoder features.
        self.dconv_layer1 = Sequential(*[bottleneck_IR(112,64,1), bottleneck_IR(64,32,1), bottleneck_IR(32,32,1)])
        self.dconv_layer2 = Sequential(*[bottleneck_IR(64,32,1), bottleneck_IR(32,16,1), bottleneck_IR(16,16,1)])
        self.dconv_layer3 = Sequential(*[bottleneck_IR(32,16,1), bottleneck_IR(16,3,1), bottleneck_IR(3,3,1)])

    def forward(self, x):
        feat1 = self.conv_layer1(x)
        feat2 = self.conv_layer2(feat1)
        feat3 = self.conv_layer3(feat2)
        feat4 = self.conv_layer4(feat3)
        # Upsample and concatenate with the corresponding encoder feature map.
        feat4 = torch.nn.functional.interpolate(feat4, size=(64,64) , mode='bilinear')
        dfea1 = self.dconv_layer1(torch.cat((feat4, feat3),1))
        dfea1 = torch.nn.functional.interpolate(dfea1, size=(128,128) , mode='bilinear')
        dfea2 = self.dconv_layer2(torch.cat( (dfea1, feat2),1))
        dfea2 = torch.nn.functional.interpolate(dfea2, size=(256,256) , mode='bilinear')
        dfea3 = self.dconv_layer3(torch.cat( (dfea2, feat1),1))
        res_aligned = dfea3
        return res_aligned
| 12,262 | 37.806962 | 115 | py |
StyleMask | StyleMask-master/libs/models/inversion/helpers.py | from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Linear
import torch.nn.functional as F
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Flatten(Module):
    """Collapse every dimension except the batch dimension into one."""
    def forward(self, input):
        leading = input.size(0)
        # view (not reshape) keeps the original's contiguity requirement.
        return input.view(leading, -1)
def l2_norm(input, axis=1):
    """Return `input` L2-normalized along `axis` (keeps dimensions)."""
    magnitude = torch.norm(input, 2, axis, True)
    return input / magnitude
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block: input channels, output depth, stride. """
def get_block(in_channel, depth, num_units, stride=2):
return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)]
def get_blocks(num_layers):
if num_layers == 50:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=4),
get_block(in_channel=128, depth=256, num_units=14),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 100:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=13),
get_block(in_channel=128, depth=256, num_units=30),
get_block(in_channel=256, depth=512, num_units=3)
]
elif num_layers == 152:
blocks = [
get_block(in_channel=64, depth=64, num_units=3),
get_block(in_channel=64, depth=128, num_units=8),
get_block(in_channel=128, depth=256, num_units=36),
get_block(in_channel=256, depth=512, num_units=3)
]
else:
raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
return blocks
class SEModule(Module):
    """Squeeze-and-Excitation block: channel attention from globally pooled
    features, through a bottleneck MLP of width ``channels // reduction``."""
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        # squeeze: channels -> channels // reduction (1x1 conv acts as a linear map)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        # excite: back up to channels; sigmoid yields per-channel gates in [0, 1]
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()
    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        # rescale every input channel by its learned gate
        return module_input * x
class bottleneck_IR(Module):
    """Improved-Residual bottleneck (ArcFace style): BN -> 3x3 conv -> PReLU ->
    strided 3x3 conv -> BN, summed with an identity/projection shortcut."""
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        # identity shortcut (1x1 max-pool realises the stride) when channels match,
        # 1x1 projection conv otherwise
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
        )
    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class bottleneck_IR_SE(Module):
    """Same as ``bottleneck_IR`` but with a Squeeze-and-Excitation module
    (reduction 16) appended to the residual branch."""
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        # identity shortcut when channels match, 1x1 projection otherwise
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )
    def forward(self, x):
        shortcut = self.shortcut_layer(x)
        res = self.res_layer(x)
        return res + shortcut
class SeparableConv2d(torch.nn.Module):
    """Depthwise-separable convolution: per-channel spatial conv followed by a
    1x1 pointwise conv.

    NOTE(review): padding is hard-coded to 1, which only preserves spatial size
    for a 3x3 kernel — confirm callers never pass another kernel_size.
    """
    def __init__(self, in_channels, out_channels, kernel_size, bias=False):
        super(SeparableConv2d, self).__init__()
        # groups=in_channels makes this a depthwise conv
        self.depthwise = Conv2d(in_channels, in_channels, kernel_size=kernel_size, groups=in_channels, bias=bias, padding=1)
        self.pointwise = Conv2d(in_channels, out_channels, kernel_size=1, bias=bias)
    def forward(self, x):
        out = self.depthwise(x)
        out = self.pointwise(out)
        return out
def _upsample_add(x, y):
"""Upsample and add two feature maps.
Args:
x: (Variable) top feature map to be upsampled.
y: (Variable) lateral feature map.
Returns:
(Variable) added feature map.
Note in PyTorch, when input size is odd, the upsampled feature map
with `F.upsample(..., scale_factor=2, mode='nearest')`
maybe not equal to the lateral feature map size.
e.g.
original input size: [N,_,15,15] ->
conv2d feature map size: [N,_,8,8] ->
upsampled feature map size: [N,_,16,16]
So we choose bilinear upsample which supports arbitrary output sizes.
"""
_, _, H, W = y.size()
return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y
class SeparableBlock(Module):
    """Hypernetwork head: predict a per-sample convolution kernel from a
    feature vector.

    The (C_out x C_in) channel-mixing matrix at each spatial tap is formed as a
    rank-1 outer product of two predicted vectors, then refined by two linear
    maps. Output shape: (N, kernel_channels_out, kernel_channels_in,
    kernel_size, kernel_size).
    """
    def __init__(self, input_size, kernel_channels_in, kernel_channels_out, kernel_size):
        super(SeparableBlock, self).__init__()
        self.input_size = input_size
        self.kernel_size = kernel_size
        self.kernel_channels_in = kernel_channels_in
        self.kernel_channels_out = kernel_channels_out
        # predict the two factors of the outer product
        self.make_kernel_in = Linear(input_size, kernel_size * kernel_size * kernel_channels_in)
        self.make_kernel_out = Linear(input_size, kernel_size * kernel_size * kernel_channels_out)
        # refinement maps applied along the C_in and C_out axes respectively
        self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in)
        self.kernel_linear_out = Linear(kernel_channels_out, kernel_channels_out)
    def forward(self, features):
        features = features.view(-1, self.input_size)
        # factors shaped for a batched outer product: (.., C_out, 1) @ (.., 1, C_in)
        kernel_in = self.make_kernel_in(features).view(-1, self.kernel_size, self.kernel_size, 1, self.kernel_channels_in)
        kernel_out = self.make_kernel_out(features).view(-1, self.kernel_size, self.kernel_size, self.kernel_channels_out, 1)
        kernel = torch.matmul(kernel_out, kernel_in)
        # refine along C_in, swap the two channel axes, refine along C_out
        kernel = self.kernel_linear_in(kernel).permute(0, 1, 2, 4, 3)
        kernel = self.kernel_linear_out(kernel)
        # -> (N, C_out, C_in, k, k), the layout expected by conv weights
        kernel = kernel.permute(0, 4, 3, 1, 2)
        return kernel
| 5,916 | 30.473404 | 120 | py |
StyleMask | StyleMask-master/libs/models/StyleGAN2/model.py | import math
import random
import torch
from torch import nn
from torch.nn import functional as F
from .op import FusedLeakyReLU, fused_leaky_relu, upfirdn2d
class PixelNorm(nn.Module):
    """Normalise each pixel's feature vector to unit RMS across channels
    (dim 1), as in the StyleGAN mapping network."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        mean_square = torch.mean(input ** 2, dim=1, keepdim=True)
        return input * torch.rsqrt(mean_square + 1e-8)
def make_kernel(k):
    """Build a normalised 2-D FIR filter.

    A 1-D tap list is expanded to 2-D via its outer product; a 2-D input is
    used as-is. Either way the result is scaled to sum to 1.
    """
    k = torch.tensor(k, dtype=torch.float32)
    if k.ndim == 1:
        k = torch.outer(k, k)
    k /= k.sum()
    return k
class Upsample(nn.Module):
    """Upsample by `factor` with an FIR low-pass `kernel`, via upfirdn2d."""

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        # factor**2 gain compensates the energy lost to zero-insertion
        filt = make_kernel(kernel) * (factor ** 2)
        self.register_buffer('kernel', filt)
        excess = filt.shape[0] - factor
        self.pad = ((excess + 1) // 2 + factor - 1, excess // 2)

    def forward(self, input):
        return upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)
class Downsample(nn.Module):
    """Anti-aliased downsampling by `factor`: FIR low-pass, then subsample."""

    def __init__(self, kernel, factor=2):
        super().__init__()
        self.factor = factor
        filt = make_kernel(kernel)
        self.register_buffer('kernel', filt)
        excess = filt.shape[0] - factor
        self.pad = ((excess + 1) // 2, excess // 2)

    def forward(self, input):
        return upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)
class Blur(nn.Module):
    """Apply a fixed FIR blur `kernel` with the given `pad`, via upfirdn2d."""

    def __init__(self, kernel, pad, upsample_factor=1):
        super().__init__()
        filt = make_kernel(kernel)
        if upsample_factor > 1:
            # gain compensation when the blur follows an upsampling conv
            filt = filt * (upsample_factor ** 2)
        self.register_buffer('kernel', filt)
        self.pad = pad

    def forward(self, input):
        return upfirdn2d(input, self.kernel, pad=self.pad)
class EqualConv2d(nn.Module):
    """Conv2d with equalized learning rate: weights are drawn from N(0, 1) and
    rescaled at runtime by 1/sqrt(fan_in), as in StyleGAN."""

    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()
        self.weight = nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size)
        )
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)
        self.stride = stride
        self.padding = padding
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channel))
        else:
            self.bias = None

    def forward(self, input):
        # the He-style scale is applied here instead of at initialisation
        return F.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

    def __repr__(self):
        return (
            '{}({}, {},'.format(self.__class__.__name__, self.weight.shape[1], self.weight.shape[0]) +
            ' {}, stride={}, padding={})'.format(self.weight.shape[2], self.stride, self.padding)
        )
class EqualLinear(nn.Module):
    """Linear layer with equalized learning rate (StyleGAN).

    Weights are N(0, 1)/lr_mul at init and rescaled by (1/sqrt(in_dim))*lr_mul
    at runtime; the optional bias is scaled by lr_mul. When `activation` is
    set, a fused leaky-ReLU (bias + lrelu + gain) is applied instead of a
    plain bias add.

    Bug fix: the original forward evaluated ``self.bias * self.lr_mul``
    unconditionally, raising TypeError whenever the layer was built with
    ``bias=False`` — the multiplication is now guarded against a None bias.
    """

    def __init__(
        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None
    ):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
        else:
            self.bias = None
        self.activation = activation
        self.scale = (1 / math.sqrt(in_dim)) * lr_mul
        self.lr_mul = lr_mul

    def forward(self, input):
        # bias may be None when constructed with bias=False
        bias = self.bias * self.lr_mul if self.bias is not None else None
        if self.activation:
            out = F.linear(input, self.weight * self.scale)
            # NOTE: the fused CUDA op still expects a bias tensor; with
            # bias=False this path fails just as the original did.
            out = fused_leaky_relu(out, bias)
        else:
            out = F.linear(input, self.weight * self.scale, bias=bias)
        return out

    def __repr__(self):
        return (
            '{}({}, {})'.format(self.__class__.__name__, self.weight.shape[1], self.weight.shape[0])
        )
class ScaledLeakyReLU(nn.Module):
    """Leaky ReLU followed by a sqrt(2) gain (variance-preserving activation)."""

    def __init__(self, negative_slope=0.2):
        super().__init__()
        self.negative_slope = negative_slope

    def forward(self, input):
        activated = F.leaky_relu(input, negative_slope=self.negative_slope)
        return activated * math.sqrt(2)
class ModulatedConv2d(nn.Module):
    """StyleGAN2 modulated convolution.

    Per-sample style vectors modulate the conv weights channel-wise; optional
    demodulation renormalises each output filter. Batched per-sample weights
    are realised with a single grouped convolution (groups=batch).
    """
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        demodulate=True,
        upsample=False,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
    ):
        super().__init__()
        self.eps = 1e-8
        self.kernel_size = kernel_size
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.upsample = upsample
        self.downsample = downsample
        # FIR blur paired with the resampling, with padding chosen so spatial
        # sizes exactly double / halve
        if upsample:
            factor = 2
            p = (len(blur_kernel) - factor) - (kernel_size - 1)
            pad0 = (p + 1) // 2 + factor - 1
            pad1 = p // 2 + 1
            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)
        if downsample:
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            self.blur = Blur(blur_kernel, pad=(pad0, pad1))
        fan_in = in_channel * kernel_size ** 2
        self.scale = 1 / math.sqrt(fan_in)  # equalized learning-rate scale
        self.padding = kernel_size // 2
        self.weight = nn.Parameter(
            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)
        )
        # style vector -> per-input-channel scales (bias_init=1 so the initial
        # modulation is the identity)
        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)
        self.demodulate = demodulate
    def __repr__(self):
        return (
            '{}({}, {}, {}, '.format(self.__class__.__name__, self.in_channel, self.out_channel, self.kernel_size) +
            'upsample={}, downsample={})'.format(self.upsample, self.downsample)
        )
    def forward(self, input, style):
        batch, in_channel, height, width = input.shape
        # modulate: scale each input channel of the weight per sample
        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)
        weight = self.scale * self.weight * style
        if self.demodulate:
            # renormalise each output filter to unit norm (per sample)
            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)
        # fold the batch into the group dimension for a single grouped conv
        weight = weight.view(
            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size
        )
        if self.upsample:
            input = input.view(1, batch * in_channel, height, width)
            # conv_transpose2d expects (in, out, kh, kw) per group
            weight = weight.view(
                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size
            )
            weight = weight.transpose(1, 2).reshape(
                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size
            )
            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
            out = self.blur(out)
        elif self.downsample:
            input = self.blur(input)
            _, _, height, width = input.shape
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        else:
            input = input.view(1, batch * in_channel, height, width)
            out = F.conv2d(input, weight, padding=self.padding, groups=batch)
            _, _, height, width = out.shape
            out = out.view(batch, self.out_channel, height, width)
        return out
class NoiseInjection(nn.Module):
    """Add per-pixel Gaussian noise scaled by a learned scalar (init 0,
    so injection starts as a no-op)."""

    def __init__(self):
        super().__init__()
        self.weight = nn.Parameter(torch.zeros(1))

    def forward(self, image, noise=None):
        if noise is None:
            # fresh single-channel noise, broadcast over channels
            b, _, h, w = image.shape
            noise = image.new_empty(b, 1, h, w).normal_()
        return image + self.weight * noise
class ConstantInput(nn.Module):
    """Learned constant (1, channel, size, size) tensor, tiled to the batch
    size of `input` (whose values are otherwise ignored)."""

    def __init__(self, channel, size=4):
        super().__init__()
        self.input = nn.Parameter(torch.randn(1, channel, size, size))

    def forward(self, input):
        n = input.shape[0]
        return self.input.repeat(n, 1, 1, 1)
class StyledConv(nn.Module):
    """One StyleGAN2 synthesis layer: modulated conv -> noise injection ->
    fused bias + leaky ReLU (with sqrt(2) gain)."""
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        style_dim,
        upsample=False,
        blur_kernel=[1, 3, 3, 1],
        demodulate=True,
    ):
        super().__init__()
        self.conv = ModulatedConv2d(
            in_channel,
            out_channel,
            kernel_size,
            style_dim,
            upsample=upsample,
            blur_kernel=blur_kernel,
            demodulate=demodulate,
        )
        self.noise = NoiseInjection()
        # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1))
        # self.activate = ScaledLeakyReLU(0.2)
        # FusedLeakyReLU owns the bias, so no separate bias parameter is needed
        self.activate = FusedLeakyReLU(out_channel)
    def forward(self, input, style, noise=None):
        out = self.conv(input, style)
        out = self.noise(out, noise=noise)
        # out = out + self.bias
        out = self.activate(out)
        return out
class ToRGB(nn.Module):
    """1x1 modulated conv (no demodulation) projecting features to RGB, with a
    learned bias; an optionally upsampled `skip` image is added for the
    progressive skip-sum architecture.

    NOTE(review): when constructed with upsample=False, self.upsample is never
    defined, so calling forward with a non-None skip would raise
    AttributeError — confirm callers only pass skip to upsampling instances.
    """
    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        if upsample:
            self.upsample = Upsample(blur_kernel)
        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)
        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
    def forward(self, input, style, skip=None):
        out = self.conv(input, style)
        out = out + self.bias
        if skip is not None:
            skip = self.upsample(skip)
            out = out + skip
        return out
class Generator(nn.Module):
    """StyleGAN2 generator: an 8-layer MLP mapping network (z -> w) and a
    skip-sum synthesis network growing from 4x4 to `size` x `size`."""
    def __init__(
        self,
        size,
        style_dim,
        n_mlp,
        channel_multiplier=2,
        blur_kernel=[1, 3, 3, 1],
        lr_mlp=0.01,
    ):
        super().__init__()
        self.size = size
        self.style_dim = style_dim
        # mapping network: PixelNorm then n_mlp equalized FC layers with a
        # reduced learning rate (lr_mlp)
        layers = [PixelNorm()]
        for i in range(n_mlp):
            layers.append(
                EqualLinear(
                    style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu'
                )
            )
        self.style = nn.Sequential(*layers)
        # feature-map widths per resolution
        self.channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        self.input = ConstantInput(self.channels[4])
        self.conv1 = StyledConv(
            self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel
        )
        self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False)
        self.log_size = int(math.log(size, 2))
        self.num_layers = (self.log_size - 2) * 2 + 1
        self.convs = nn.ModuleList()
        self.upsamples = nn.ModuleList()
        self.to_rgbs = nn.ModuleList()
        self.noises = nn.Module()
        in_channel = self.channels[4]
        # one fixed noise buffer per synthesis layer (used when
        # randomize_noise=False)
        for layer_idx in range(self.num_layers):
            res = (layer_idx + 5) // 2
            shape = [1, 1, 2 ** res, 2 ** res]
            self.noises.register_buffer('noise_{}'.format(layer_idx), torch.randn(*shape))
        # two styled convs (first one upsampling) plus a ToRGB per resolution
        for i in range(3, self.log_size + 1):
            out_channel = self.channels[2 ** i]
            self.convs.append(
                StyledConv(
                    in_channel,
                    out_channel,
                    3,
                    style_dim,
                    upsample=True,
                    blur_kernel=blur_kernel,
                )
            )
            self.convs.append(
                StyledConv(
                    out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel
                )
            )
            self.to_rgbs.append(ToRGB(out_channel, style_dim))
            in_channel = out_channel
        self.n_latent = self.log_size * 2 - 2
    def make_noise(self):
        """Return fresh per-layer noise tensors.

        NOTE(review): hard-codes .cuda(); fails on CPU-only machines.
        """
        # device = self.input.input.device
        noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2).cuda()]
        for i in range(3, self.log_size + 1):
            for _ in range(2):
                noises.append(torch.randn(1, 1, 2 ** i, 2 ** i).cuda())
        return noises
    def mean_latent(self, n_latent):
        """Estimate the average w by mapping n_latent random z vectors.

        NOTE(review): also hard-codes .cuda().
        """
        latent_in = torch.randn(
            n_latent, self.style_dim).cuda()
        latent = self.style(latent_in).mean(0, keepdim=True)
        return latent
    def get_latent(self, input):
        # map a z code to w space
        return self.style(input)
    def forward(
        self,
        styles,
        return_latents=False,
        return_features=False,
        inject_index=None,
        truncation=1,
        truncation_latent=None,
        input_is_latent=False,
        noise=None,
        randomize_noise=False
    ):
        """Synthesise images from a list of style codes.

        styles: list of z codes (or w codes if input_is_latent). Two entries
        trigger style mixing at inject_index (random if None). Returns
        (image, latent) with latent only filled when return_latents is True.
        NOTE(review): return_features is accepted but unused here.
        """
        if not input_is_latent:
            styles = [self.style(s) for s in styles]
        if noise is None:
            if randomize_noise:
                noise = [None] * self.num_layers
            else:
                noise = [getattr(self.noises, 'noise_{}'.format(i)) for i in range(self.num_layers)]
        # truncation trick: move each w towards the mean latent
        if truncation < 1:
            style_t = []
            for style in styles:
                style_t.append(
                    truncation_latent + truncation * (style - truncation_latent)
                )
            styles = style_t
        # build the (batch, n_latent, style_dim) per-layer latent tensor
        if len(styles) < 2:
            inject_index = self.n_latent
            if styles[0].ndim < 3:
                latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            else:
                latent = styles[0]
        else:
            # style mixing: first code up to inject_index, second afterwards
            if inject_index is None:
                inject_index = random.randint(1, self.n_latent - 1)
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)
            latent = torch.cat([latent, latent2], 1)
        out = self.input(latent)
        out = self.conv1(out, latent[:, 0], noise=noise[0])
        skip = self.to_rgb1(out, latent[:, 1])
        i = 1
        count = 1  # NOTE(review): unused
        # per-resolution: upsampling conv, regular conv, then RGB skip-sum
        for conv1, conv2, noise1, noise2, to_rgb in zip(
            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs
        ):
            out = conv1(out, latent[:, i], noise=noise1)
            out = conv2(out, latent[:, i + 1], noise=noise2)
            skip = to_rgb(out, latent[:, i + 2], skip)
            i += 2
        image = skip
        if return_latents:
            return image, latent
        else:
            return image, None
class ConvLayer(nn.Sequential):
    """[Blur (if downsampling)] -> EqualConv2d -> activation, as a Sequential.

    When activate=True the bias lives inside FusedLeakyReLU, so the conv
    itself is built without one (bias and not activate).
    """
    def __init__(
        self,
        in_channel,
        out_channel,
        kernel_size,
        downsample=False,
        blur_kernel=[1, 3, 3, 1],
        bias=True,
        activate=True,
    ):
        layers = []
        if downsample:
            # anti-aliasing blur before the stride-2 conv; pad chosen so the
            # output is exactly half the input size
            factor = 2
            p = (len(blur_kernel) - factor) + (kernel_size - 1)
            pad0 = (p + 1) // 2
            pad1 = p // 2
            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))
            stride = 2
            self.padding = 0
        else:
            stride = 1
            self.padding = kernel_size // 2
        layers.append(
            EqualConv2d(
                in_channel,
                out_channel,
                kernel_size,
                padding=self.padding,
                stride=stride,
                bias=bias and not activate,
            )
        )
        if activate:
            if bias:
                layers.append(FusedLeakyReLU(out_channel))
            else:
                layers.append(ScaledLeakyReLU(0.2))
        super().__init__(*layers)
class ResBlock(nn.Module):
    """Downsampling residual block: two convs (second strided) plus a 1x1
    strided skip, summed and divided by sqrt(2) to preserve variance.

    Bug fix: the `blur_kernel` parameter was accepted but never forwarded to
    the downsampling ConvLayers, so non-default kernels were silently
    ignored. It is now passed through; behaviour is unchanged for the
    default [1, 3, 3, 1].
    """

    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        self.conv1 = ConvLayer(in_channel, in_channel, 3)
        self.conv2 = ConvLayer(
            in_channel, out_channel, 3, downsample=True, blur_kernel=blur_kernel
        )
        self.skip = ConvLayer(
            in_channel, out_channel, 1, downsample=True, blur_kernel=blur_kernel,
            activate=False, bias=False
        )

    def forward(self, input):
        residual = self.conv2(self.conv1(input))
        shortcut = self.skip(input)
        return (residual + shortcut) / math.sqrt(2)
class Discriminator(nn.Module):
    """StyleGAN2 discriminator: residual downsampling trunk, minibatch
    standard-deviation feature, final conv + two-layer head -> one logit."""
    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]):
        super().__init__()
        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256 * channel_multiplier,
            128: 128 * channel_multiplier,
            256: 64 * channel_multiplier,
            512: 32 * channel_multiplier,
            1024: 16 * channel_multiplier,
        }
        convs = [ConvLayer(3, channels[size], 1)]
        log_size = int(math.log(size, 2))
        in_channel = channels[size]
        # one ResBlock per halving, from `size` down to 4x4
        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]
            convs.append(ResBlock(in_channel, out_channel, blur_kernel))
            in_channel = out_channel
        self.convs = nn.Sequential(*convs)
        self.stddev_group = 4
        self.stddev_feat = 1
        # +1 input channel for the minibatch-stddev map
        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)
        self.final_linear = nn.Sequential(
            EqualLinear(channels[4] * 4 * 4, channels[4], activation='fused_lrelu'),
            EqualLinear(channels[4], 1),
        )
    def forward(self, input):
        out = self.convs(input)
        batch, channel, height, width = out.shape
        # minibatch standard deviation: per-group feature variation appended
        # as one extra channel (a mode-collapse deterrent)
        group = min(batch, self.stddev_group)
        stddev = out.view(
            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width
        )
        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)
        # `keepdims` is the NumPy-style alias accepted by recent PyTorch —
        # presumably intentional; torch's canonical name is `keepdim`
        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)
        stddev = stddev.repeat(group, 1, height, width)
        out = torch.cat([out, stddev], 1)
        out = self.final_conv(out)
        out = out.view(batch, -1)
        out = self.final_linear(out)
        return out
class Encoder(nn.Module):
    """Image encoder producing all per-layer W+ latents at once: a ResBlock
    pyramid down to 4x4, then a 4x4 conv outputting n_latents * w_dim values
    reshaped to (N, n_latents, w_dim)."""
    def __init__(self, size, w_dim=512):
        super().__init__()
        channels = {
            4: 512,
            8: 512,
            16: 512,
            32: 512,
            64: 256,
            128: 128,
            256: 64,
            512: 32,
            1024: 16
        }
        self.w_dim = w_dim
        log_size = int(math.log(size, 2))
        # matches the generator's latent count for this resolution
        self.n_latents = log_size*2 - 2
        convs = [ConvLayer(3, channels[size], 1)]
        in_channel = channels[size]
        for i in range(log_size, 2, -1):
            out_channel = channels[2 ** (i - 1)]
            convs.append(ResBlock(in_channel, out_channel))
            in_channel = out_channel
        # 4x4 "valid" conv collapses the spatial dims to 1x1
        convs.append(EqualConv2d(in_channel, self.n_latents*self.w_dim, 4, padding=0, bias=False))
        self.convs = nn.Sequential(*convs)
    def forward(self, input):
        out = self.convs(input)
        # print('Encoder weights: {:.5f}'.format( torch.sum(torch.abs( self.convs[0][0].weight.data )) ))
        return out.view(len(input), self.n_latents, self.w_dim)
StyleMask | StyleMask-master/libs/models/StyleGAN2/convert_weight.py | import argparse
import os
import sys
import pickle
import math
import torch
import numpy as np
from torchvision import utils
from models.StyleGAN2.model import Generator, Discriminator
def convert_modconv(vars, source_name, target_name, flip=False):
    """Convert one TF StyleGAN2 modulated-conv layer to torch StyledConv keys.

    vars: TF variable store; values are materialised via .value().eval(),
    which requires an active TF session. flip=True flips the kernel
    spatially, matching TF's transposed-conv weight layout (Conv0_up layers).
    Returns a dict of '<target_name>.<param>' -> torch.Tensor.
    """
    weight = vars[source_name + '/weight'].value().eval()
    mod_weight = vars[source_name + '/mod_weight'].value().eval()
    mod_bias = vars[source_name + '/mod_bias'].value().eval()
    noise = vars[source_name + '/noise_strength'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()
    dic = {
        # TF stores conv weights HWIO; torch wants (1, O, I, H, W) here
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        # torch's modulation EqualLinear uses bias_init=1, hence the +1
        'conv.modulation.bias': mod_bias + 1,
        'noise.weight': np.array([noise]),
        'activate.bias': bias,
    }
    dic_torch = {}
    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)
    if flip:
        dic_torch[target_name + '.conv.weight'] = torch.flip(
            dic_torch[target_name + '.conv.weight'], [3, 4]
        )
    return dic_torch
def convert_conv(vars, source_name, target_name, bias=True, start=0):
    """Convert a plain TF conv layer to keys inside a torch Sequential.

    start: index of the conv inside the target Sequential (the bias lands on
    index start + 1, i.e. the following activation module).
    """
    weight = vars[source_name + '/weight'].value().eval()
    # HWIO -> OIHW
    dic = {'weight': weight.transpose((3, 2, 0, 1))}
    if bias:
        dic['bias'] = vars[source_name + '/bias'].value().eval()
    dic_torch = {}
    dic_torch[target_name + '.{}.weight'.format(start)] = torch.from_numpy(dic['weight'])
    if bias:
        dic_torch[target_name + '.{}.bias'.format(start + 1)] = torch.from_numpy(dic['bias'])
    return dic_torch
def convert_torgb(vars, source_name, target_name):
    """Convert a TF ToRGB layer (modulated 1x1 conv, no noise) to torch keys."""
    weight = vars[source_name + '/weight'].value().eval()
    mod_weight = vars[source_name + '/mod_weight'].value().eval()
    mod_bias = vars[source_name + '/mod_bias'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()
    dic = {
        # HWIO -> (1, O, I, H, W)
        'conv.weight': np.expand_dims(weight.transpose((3, 2, 0, 1)), 0),
        'conv.modulation.weight': mod_weight.transpose((1, 0)),
        'conv.modulation.bias': mod_bias + 1,
        # torch ToRGB keeps the bias as a (1, 3, 1, 1) parameter
        'bias': bias.reshape((1, 3, 1, 1)),
    }
    dic_torch = {}
    for k, v in dic.items():
        dic_torch[target_name + '.' + k] = torch.from_numpy(v)
    return dic_torch
def convert_dense(vars, source_name, target_name):
    """Convert a TF dense layer's weight/bias to torch Linear tensors
    (weight transposed from TF's (in, out) to torch's (out, in))."""
    weight = vars[source_name + '/weight'].value().eval()
    bias = vars[source_name + '/bias'].value().eval()
    return {
        target_name + '.weight': torch.from_numpy(weight.transpose((1, 0))),
        target_name + '.bias': torch.from_numpy(bias),
    }
def update(state_dict, new):
    """Copy entries of `new` into `state_dict` in place, requiring that every
    key already exists and every tensor shape matches.

    Raises:
        KeyError: if a key of `new` is not present in `state_dict`.
        ValueError: if the shapes of corresponding tensors differ.
    """
    for key, tensor in new.items():
        if key not in state_dict:
            raise KeyError(key + ' is not found')
        if tensor.shape != state_dict[key].shape:
            raise ValueError('Shape mismatch: {} vs {}'.format(tensor.shape, state_dict[key].shape))
        state_dict[key] = tensor
def discriminator_fill_statedict(statedict, vars, size):
    """Populate a torch Discriminator state_dict from TF StyleGAN2 variables,
    walking the resolutions from `size` down to 4x4."""
    log_size = int(math.log(size, 2))
    update(statedict, convert_conv(vars, '{}x{}/FromRGB'.format(size, size), 'convs.0'))
    conv_i = 1
    # each TF resolution block maps onto one torch ResBlock (conv1/conv2/skip)
    for i in range(log_size - 2, 0, -1):
        reso = 4 * 2 ** i
        update(statedict, convert_conv(vars, '{}x{}/Conv0'.format(reso, reso), 'convs.{}.conv1'.format(conv_i)))
        # start=1 because index 0 of the downsampling ConvLayer is the Blur
        update(statedict, convert_conv(vars, '{}x{}/Conv1_down'.format(reso, reso), 'convs.{}.conv2'.format(conv_i), start=1))
        update(statedict, convert_conv(vars, '{}x{}/Skip'.format(reso, reso), 'convs.{}.skip'.format(conv_i), start=1, bias=False))
        conv_i += 1
    update(statedict, convert_conv(vars, '4x4/Conv', 'final_conv'))
    update(statedict, convert_dense(vars, '4x4/Dense0', 'final_linear.0'))
    update(statedict, convert_dense(vars, 'Output', 'final_linear.1'))
    return statedict
def fill_statedict(state_dict, vars, size):
    """Populate a torch Generator state_dict from TF StyleGAN2 variables:
    mapping network, constant input, ToRGBs, styled convs and noise buffers."""
    log_size = int(math.log(size, 2))
    # mapping network: TF Dense0..7 -> torch style.1..8 (style.0 is PixelNorm)
    for i in range(8):
        update(state_dict, convert_dense(vars, 'G_mapping/Dense{}'.format(i), 'style.{}'.format(i + 1)))
    update(
        state_dict,
        {
            'input.input': torch.from_numpy(
                vars['G_synthesis/4x4/Const/const'].value().eval()
            )
        },
    )
    update(state_dict, convert_torgb(vars, 'G_synthesis/4x4/ToRGB', 'to_rgb1'))
    for i in range(log_size - 2):
        reso = 4 * 2 ** (i + 1)
        update(
            state_dict,
            convert_torgb(vars, 'G_synthesis/{}x{}/ToRGB'.format(reso, reso), 'to_rgbs.{}'.format(i)),
        )
    update(state_dict, convert_modconv(vars, 'G_synthesis/4x4/Conv', 'conv1'))
    conv_i = 0
    # per resolution: upsampling conv (spatially flipped for the transposed
    # conv) then the regular conv
    for i in range(log_size - 2):
        reso = 4 * 2 ** (i + 1)
        update(
            state_dict,
            convert_modconv(
                vars,
                'G_synthesis/{}x{}/Conv0_up'.format(reso, reso),
                'convs.{}'.format(conv_i),
                flip=True,
            ),
        )
        update(
            state_dict,
            convert_modconv(
                vars,
                'G_synthesis/{}x{}/Conv1'.format(reso, reso),
                'convs.{}'.format(conv_i + 1)
            ),
        )
        conv_i += 2
    # fixed per-layer noise buffers
    for i in range(0, (log_size - 2) * 2 + 1):
        update(
            state_dict,
            {
                'noises.noise_{}'.format(i): torch.from_numpy(
                    vars['G_synthesis/noise{}'.format(i)].value().eval()
                )
            },
        )
    return state_dict
# CLI: convert an official TF StyleGAN2 pickle to a torch checkpoint, then
# render a TF-vs-torch comparison grid as a sanity check.
# Usage: python convert_weight.py --repo <path-to-tf-stylegan2> [--gen] [--disc] <pkl>
if __name__ == '__main__':
    device = 'cuda'
    parser = argparse.ArgumentParser()
    parser.add_argument('--repo', type=str, required=True)
    parser.add_argument('--gen', action='store_true')
    parser.add_argument('--disc', action='store_true')
    parser.add_argument('path', metavar='PATH')
    args = parser.parse_args()
    # the official TF repo provides dnnlib/tflib needed to unpickle the models
    sys.path.append(args.repo)
    import dnnlib
    from dnnlib import tflib
    tflib.init_tf()
    with open(args.path, 'rb') as f:
        generator, discriminator, g_ema = pickle.load(f)
    size = g_ema.output_shape[2]
    # always convert the EMA generator; g/d only on request
    g = Generator(size, 512, 8)
    state_dict = g.state_dict()
    state_dict = fill_statedict(state_dict, g_ema.vars, size)
    g.load_state_dict(state_dict)
    latent_avg = torch.from_numpy(g_ema.vars['dlatent_avg'].value().eval())
    ckpt = {'g_ema': state_dict, 'latent_avg': latent_avg}
    if args.gen:
        g_train = Generator(size, 512, 8)
        g_train_state = g_train.state_dict()
        g_train_state = fill_statedict(g_train_state, generator.vars, size)
        ckpt['g'] = g_train_state
    if args.disc:
        disc = Discriminator(size)
        d_state = disc.state_dict()
        d_state = discriminator_fill_statedict(d_state, discriminator.vars, size)
        ckpt['d'] = d_state
    name = os.path.splitext(os.path.basename(args.path))[0]
    torch.save(ckpt, name + '.pt')
    # side-by-side sample grid: TF output, torch output, and their difference
    batch_size = {256: 16, 512: 9, 1024: 4}
    n_sample = batch_size.get(size, 25)
    g = g.to(device)
    z = np.random.RandomState(0).randn(n_sample, 512).astype('float32')
    with torch.no_grad():
        img_pt, _ = g([torch.from_numpy(z).to(device)], truncation=0.5, truncation_latent=latent_avg.to(device))
        Gs_kwargs = dnnlib.EasyDict()
        Gs_kwargs.randomize_noise = False
        img_tf = g_ema.run(z, None, **Gs_kwargs)
        img_tf = torch.from_numpy(img_tf).to(device)
    img_diff = ((img_pt + 1) / 2).clamp(0.0, 1.0) - ((img_tf.to(device) + 1) / 2).clamp(0.0, 1.0)
    img_concat = torch.cat((img_tf, img_pt, img_diff), dim=0)
    utils.save_image(img_concat, name + '.png', nrow=n_sample, normalize=True, range=(-1, 1))
StyleMask | StyleMask-master/libs/models/StyleGAN2/op/upfirdn2d.py | import os
import torch
from torch.autograd import Function
from torch.utils.cpp_extension import load
# JIT-compile the upfirdn2d CUDA extension on first import (requires nvcc and
# a working CUDA toolchain; the build is cached by torch after the first run).
module_path = os.path.dirname(__file__)
upfirdn2d_op = load(
    'upfirdn2d',
    sources=[
        os.path.join(module_path, 'upfirdn2d.cpp'),
        os.path.join(module_path, 'upfirdn2d_kernel.cu'),
    ],
)
class UpFirDn2dBackward(Function):
    """Backward pass of UpFirDn2d, itself an autograd Function so that the
    double-backward (gradgrad) is also defined.

    The gradient of upfirdn is another upfirdn with up/down swapped, the
    kernel spatially flipped, and complementary padding (g_pad).
    """
    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):
        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
        # CUDA op works on (N*minor, H, W, 1) layout
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
        # adjoint: down/up swapped, flipped kernel (grad_kernel), g_pad padding
        grad_input = upfirdn2d_op.upfirdn2d(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])
        ctx.save_for_backward(kernel)
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        # stash the forward op's parameters for the double-backward
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size
        return grad_input
    @staticmethod
    def backward(ctx, gradgrad_input):
        kernel, = ctx.saved_tensors
        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)
        # double-backward is just the original forward applied to gradgrad
        gradgrad_out = upfirdn2d_op.upfirdn2d(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        # gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.out_size[0], ctx.out_size[1], ctx.in_size[3])
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )
        return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
    """Autograd wrapper around the upfirdn2d CUDA op.

    Semantics: upsample by zero-insertion (up), pad, correlate with the 2-D
    FIR `kernel`, then downsample (down). Input is NCHW; internally flattened
    to the op's (N*C, H, W, 1) layout.
    """
    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad
        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape
        input = input.reshape(-1, in_h, in_w, 1)
        # save the kernel and its spatial flip (used by the backward pass)
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)
        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)
        # complementary padding for the adjoint (gradient) upfirdn
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
        out = upfirdn2d_op.upfirdn2d(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        # out = out.view(major, out_h, out_w, minor)
        out = out.view(-1, channel, out_h, out_w)
        return out
    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors
        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )
        return grad_input, None, None, None, None
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    """Convenience wrapper over UpFirDn2d: the same up/down factors and
    (pad0, pad1) padding are applied on both spatial axes."""
    return UpFirDn2d.apply(
        input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
    )
def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    """Pure-PyTorch reference implementation of upfirdn2d.

    input: (N, in_h, in_w, minor) channel-last tensor; kernel: 2-D FIR filter.
    Pipeline: upsample by zero-insertion, pad (negative pads crop), correlate
    with the flipped kernel (i.e. convolve with `kernel`), then subsample.

    Bug fix: this module never imported torch.nn.functional, so every `F.*`
    call raised NameError; a function-scope import keeps the module's public
    surface unchanged while making the function actually runnable.
    """
    from torch.nn import functional as F  # missing at module level in the original

    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape
    # 1) upsample: insert up-1 zeros between samples along each axis
    out = input.view(-1, in_h, 1, in_w, 1, minor)
    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    out = out.view(-1, in_h * up_y, in_w * up_x, minor)
    # 2) pad with zeros; negative pads are realised as crops below
    out = F.pad(
        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    out = out[
        :,
        max(-pad_y0, 0): out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0): out.shape[2] - max(-pad_x1, 0),
        :,
    ]
    # 3) FIR filter: conv2d cross-correlates, so flip the kernel to convolve
    out = out.permute(0, 3, 1, 2)
    out = out.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    out = out.permute(0, 2, 3, 1)
    # 4) downsample by striding
    return out[:, ::down_y, ::down_x, :]
| 5,186 | 26.590426 | 108 | py |
StyleMask | StyleMask-master/libs/models/StyleGAN2/op/__init__.py | from .fused_act import FusedLeakyReLU, fused_leaky_relu
from .upfirdn2d import upfirdn2d
| 89 | 29 | 55 | py |
StyleMask | StyleMask-master/libs/models/StyleGAN2/op/fused_act.py | import os
import torch
from torch import nn
from torch.autograd import Function
from torch.utils.cpp_extension import load
# JIT-compile the fused bias+leaky-ReLU CUDA extension on first import
# (requires nvcc and a CUDA toolchain; cached by torch after the first build).
module_path = os.path.dirname(__file__)
fused = load(
    'fused',
    sources=[
        os.path.join(module_path, 'fused_bias_act.cpp'),
        os.path.join(module_path, 'fused_bias_act_kernel.cu'),
    ],
)
class FusedLeakyReLUFunctionBackward(Function):
    """Backward pass of the fused bias+leaky-ReLU, wrapped as a Function so
    the double-backward is also defined by the CUDA op (act code 3 selects
    the gradient variant)."""
    @staticmethod
    def forward(ctx, grad_output, out, negative_slope, scale):
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        empty = grad_output.new_empty(0)
        # act=3, grad=1: compute the input gradient from `out`'s sign pattern
        grad_input = fused.fused_bias_act(
            grad_output, empty, out, 3, 1, negative_slope, scale
        )
        # bias gradient: sum over all dims except the channel dim (dim 1)
        dim = [0]
        if grad_input.ndim > 2:
            dim += list(range(2, grad_input.ndim))
        grad_bias = grad_input.sum(dim).detach()
        return grad_input, grad_bias
    @staticmethod
    def backward(ctx, gradgrad_input, gradgrad_bias):
        out, = ctx.saved_tensors
        gradgrad_out = fused.fused_bias_act(
            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale
        )
        return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
    """Autograd wrapper for the fused (bias add + leaky ReLU + gain) CUDA op."""
    @staticmethod
    def forward(ctx, input, bias, negative_slope, scale):
        empty = input.new_empty(0)
        # act=3, grad=0: forward bias+lrelu; saving `out` (not input) suffices
        # because the lrelu gradient only depends on the output's sign
        out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)
        ctx.save_for_backward(out)
        ctx.negative_slope = negative_slope
        ctx.scale = scale
        return out
    @staticmethod
    def backward(ctx, grad_output):
        out, = ctx.saved_tensors
        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
            grad_output, out, ctx.negative_slope, ctx.scale
        )
        return grad_input, grad_bias, None, None
class FusedLeakyReLU(nn.Module):
    """Module form of `fused_leaky_relu` with a learned per-channel bias.

    The `scale` of sqrt(2) keeps activation variance roughly constant,
    as in StyleGAN2.
    """
    def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
        super().__init__()
        # One bias value per channel, added by the fused kernel.
        self.bias = nn.Parameter(torch.zeros(channel))
        self.negative_slope = negative_slope
        self.scale = scale
    def forward(self, input):
        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
    """Apply bias-add + LeakyReLU + scaling in one fused CUDA kernel call."""
    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
| 2,379 | 26.356322 | 83 | py |
StyleMask | StyleMask-master/libs/utilities/image_utils.py | import torch
import numpy as np
import cv2
import torchvision
import os
" Read image from path"
def read_image_opencv(image_path):
    """Load the image at `image_path` and return it as an RGB uint8 array."""
    bgr = cv2.imread(image_path, cv2.IMREAD_COLOR)  # OpenCV decodes in BGR order
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    return rgb.astype('uint8')
" image numpy array to tensor [-1,1] range "
def image_to_tensor(image):
    """Convert an RGB uint8 HWC numpy image to a CHW float tensor in [-1, 1].

    Images taller than 256 pixels are first downscaled (aspect-preserving)
    so their height becomes 256.
    """
    max_val = 1
    min_val = -1
    if image.shape[0] > 256:
        # The original called the undefined name `image_resize`, which raised
        # NameError for any image taller than 256 px. Resize in-place with
        # OpenCV instead, preserving the aspect ratio.
        ratio = 256.0 / image.shape[0]
        new_w = max(1, int(round(image.shape[1] * ratio)))
        image = cv2.resize(image, (new_w, 256), interpolation=cv2.INTER_AREA)
    # HWC uint8 -> CHW float in [0, 1], then rescale to [min_val, max_val].
    image_tensor = torch.tensor(np.transpose(image, (2, 0, 1))).float().div(255.0)
    image_tensor = image_tensor * (max_val - min_val) + min_val
    return image_tensor
def tensor_to_255(image):
    """Map a tensor from the GAN range [-1, 1] to [0, 255] (input untouched)."""
    result = image.clone()
    lo = -1
    hi = 1
    result.clamp_(min=lo, max=hi)
    # Shift/scale into [0, 1]; the tiny epsilon mirrors torchvision's rescale.
    result.add_(-lo).div_(hi - lo + 1e-5)
    return result.mul(255.0).add(0.0)
def torch_image_resize(image, width = None, height = None):
    """Resize a CHW tensor so one side matches `width` or `height`.

    Exactly one of `width`/`height` should be given; the other side is
    scaled to preserve the aspect ratio. If both are None the input is
    returned unchanged. Returns a CHW tensor.

    (Dead locals `dim = None` and the unused `scale` were removed.)
    """
    (h, w) = image.shape[1:]
    if width is None and height is None:
        # Nothing requested: return the original image untouched.
        return image
    if width is None:
        # Fix the output height; scale the width by the same ratio.
        ratio = height / float(h)
        dim = (height, int(w * ratio))
    else:
        # Fix the output width; scale the height by the same ratio.
        ratio = width / float(w)
        dim = (int(h * ratio), width)
    # F.interpolate expects a batch dim, so add and remove one.
    image = image.unsqueeze(0)
    image = torch.nn.functional.interpolate(image, size=dim, mode='bilinear')
    return image.squeeze(0)
StyleMask | StyleMask-master/libs/utilities/stylespace_utils.py | import torch
import numpy as np
from torch.nn import functional as F
import os
import math
def conv_warper(layer, input, style, noise):
    """Run one StyleGAN2 StyledConv `layer` with an explicit style-space vector.

    Re-implements modulated convolution so that `style` (the already-modulated
    per-channel vector, not a w latent) can be injected directly. `layer` is a
    StyledConv holding `.conv` (ModulatedConv2d), `.noise` and `.activate`.
    """
    conv = layer.conv
    batch, in_channel, height, width = input.shape
    # Modulate: scale each input channel of the conv weight by the style.
    style = style.view(batch, 1, in_channel, 1, 1)
    weight = conv.scale * conv.weight * style
    if conv.demodulate:
        # Demodulate: normalize each output filter to unit norm (StyleGAN2).
        demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)
        weight = weight * demod.view(batch, conv.out_channel, 1, 1, 1)
    # Fold the batch dim into the filter dim so a grouped conv applies a
    # different (per-sample) weight to every sample.
    weight = weight.view(
        batch * conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
    )
    if conv.upsample:
        input = input.view(1, batch * in_channel, height, width)
        # conv_transpose2d expects (in, out, kh, kw), so swap the channel dims.
        weight = weight.view(
            batch, conv.out_channel, in_channel, conv.kernel_size, conv.kernel_size
        )
        weight = weight.transpose(1, 2).reshape(
            batch * in_channel, conv.out_channel, conv.kernel_size, conv.kernel_size
        )
        out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
        out = conv.blur(out)  # anti-aliasing blur after upsampling
    elif conv.downsample:
        input = conv.blur(input)  # anti-aliasing blur before downsampling
        _, _, height, width = input.shape
        input = input.view(1, batch * in_channel, height, width)
        out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
    else:
        # Plain same-resolution modulated convolution.
        input = input.view(1, batch * in_channel, height, width)
        out = F.conv2d(input, weight, padding=conv.padding, groups=batch)
        _, _, height, width = out.shape
        out = out.view(batch, conv.out_channel, height, width)
    out = layer.noise(out, noise=noise)
    out = layer.activate(out)
    return out
def decoder(G, style_space, latent, noise, resize_image = True):
    """Synthesize an image from explicit style-space vectors with generator `G`.

    `style_space` holds one modulation vector per styled conv (as produced by
    `encoder`), `latent` is the (batch, n_latent, 512) w-plus code still used
    by the to_rgb layers, and `noise` is the per-layer noise list.
    """
    out = G.input(latent)
    out = conv_warper(G.conv1, out, style_space[0], noise[0])
    skip = G.to_rgb1(out, latent[:, 1])
    i = 1
    # Walk the synthesis network two styled convs (one resolution) at a time.
    for conv1, conv2, noise1, noise2, to_rgb in zip(
        G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
    ):
        out = conv_warper(conv1, out, style_space[i], noise=noise1)
        out = conv_warper(conv2, out, style_space[i+1], noise=noise2)
        skip = to_rgb(out, latent[:, i + 2], skip)
        i += 2
    image = skip
    if resize_image:
        # Downsample the full-resolution output to the working 256x256 size.
        face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        image = face_pool(image)
    return image
def encoder(G, noise, truncation, truncation_latent, size = 256, input_is_latent = False):
    """Map a z (or w) code to per-layer style-space vectors of generator `G`.

    Returns (style_space, latent, noise): one modulation vector per styled
    conv, the expanded (batch, n_latent, 512) w-plus code, and the fixed
    per-layer noise buffers of `G`.

    Fix: the two-style mixing branch called `random.randint` although
    `random` was never imported in this module; a local import is added.
    """
    style_space = []
    inject_index = None
    if not input_is_latent:
        # z code: run it through the mapping network first.
        inject_index = G.n_latent
        styles = [noise]
        styles = [G.style(s) for s in styles]
    else:
        styles = [noise]
    # Number of w entries for this resolution (e.g. 14 for 256, 18 for 1024).
    n_latent = int(math.log(size, 2)) * 2 - 2
    if truncation < 1:
        # Truncation trick: pull each style toward the mean latent.
        style_t = []
        for style in styles:
            style_t.append(
                truncation_latent + truncation * (style - truncation_latent)
            )
        styles = style_t
    if len(styles) < 2:
        inject_index = n_latent
        if styles[0].ndim < 3:
            # Single w vector: broadcast it to all layers (w-plus).
            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        else:
            latent = styles[0]
    else:
        # Style mixing: splice two codes at a random layer index.
        if inject_index is None:
            import random  # local import: `random` was referenced but never imported
            inject_index = random.randint(1, n_latent - 1)
        latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)
        latent2 = styles[1].unsqueeze(1).repeat(1, n_latent - inject_index, 1)
        latent = torch.cat([latent, latent2], 1)
    noise = [getattr(G.noises, 'noise_{}'.format(i)) for i in range(G.num_layers)]
    # Project each w entry through the matching conv's modulation layer.
    style_space.append(G.conv1.conv.modulation(latent[:, 0]))
    i = 1
    for conv1, conv2, noise1, noise2, to_rgb in zip(
        G.convs[::2], G.convs[1::2], noise[1::2], noise[2::2], G.to_rgbs
    ):
        style_space.append(conv1.conv.modulation(latent[:, i]))
        style_space.append(conv2.conv.modulation(latent[:, i+1]))
        i += 2
    return style_space, latent, noise
| 3,714 | 27.576923 | 90 | py |
StyleMask | StyleMask-master/libs/utilities/dataloader.py | """
"""
import torch
import os
import glob
import cv2
import numpy as np
from torchvision import transforms, utils
from PIL import Image
from torch.utils.data import Dataset
from libs.utilities.utils import make_noise
np.random.seed(0)
class CustomDataset_validation(Dataset):
    """Validation dataset yielding (source, target) latent-code pairs."""
    def __init__(self, synthetic_dataset_path = None, validation_pairs = None, shuffle = True):
        """
        Args:
            synthetic_dataset_path: path to synthetic latent codes. If None generate random
            num_samples: how many samples for validation
        """
        # NOTE(review): `shuffle` is stored but not read anywhere in this
        # class — presumably consumed by an external DataLoader; confirm.
        self.shuffle = shuffle
        self.validation_pairs = validation_pairs
        self.synthetic_dataset_path = synthetic_dataset_path
        if self.synthetic_dataset_path is not None:
            # Load pre-generated z codes; first half are sources, second half targets.
            z_codes = np.load(self.synthetic_dataset_path)
            z_codes = torch.from_numpy(z_codes)
            if self.validation_pairs is not None:
                self.num_samples = 2 * self.validation_pairs
                if z_codes.shape[0] > self.num_samples:
                    # More codes than requested: keep only the first 2*pairs.
                    z_codes = z_codes[:self.num_samples]
                else:
                    # Fewer codes than requested: shrink the pair count to fit.
                    self.num_samples = z_codes.shape[0]
                    self.validation_pairs = int(self.num_samples/2)
            else:
                # No pair count given: derive it from the file size.
                self.validation_pairs = int(z_codes.shape[0]/2)
                self.num_samples = 2 * self.validation_pairs
            self.fixed_source_w = z_codes[:self.validation_pairs, :]
            self.fixed_target_w = z_codes[self.validation_pairs:2*self.validation_pairs, :]
        else:
            # No file given: sample fresh 512-d Gaussian codes.
            self.fixed_source_w = make_noise(self.validation_pairs, 512, None)
            self.fixed_target_w = make_noise(self.validation_pairs, 512, None)
            # Save random generated latent codes
            save_path = './libs/configs/random_latent_codes_{}.npy'.format(self.validation_pairs)
            z_codes = torch.cat((self.fixed_source_w, self.fixed_target_w), dim = 0)
            np.save(save_path, z_codes.detach().cpu().numpy())
        # NOTE(review): `self.transform` is defined but unused by
        # __getitem__, which returns raw latent codes — confirm intent.
        self.transform = transforms.Compose([
            transforms.Resize((256, 256)),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
    def __len__(self):
        # One item per (source, target) pair.
        return self.validation_pairs
    def __getitem__(self, index):
        # Returns the index-th source/target latent-code pair.
        source_w = self.fixed_source_w[index]
        target_w = self.fixed_target_w[index]
        sample = {
            'source_w': source_w,
            'target_w': target_w
        }
        return sample
| 2,169 | 29.138889 | 92 | py |
StyleMask | StyleMask-master/libs/utilities/utils.py | import os
import numpy as np
import torch
from torchvision import utils as torch_utils
import glob
from datetime import datetime
import json
from libs.utilities.stylespace_utils import encoder, decoder
def make_path(filepath):
    """Create directory `filepath` (including parents) if it does not exist."""
    if os.path.exists(filepath):
        return
    os.makedirs(filepath, exist_ok = True)
def save_arguments_json(args, save_path, filename):
    """Serialize `args` (a JSON-compatible mapping) to `save_path/filename`.

    The previous version built a timestamp string (`datetime.now()` /
    `dt_string`) that was never written anywhere, plus a pointless
    `stat_dict = args` alias; both dead pieces were removed — the file
    contents are unchanged.
    """
    out_json = os.path.join(save_path, filename)
    with open(out_json, 'w') as out:
        json.dump(args, out)
def get_files_frompath(path, types):
    """Return a sorted list of files under `path` matching any glob in `types`."""
    matched = []
    for pattern in types:
        matched.extend(glob.glob(os.path.join(path, pattern)))
    matched.sort()
    return matched
def make_noise(batch, dim, truncation=None):
    """Sample a float latent tensor of shape [batch, *dim].

    Without truncation this is a standard normal. With `truncation` the
    BigGAN-style truncation trick is used: sample a normal truncated to
    [-2, 2] and scale by the truncation factor. (The original called an
    undefined helper `truncated_noise`, raising NameError on that path.)
    """
    if isinstance(dim, int):
        dim = [dim]
    if truncation is None or truncation == 1.0:
        return torch.randn([batch] + dim)
    # Truncated-normal sampling; local import keeps module-level deps unchanged.
    from scipy.stats import truncnorm
    values = truncnorm.rvs(-2, 2, size=[batch] + dim) * truncation
    return torch.from_numpy(values).to(torch.float)
def calculate_shapemodel(deca_model, images, image_space = 'gan'):
    """Run DECA on an image batch and collect its pose/shape/expression params.

    When `image_space` is 'gan' the batch is first mapped from the GAN's
    [-1, 1] range to [0, 255] as DECA expects. Returns (params_dict, angles
    on CUDA).
    """
    batch = images.clone()
    if image_space == 'gan':
        # invert image from [-1,1] to [0,255]
        low = -1
        high = 1
        batch.clamp_(min=low, max=high)
        batch.add_(-low).div_(high - low + 1e-5)
        batch = batch.mul(255.0)
    p_tensor, alpha_shp_tensor, alpha_exp_tensor, angles, cam = deca_model.extract_DECA_params(batch)
    out_dict = {
        'pose': p_tensor,
        'alpha_exp': alpha_exp_tensor,
        'alpha_shp': alpha_shp_tensor,
        'cam': cam,
    }
    return out_dict, angles.cuda()
def generate_image(G, latent_code, truncation, trunc, image_resolution, split_sections, input_is_latent = False, return_latents = False, resize_image = True):
    """Generate an image with `G` and also return its style-space decomposition.

    Returns (img, style_space, w, noise). NOTE(review): `split_sections` is
    accepted but never used in this function — confirm whether it can be
    dropped at the call sites.
    """
    img, _ = G([latent_code], return_latents = return_latents, truncation = truncation, truncation_latent = trunc, input_is_latent = input_is_latent)
    # Recompute the style-space vectors for the same code via the encoder warper.
    style_space, w, noise = encoder(G, latent_code, truncation, trunc, size = image_resolution, input_is_latent = input_is_latent)
    if resize_image:
        # Downsample the generator output to the working 256x256 size.
        face_pool = torch.nn.AdaptiveAvgPool2d((256, 256))
        img = face_pool(img)
    return img, style_space, w, noise
def generate_new_stylespace(style_source, style_target, mask, num_layers_control = None):
    """Blend two flattened style-space vectors with a soft mask.

    If `num_layers_control` is given, only the first `mask.shape[1]` channels
    are blended and the rest are copied from the source; otherwise the whole
    vector is blended.
    """
    if num_layers_control is None:
        return mask * style_target + (1 - mask) * style_source
    blended = style_source.clone()
    k = mask.shape[1]
    blended[:, :k] = mask * style_target[:, :k] + (1 - mask) * style_source[:, :k]
    return blended
def save_image(image, save_image_dir):
    """Write `image` (values in [-1, 1]) to `save_image_dir`, rescaled to [0, 1].

    torchvision renamed `range=` to `value_range=` and removed the old
    keyword in 0.13, so the original call breaks on current installs.
    Try the new name first and fall back for older torchvision versions.
    (The dead `grid =` assignment was dropped; save_image returns None.)
    """
    try:
        torch_utils.save_image(
            image,
            save_image_dir,
            normalize=True,
            value_range=(-1, 1),
        )
    except TypeError:
        # Older torchvision (< 0.10) only knows the `range` keyword.
        torch_utils.save_image(
            image,
            save_image_dir,
            normalize=True,
            range=(-1, 1),
        )
def save_grid(source_img, target_img, reenacted_img, save_path):
    """Save [source | target | reenacted] side by side as a single image row.

    Each input is a (1, 3, H, H) or (3, H, H) tensor with square spatial size.
    """
    panels = [
        source_img.squeeze(0),
        target_img.squeeze(0),
        reenacted_img.squeeze(0),
    ]
    # Concatenate along the width axis (dim 2 of a CHW tensor).
    save_image(torch.cat(panels, dim=2), save_path)
| 3,116 | 32.880435 | 158 | py |
StyleMask | StyleMask-master/libs/utilities/ffhq_cropping.py | '''
Aling and crop images like in FFHQ dataset
Code from https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py
'''
import numpy as np
import cv2
import os
import glob
import matplotlib.pyplot as plt
import collections
import PIL.Image
import PIL.ImageFile
from PIL import Image
import scipy.ndimage
def align_crop_image(image, landmarks, transform_size = 4096, output_size = 256):
    """Align and crop a face image to FFHQ convention from 68 facial landmarks.

    `image` is an RGB uint8 array; `landmarks` are 68 (x, y) points in
    image coordinates. Returns an output_size x output_size RGB array.
    NOTE(review): the `transform_size` parameter is unconditionally
    overwritten to 256 near the end, so the 4096 default is never used —
    confirm whether that hard-coding is intentional.
    """
    lm = landmarks
    # Landmark groups of the standard 68-point annotation.
    lm_chin = lm[0 : 17] # left-right
    lm_eyebrow_left = lm[17 : 22] # left-right
    lm_eyebrow_right = lm[22 : 27] # left-right
    lm_nose = lm[27 : 31] # top-down
    lm_nostrils = lm[31 : 36] # top-down
    lm_eye_left = lm[36 : 42] # left-clockwise
    lm_eye_right = lm[42 : 48] # left-clockwise
    lm_mouth_outer = lm[48 : 60] # left-clockwise
    lm_mouth_inner = lm[60 : 68] # left-clockwise
    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg
    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2
    img = Image.fromarray(image)
    # Shrink large inputs first so the later transform stays cheap.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink
    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]
    # Pad.
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    enable_padding = True
    if enable_padding and max(pad) > border - 4:
        # Reflect-pad, then blur and fade the padded border toward the
        # median color so the extension looks natural (FFHQ recipe).
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.01
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]
    # Transform.
    transform_size = 256
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)
    pix = np.array(img)
    return pix
| 3,531 | 36.978495 | 159 | py |
StyleMask | StyleMask-master/libs/utilities/utils_inference.py | import os
import numpy as np
import torch
from torchvision import utils as torch_utils
import cv2
from skimage import io
from libs.utilities.image_utils import read_image_opencv, torch_image_resize
from libs.utilities.ffhq_cropping import align_crop_image
def calculate_evaluation_metrics(params_shifted, params_target, angles_shifted, angles_target, imgs_shifted, imgs_source, id_loss_, exp_ranges):
    """Compute reenactment metrics for one sample: (CSIM, pose error, expression error).

    `angles_*` hold [yaw, pitch, roll] per sample; `params_*` are DECA
    dictionaries with 'alpha_exp' and 'pose' (index 3 of 'pose' is the jaw).
    `exp_ranges` gives per-coefficient (min, max) ranges used to normalize
    expression coefficients to [0, 1] before comparison.
    """
    ############ Evaluation ############
    # Only the first sample of the batch is evaluated.
    yaw_reenacted = angles_shifted[:,0][0].detach().cpu().numpy()
    pitch_reenacted = angles_shifted[:,1][0].detach().cpu().numpy()
    roll_reenacted = angles_shifted[:,2][0].detach().cpu().numpy()
    exp_reenacted = params_shifted['alpha_exp'][0].detach().cpu().numpy()
    jaw_reenacted = params_shifted['pose'][0, 3].detach().cpu().numpy()
    yaw_target = angles_target[:,0][0].detach().cpu().numpy()
    pitch_target = angles_target[:,1][0].detach().cpu().numpy()
    roll_target = angles_target[:,2][0].detach().cpu().numpy()
    exp_target = params_target['alpha_exp'][0].detach().cpu().numpy()
    jaw_target = params_target['pose'][0, 3].detach().cpu().numpy()
    exp_error = []
    num_expressions = 20
    # exp_ranges[3] holds the jaw range; expression coefficient j uses
    # exp_ranges[j + 4].
    max_range = exp_ranges[3][1]
    min_range = exp_ranges[3][0]
    jaw_target = (jaw_target - min_range)/(max_range-min_range)
    jaw_reenacted = (jaw_reenacted - min_range)/(max_range-min_range)
    exp_error.append(abs(jaw_reenacted - jaw_target))
    for j in range(num_expressions):
        max_range = exp_ranges[j+4][1]
        min_range = exp_ranges[j+4][0]
        target = (exp_target[j] - min_range)/(max_range-min_range)
        reenacted = (exp_reenacted[j] - min_range)/(max_range-min_range)
        exp_error.append(abs(reenacted - target) )
    # Mean absolute error over jaw + the first 20 normalized coefficients.
    exp_error = np.mean(exp_error)
    ## normalize exp coef in [0,1]
    # exp_error = []
    # num_expressions = 12 # len(exp_target)
    # for j in range(num_expressions):
    # 	exp_error.append(abs(exp_reenacted[j] - exp_target[j]) )
    # exp_error.append(abs(jaw_reenacted - jaw_target))
    # exp_error = np.mean(exp_error)
    # Mean absolute angular error over yaw/pitch/roll.
    pose = (abs(yaw_reenacted-yaw_target) + abs(pitch_reenacted-pitch_target) + abs(roll_reenacted-roll_target))/3
    ################################################
    ###### CSIM ######
    # Cosine similarity of ArcFace features (id_loss_ returns 1 - cos).
    loss_identity = id_loss_(imgs_shifted, imgs_source)
    csim = 1 - loss_identity.data.item()
    return csim, pose, exp_error
def generate_grid_image(source, target, reenacted):
    """Stack per-sample [source | target | reenacted] rows into one CHW grid.

    Inputs are (B, 3, 256, 256) tensors; grids taller than 1000 px are
    downscaled to height 800.
    """
    batch = source.shape[0]
    side = 256
    grid = torch.zeros((3, batch * side, 3 * side))
    for idx in range(batch):
        top = idx * side
        bottom = top + side
        grid[:, top:bottom, :side] = source[idx]
        grid[:, top:bottom, side:2 * side] = target[idx]
        grid[:, top:bottom, 2 * side:] = reenacted[idx]
    if grid.shape[1] > 1000:  # grid height
        grid = torch_image_resize(grid, height = 800)
    return grid
" Crop images using facial landmarks like FFHQ "
def preprocess_image(image_path, landmarks_est, save_filename = None):
    """Load an image, detect landmarks, and crop it FFHQ-style.

    `landmarks_est` must expose get_landmarks(image) returning a list of
    68-point landmark sets (the first face is used). If `save_filename`
    is given the cropped image is also written to disk (as BGR for
    OpenCV). Exits the process on preprocessing failure.
    """
    image = read_image_opencv(image_path)
    landmarks = landmarks_est.get_landmarks(image)[0]
    landmarks = np.asarray(landmarks)
    img = align_crop_image(image, landmarks)
    if img is not None and save_filename is not None:
        # cv2.imwrite expects BGR, so convert back from RGB.
        cv2.imwrite(save_filename, cv2.cvtColor(img.copy(), cv2.COLOR_RGB2BGR))
    if img is not None:
        return img
    else:
        # NOTE(review): hard process exit on failure — callers cannot recover.
        print('Error with image preprocessing')
        exit()
" Invert real image into the latent space of StyleGAN2 "
def invert_image(image, encoder, generator, truncation, trunc, save_path = None, save_name = None):
    """Invert a real image into the generator's latent space (e4e encoder).

    Returns (inverted_images, latent_codes). When both `save_path` and
    `save_name` are given, the reconstruction is saved as PNG and the
    first latent code as an .npy file.
    """
    with torch.no_grad():
        latent_codes = encoder(image)
        inverted_images, _ = generator([latent_codes], input_is_latent=True, return_latents = False, truncation= truncation, truncation_latent=trunc)
    if save_path is not None and save_name is not None:
        # NOTE(review): torchvision removed the `range=` keyword (renamed
        # `value_range` in 0.11, removed in 0.13) — this call breaks on
        # newer torchvision; also save_image returns None, so `grid` is dead.
        grid = torch_utils.save_image(
            inverted_images,
            os.path.join(save_path, '{}.png'.format(save_name)),
            normalize=True,
            range=(-1, 1),
        )
        # Latent code
        latent_code = latent_codes[0].detach().cpu().numpy()
        save_dir = os.path.join(save_path, '{}.npy'.format(save_name))
        np.save(save_dir, latent_code)
    return inverted_images, latent_codes
StyleMask | StyleMask-master/libs/configs/config_models.py | import os
import numpy as np
# Configuration for the 1024px StyleGAN2 FFHQ generator used by StyleMask.
stylegan2_ffhq_1024 = {
    # Output resolution of the generator.
    'image_resolution': 1024,
    # StyleGAN2 channel multiplier (config-f).
    'channel_multiplier': 2,
    # Pretrained generator checkpoint.
    'gan_weights': './pretrained_models/stylegan2-ffhq-config-f_1024.pt',
    # Total size of the flattened style-space vector (sum of split_sections).
    'stylespace_dim': 6048,
    # Per-layer style vector widths, in synthesis-network order.
    'split_sections': [512, 512, 512, 512, 512, 512, 512, 512, 512, 512, 256, 256, 128, 128, 64, 64, 32],
    # e4e encoder checkpoint for real-image inversion.
    'e4e_inversion_model': './pretrained_models/e4e_ffhq_encode_1024.pt',
    'expression_ranges': './libs/configs/ranges_FFHQ.npy' # Used for evaluation
}
StyleMask | StyleMask-master/libs/criteria/losses.py | import torch
import numpy as np
"""
Calculate shape losses
"""
class Losses():
    """Geometry and reconstruction losses over DECA landmarks and images.

    Fixes: `calculate_shape_loss`, `calculate_eye_loss` and
    `calculate_mouth_loss` constructed a fresh `torch.nn.L1Loss()` on every
    call instead of reusing `self.criterion_l1`, and the eye/mouth losses
    created `*_norm` clones that were never used; both issues are removed
    with no change in the returned values.
    """
    def __init__(self):
        self.criterion_mse = torch.nn.MSELoss()
        self.criterion_l1 = torch.nn.L1Loss()
        # Side length (px) of DECA's rendered image space.
        self.image_deca_size = 224
    def calculate_pixel_wise_loss(self, images_shifted, images):
        """L1 reconstruction loss between two image batches."""
        pixel_wise_loss = self.criterion_l1(images, images_shifted)
        return pixel_wise_loss
    def calculate_shape_loss(self, shape_gt, shape_reenacted, normalize = False):
        """L1 loss between landmark sets, optionally rescaled by a fixed 200-px factor."""
        if normalize:
            return self.criterion_l1(shape_gt / 200, shape_reenacted / 200)
        return self.criterion_l1(shape_gt, shape_reenacted)
    def calculate_eye_loss(self, shape_gt, shape_reenacted):
        """Eye-openness loss: L1 over distances between opposing eyelid landmarks.

        Comparing relative distances (not absolute positions) makes the loss
        invariant to global head placement.
        """
        eye_pairs = [(36, 39), (37, 41), (38, 40), (42, 45), (43, 47), (44, 46)]
        loss = 0
        for a, b in eye_pairs:
            d_gt = abs(shape_gt[:, a, :] - shape_gt[:, b, :])
            d_e = abs(shape_reenacted[:, a, :] - shape_reenacted[:, b, :])
            loss += self.criterion_l1(d_gt, d_e)
        return loss / len(eye_pairs)
    def calculate_mouth_loss(self, shape_gt, shape_reenacted):
        """Mouth-openness loss: L1 over distances between opposing lip landmarks."""
        mouth_pairs = [(48, 54), (49, 59), (50, 58), (51, 57), (52, 56), (53, 55), (60, 64), (61, 67), (62, 66), (63, 65)]
        loss = 0
        for a, b in mouth_pairs:
            d_gt = abs(shape_gt[:, a, :] - shape_gt[:, b, :])
            d_e = abs(shape_reenacted[:, a, :] - shape_reenacted[:, b, :])
            loss += self.criterion_l1(d_gt, d_e)
        return loss / len(mouth_pairs)
| 2,137 | 32.40625 | 116 | py |
StyleMask | StyleMask-master/libs/criteria/model_irse.py | from torch.nn import Linear, Conv2d, BatchNorm1d, BatchNorm2d, PReLU, Dropout, Sequential, Module
from .helpers import get_blocks, Flatten, bottleneck_IR, bottleneck_IR_SE, l2_norm
"""
Modified Backbone implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Backbone(Module):
    """ArcFace IR/IR-SE ResNet backbone producing L2-normalized 512-d embeddings.

    Module attribute names (input_layer / body / output_layer) match the
    pretrained ir_se50 checkpoints and must not be renamed.
    """
    def __init__(self, input_size, num_layers, mode='ir', drop_ratio=0.4, affine=True):
        super(Backbone, self).__init__()
        assert input_size in [112, 224], "input_size should be 112 or 224"
        assert num_layers in [50, 100, 152], "num_layers should be 50, 100 or 152"
        assert mode in ['ir', 'ir_se'], "mode should be ir or ir_se"
        blocks = get_blocks(num_layers)
        # Choose the residual unit type: plain IR or IR with squeeze-excitation.
        if mode == 'ir':
            unit_module = bottleneck_IR
        elif mode == 'ir_se':
            unit_module = bottleneck_IR_SE
        self.input_layer = Sequential(Conv2d(3, 64, (3, 3), 1, 1, bias=False),
                                      BatchNorm2d(64),
                                      PReLU(64))
        # The flattened feature map is 7x7 for 112-px inputs, 14x14 for 224-px.
        if input_size == 112:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 7 * 7, 512),
                                           BatchNorm1d(512, affine=affine))
        else:
            self.output_layer = Sequential(BatchNorm2d(512),
                                           Dropout(drop_ratio),
                                           Flatten(),
                                           Linear(512 * 14 * 14, 512),
                                           BatchNorm1d(512, affine=affine))
        # Flatten the per-stage block specs into one sequential trunk.
        modules = []
        for block in blocks:
            for bottleneck in block:
                modules.append(unit_module(bottleneck.in_channel,
                                           bottleneck.depth,
                                           bottleneck.stride))
        self.body = Sequential(*modules)
    def forward(self, x):
        x = self.input_layer(x)
        x = self.body(x)
        x = self.output_layer(x)
        # Unit-norm embedding, as expected for cosine-similarity comparison.
        return l2_norm(x)
def IR_50(input_size):
    """Build a 50-layer plain-IR backbone (no SE blocks)."""
    return Backbone(input_size, num_layers=50, mode='ir', drop_ratio=0.4, affine=False)
def IR_101(input_size):
    """Build a 100-layer plain-IR backbone (no SE blocks)."""
    return Backbone(input_size, num_layers=100, mode='ir', drop_ratio=0.4, affine=False)
def IR_152(input_size):
    """Build a 152-layer plain-IR backbone (no SE blocks)."""
    return Backbone(input_size, num_layers=152, mode='ir', drop_ratio=0.4, affine=False)
def IR_SE_50(input_size):
    """Build a 50-layer IR backbone with squeeze-excitation blocks."""
    return Backbone(input_size, num_layers=50, mode='ir_se', drop_ratio=0.4, affine=False)
def IR_SE_101(input_size):
    """Build a 100-layer IR backbone with squeeze-excitation blocks."""
    return Backbone(input_size, num_layers=100, mode='ir_se', drop_ratio=0.4, affine=False)
def IR_SE_152(input_size):
    """Build a 152-layer IR backbone with squeeze-excitation blocks."""
    return Backbone(input_size, num_layers=152, mode='ir_se', drop_ratio=0.4, affine=False)
| 2,821 | 32.2 | 97 | py |
StyleMask | StyleMask-master/libs/criteria/l2_loss.py | import torch
# Shared MSE criterion (mean reduction) reused across calls.
l2_criterion = torch.nn.MSELoss(reduction='mean')
def l2_loss(real_images, generated_images):
    """Mean-squared error between two image batches."""
    return l2_criterion(real_images, generated_images)
| 181 | 19.222222 | 54 | py |
StyleMask | StyleMask-master/libs/criteria/helpers.py | from collections import namedtuple
import torch
from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module
"""
ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch)
"""
class Flatten(Module):
    """Collapse every dimension after the batch dimension into one."""
    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def l2_norm(input, axis=1):
    """Scale `input` to unit L2 norm along dimension `axis`."""
    magnitude = torch.norm(input, 2, axis, True)  # keepdim for broadcasting
    output = input / magnitude
    return output
class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])):
    """ A named tuple describing a ResNet block. """
def get_block(in_channel, depth, num_units, stride=2):
    """One ResNet stage: a strided first unit followed by stride-1 units."""
    units = [Bottleneck(in_channel, depth, stride)]
    units += [Bottleneck(depth, depth, 1) for _ in range(num_units - 1)]
    return units
def get_blocks(num_layers):
    """Return the four-stage block layout for a 50/100/152-layer IR backbone."""
    # Units per stage for each supported depth; channel widths are fixed.
    units_per_stage = {
        50: (3, 4, 14, 3),
        100: (3, 13, 30, 3),
        152: (3, 8, 36, 3),
    }
    if num_layers not in units_per_stage:
        raise ValueError("Invalid number of layers: {}. Must be one of [50, 100, 152]".format(num_layers))
    u1, u2, u3, u4 = units_per_stage[num_layers]
    return [
        get_block(in_channel=64, depth=64, num_units=u1),
        get_block(in_channel=64, depth=128, num_units=u2),
        get_block(in_channel=128, depth=256, num_units=u3),
        get_block(in_channel=256, depth=512, num_units=u4),
    ]
class SEModule(Module):
    """Squeeze-and-Excitation channel attention (Hu et al.).

    Attribute names are kept for pretrained state_dict compatibility.
    """
    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = AdaptiveAvgPool2d(1)
        self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
        self.relu = ReLU(inplace=True)
        self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
        self.sigmoid = Sigmoid()
    def forward(self, x):
        # Squeeze to 1x1, excite through the bottleneck, gate the input.
        attention = self.avg_pool(x)
        attention = self.fc1(attention)
        attention = self.relu(attention)
        attention = self.fc2(attention)
        attention = self.sigmoid(attention)
        return x * attention
class bottleneck_IR(Module):
    """IR residual unit: BN -> 3x3 conv -> PReLU -> 3x3 conv (stride) -> BN,
    added to an identity (max-pool) or 1x1-conv shortcut.

    Attribute names are kept for pretrained state_dict compatibility.
    """
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR, self).__init__()
        # Identity-style shortcut when the channel count is unchanged,
        # otherwise a projection conv matches the depth/stride.
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth)
        )
    def forward(self, x):
        identity = self.shortcut_layer(x)
        residual = self.res_layer(x)
        return identity + residual
class bottleneck_IR_SE(Module):
    """IR residual unit with a squeeze-excitation block on the residual path.

    Attribute names are kept for pretrained state_dict compatibility.
    """
    def __init__(self, in_channel, depth, stride):
        super(bottleneck_IR_SE, self).__init__()
        # Identity-style shortcut when the channel count is unchanged,
        # otherwise a projection conv matches the depth/stride.
        if in_channel == depth:
            self.shortcut_layer = MaxPool2d(1, stride)
        else:
            self.shortcut_layer = Sequential(
                Conv2d(in_channel, depth, (1, 1), stride, bias=False),
                BatchNorm2d(depth)
            )
        self.res_layer = Sequential(
            BatchNorm2d(in_channel),
            Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False),
            PReLU(depth),
            Conv2d(depth, depth, (3, 3), stride, 1, bias=False),
            BatchNorm2d(depth),
            SEModule(depth, 16)
        )
    def forward(self, x):
        identity = self.shortcut_layer(x)
        residual = self.res_layer(x)
        return identity + residual
| 3,556 | 28.641667 | 112 | py |
StyleMask | StyleMask-master/libs/criteria/id_loss.py | import torch
from torch import nn
from .model_irse import Backbone
import os
import torch.backends.cudnn as cudnn
class IDLoss(nn.Module):
    """Identity loss: 1 - cosine similarity of ArcFace (ir_se50) embeddings."""
    def __init__(self, pretrained_model_path = './pretrained_models/model_ir_se50.pth'):
        super(IDLoss, self).__init__()
        print('Loading ResNet ArcFace for identity loss')
        self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')
        if not os.path.exists(pretrained_model_path):
            # NOTE(review): hard process exit when weights are missing.
            print('ir_se50 model does not exist in {}'.format(pretrained_model_path))
            exit()
        self.facenet.load_state_dict(torch.load(pretrained_model_path))
        # ArcFace expects 112x112 inputs.
        self.face_pool = torch.nn.AdaptiveAvgPool2d((112, 112))
        self.facenet.eval()
        self.criterion = nn.CosineSimilarity(dim=1, eps=1e-6)
    def extract_feats(self, x, crop = True):
        """Return ArcFace embeddings; optionally crop to the inner face region first."""
        if crop:
            x = x[:, :, 35:223, 32:220] # Crop interesting region
        x = self.face_pool(x)
        x_feats = self.facenet(x)
        return x_feats
    def forward(self, y_hat, y, crop = True):
        """Mean (1 - cosine similarity) between embeddings of y_hat and y.

        Gradients flow only through y_hat; y's features are detached.
        """
        n_samples = y.shape[0]
        y_feats = self.extract_feats(y, crop)
        y_hat_feats = self.extract_feats(y_hat, crop)
        cosine_sim = self.criterion(y_hat_feats, y_feats.detach())
        loss = 1 - cosine_sim
        loss = torch.mean(loss)
        return loss
| 1,349 | 37.571429 | 92 | py |
StyleMask | StyleMask-master/libs/criteria/lpips/lpips.py | import torch
import torch.nn as nn
from .networks import get_network, LinLayers
from .utils import get_state_dict
class LPIPS(nn.Module):
    r"""Creates a criterion that measures https://github.com/eladrich/pixel2style2pixel
    Learned Perceptual Image Patch Similarity (LPIPS).
    Arguments:
        net_type (str): the network type to compare the features:
                        'alex' | 'squeeze' | 'vgg'. Default: 'alex'.
        version (str): the version of LPIPS. Default: 0.1.
    """
    def __init__(self, net_type: str = 'alex', version: str = '0.1'):
        assert version in ['0.1'], 'v0.1 is only supported now'
        super(LPIPS, self).__init__()
        # pretrained network (feature extractor; weights frozen by BaseNet)
        self.net = get_network(net_type).to("cuda")
        # linear layers: learned per-level 1x1 weighting of feature differences
        self.lin = LinLayers(self.net.n_channels_list).to("cuda")
        self.lin.load_state_dict(get_state_dict(net_type, version))
    def forward(self, x: torch.Tensor, y: torch.Tensor):
        # Squared differences of normalized features at each tapped layer,
        # weighted by the linear heads, spatially averaged, then summed and
        # divided by the batch size.
        feat_x, feat_y = self.net(x), self.net(y)
        diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]
        res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]
        return torch.sum(torch.cat(res, 0)) / x.shape[0]
| 1,220 | 33.885714 | 87 | py |
StyleMask | StyleMask-master/libs/criteria/lpips/utils.py | from collections import OrderedDict
import torch
def normalize_activation(x, eps=1e-10):
    """Scale `x` to unit L2 norm along the channel dimension (dim=1).

    The 1e-9 inside the sqrt keeps the norm finite for all-zero
    activations; `eps` additionally guards the division. (Stale
    commented-out pdb/debug code was removed.)
    """
    norm_factor = torch.sqrt(torch.sum(x ** 2, dim=1, keepdim=True) + 1e-9)
    return x / (norm_factor + eps)
def get_state_dict(net_type: str = 'alex', version: str = '0.1'):
    """Download official LPIPS linear-head weights and rename keys for LinLayers.

    Keys like 'lin0.model.1.weight' become '0.1.weight' so they match the
    LinLayers ModuleList structure defined in this package.
    """
    # build url
    url = 'https://raw.githubusercontent.com/richzhang/PerceptualSimilarity/' \
        + f'master/lpips/weights/v{version}/{net_type}.pth'
    # download
    old_state_dict = torch.hub.load_state_dict_from_url(
        url, progress=True,
        map_location=None if torch.cuda.is_available() else torch.device('cpu')
    )
    # rename keys
    new_state_dict = OrderedDict()
    for key, val in old_state_dict.items():
        new_key = key
        new_key = new_key.replace('lin', '')
        new_key = new_key.replace('model.', '')
        new_state_dict[new_key] = val
    return new_state_dict
| 1,033 | 28.542857 | 79 | py |
StyleMask | StyleMask-master/libs/criteria/lpips/networks.py | from typing import Sequence
from itertools import chain
import torch
import torch.nn as nn
from torchvision import models
from .utils import normalize_activation
def get_network(net_type: str):
    """Instantiate the pretrained LPIPS feature extractor named by `net_type`."""
    if net_type == 'alex':
        return AlexNet()
    if net_type == 'squeeze':
        return SqueezeNet()
    if net_type == 'vgg':
        return VGG16()
    raise NotImplementedError('choose net_type from [alex, squeeze, vgg].')
class LinLayers(nn.ModuleList):
    """One frozen 1x1-conv head per feature level, mapping channels to a scalar map."""
    def __init__(self, n_channels_list: Sequence[int]):
        heads = [
            nn.Sequential(
                nn.Identity(),
                nn.Conv2d(nc, 1, 1, 1, 0, bias=False)
            )
            for nc in n_channels_list
        ]
        super(LinLayers, self).__init__(heads)
        # The heads are loaded from pretrained weights and never trained here.
        for param in self.parameters():
            param.requires_grad = False
class BaseNet(nn.Module):
    """Shared base for LPIPS feature extractors.

    Subclasses must set `self.layers` (an nn.Sequential backbone),
    `self.target_layers` (1-based indices whose activations are tapped)
    and `self.n_channels_list`.

    Fix: `forward` iterated the private `self.layers._modules` dict; it
    now iterates the Sequential directly, which yields the same modules
    in the same order without touching private internals.
    """
    def __init__(self):
        super(BaseNet, self).__init__()
        # LPIPS input-normalization statistics, registered as buffers so
        # they move with the module across devices/dtypes.
        self.register_buffer(
            'mean', torch.Tensor([-.030, -.088, -.188])[None, :, None, None])
        self.register_buffer(
            'std', torch.Tensor([.458, .448, .450])[None, :, None, None])
    def set_requires_grad(self, state: bool):
        """Toggle requires_grad on all parameters and buffers (used to freeze)."""
        for param in chain(self.parameters(), self.buffers()):
            param.requires_grad = state
    def z_score(self, x: torch.Tensor):
        """Normalize an input batch with the registered channel statistics."""
        return (x - self.mean) / self.std
    def forward(self, x: torch.Tensor):
        """Return normalized activations at each layer listed in `target_layers`."""
        x = self.z_score(x)
        output = []
        # 1-based enumeration matches the indices stored in target_layers.
        for i, layer in enumerate(self.layers, 1):
            x = layer(x)
            if i in self.target_layers:
                output.append(normalize_activation(x))
            if len(output) == len(self.target_layers):
                break
        return output
class SqueezeNet(BaseNet):
    """SqueezeNet-1.1 feature extractor used by LPIPS."""

    def __init__(self):
        super().__init__()
        # `True` -> ImageNet-pretrained weights (downloaded on first use).
        self.layers = models.squeezenet1_1(True).features
        self.target_layers = [2, 5, 8, 10, 11, 12, 13]
        self.n_channels_list = [64, 128, 256, 384, 384, 512, 512]
        self.set_requires_grad(False)
class AlexNet(BaseNet):
    """AlexNet feature extractor used by LPIPS."""

    def __init__(self):
        super().__init__()
        # `True` -> ImageNet-pretrained weights (downloaded on first use).
        self.layers = models.alexnet(True).features
        self.target_layers = [2, 5, 8, 10, 12]
        self.n_channels_list = [64, 192, 384, 256, 256]
        self.set_requires_grad(False)
class VGG16(BaseNet):
    """VGG-16 feature extractor used by LPIPS."""

    def __init__(self):
        super().__init__()
        # `True` -> ImageNet-pretrained weights (downloaded on first use).
        self.layers = models.vgg16(True).features
        self.target_layers = [4, 9, 16, 23, 30]
        self.n_channels_list = [64, 128, 256, 512, 512]
        self.set_requires_grad(False)
StyleMask | StyleMask-master/libs/criteria/lpips/__init__.py | 0 | 0 | 0 | py | |
StyleMask | StyleMask-master/libs/DECA/estimate_DECA.py | """
"""
import torch
import numpy as np
import cv2
import os
from .decalib.deca import DECA
from .decalib.datasets import datasets
from .decalib.utils import util
from .decalib.utils.config import cfg as deca_cfg
from .decalib.utils.rotation_converter import *
class DECA_model():
    """Thin wrapper around DECA exposing batched FLAME-coefficient
    extraction and coarse shape decoding."""

    def __init__(self, device):
        """Load the pretrained DECA network (texture disabled) onto *device*.

        Exits the process if the required `data/` assets next to this file
        are missing.
        """
        deca_cfg.model.use_tex = False
        dir_path = os.path.dirname(os.path.realpath(__file__))
        models_path = os.path.join(dir_path, 'data')
        if not os.path.exists(models_path):
            print('Please download the required data for DECA model. See Readme.')
            exit()
        self.deca = DECA(config = deca_cfg, device=device)
        self.data = datasets.TestData()

    'Batch torch tensor'
    def extract_DECA_params(self, images):
        """Encode each image in the batch into FLAME coefficients.

        Returns five CUDA tensors: pose (B,6), shape (B,100),
        expression (B,50), Euler angles in degrees (B,3) and camera (B,3).
        Rows whose preprocessing failed keep zero coefficients and have all
        three angles set to the sentinel -180.
        """
        p_tensor = torch.zeros(images.shape[0], 6).cuda()
        alpha_shp_tensor = torch.zeros(images.shape[0], 100).cuda()
        alpha_exp_tensor = torch.zeros(images.shape[0], 50).cuda()
        angles = torch.zeros(images.shape[0], 3).cuda()
        cam = torch.zeros(images.shape[0], 3).cuda()
        for batch in range(images.shape[0]):
            # get_image_tensor crops/aligns the face; error_flag signals
            # detection failure for this image.
            image_prepro, error_flag = self.data.get_image_tensor(images[batch].clone())
            if not error_flag:
                codedict = self.deca.encode(image_prepro.unsqueeze(0).cuda())
                # First 3 pose components are the global rotation (axis-angle);
                # converted to Euler degrees for the `angles` output.
                pose = codedict['pose'][:,:3]
                pose = rad2deg(batch_axis2euler(pose))
                p_tensor[batch] = codedict['pose'][0]
                alpha_shp_tensor[batch] = codedict['shape'][0]
                alpha_exp_tensor[batch] = codedict['exp'][0]
                cam[batch] = codedict['cam'][0]
                angles[batch] = pose
            else:
                # Sentinel value marking a failed detection.
                angles[batch][0] = -180
                angles[batch][1] = -180
                angles[batch][2] = -180
        return p_tensor, alpha_shp_tensor, alpha_exp_tensor, angles, cam

    def calculate_shape(self, coefficients, image = None, save_path = None, prefix = None):
        """Decode coefficients into 2D/3D landmarks and projected vertices.

        NOTE(review): `image`, `save_path` and `prefix` are accepted but
        unused here — presumably kept for a previous saving code path.
        """
        landmarks2d, landmarks3d, points = self.deca.decode(coefficients)
        return landmarks2d, landmarks3d, points
| 2,153 | 36.137931 | 94 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/deca.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import os, sys
import torch
import torchvision
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
from time import time
from skimage.io import imread
import cv2
import pickle
from .utils.renderer import SRenderY
from .models.encoders import ResnetEncoder
from .models.FLAME import FLAME, FLAMETex
from .models.decoders import Generator
from .utils import util
from .utils.rotation_converter import batch_euler2axis
from .datasets import datasets
from .utils.config import cfg
torch.backends.cudnn.benchmark = True
class DECA(object):
    """FLAME-based detailed 3D face reconstruction from a single image.

    Bundles the coarse/detail encoders, the FLAME decoder, an optional
    texture model and a differentiable renderer. Pretrained weights are
    loaded from ``cfg.pretrained_modelpath``; the process exits if they are
    missing.
    """
    def __init__(self, config=None, device='cuda'):
        # Fall back to the module-level default config when none is given.
        if config is None:
            self.cfg = cfg
        else:
            self.cfg = config
        self.device = device
        self.image_size = self.cfg.dataset.image_size
        self.uv_size = self.cfg.model.uv_size
        self._create_model(self.cfg.model)
        self._setup_renderer(self.cfg.model)
    def _setup_renderer(self, model_cfg):
        """Build the renderer and load UV masks/templates from disk."""
        self.render = SRenderY(self.image_size, obj_filename=model_cfg.topology_path, uv_size=model_cfg.uv_size).to(self.device)
        # face mask for rendering details
        mask = imread(model_cfg.face_eye_mask_path).astype(np.float32)/255.; mask = torch.from_numpy(mask[:,:,0])[None,None,:,:].contiguous()
        self.uv_face_eye_mask = F.interpolate(mask, [model_cfg.uv_size, model_cfg.uv_size]).to(self.device)
        mask = imread(model_cfg.face_mask_path).astype(np.float32)/255.; mask = torch.from_numpy(mask[:,:,0])[None,None,:,:].contiguous()
        self.uv_face_mask = F.interpolate(mask, [model_cfg.uv_size, model_cfg.uv_size]).to(self.device)
        # displacement correction
        fixed_dis = np.load(model_cfg.fixed_displacement_path)
        self.fixed_uv_dis = torch.tensor(fixed_dis).float().to(self.device)
        # mean texture
        mean_texture = imread(model_cfg.mean_tex_path).astype(np.float32)/255.; mean_texture = torch.from_numpy(mean_texture.transpose(2,0,1))[None,:,:,:].contiguous()
        self.mean_texture = F.interpolate(mean_texture, [model_cfg.uv_size, model_cfg.uv_size]).to(self.device)
        # dense mesh template, for save detail mesh
        self.dense_template = np.load(model_cfg.dense_template_path, allow_pickle=True, encoding='latin1').item()
    def _create_model(self, model_cfg):
        """Instantiate encoders/decoders and load pretrained weights."""
        # set up parameters
        self.n_param = model_cfg.n_shape+model_cfg.n_tex+model_cfg.n_exp+model_cfg.n_pose+model_cfg.n_cam+model_cfg.n_light
        self.n_detail = model_cfg.n_detail
        self.n_cond = model_cfg.n_exp + 3 # exp + jaw pose
        self.num_list = [model_cfg.n_shape, model_cfg.n_tex, model_cfg.n_exp, model_cfg.n_pose, model_cfg.n_cam, model_cfg.n_light]
        self.param_dict = {i:model_cfg.get('n_' + i) for i in model_cfg.param_list}
        # encoders
        self.E_flame = ResnetEncoder(outsize=self.n_param).to(self.device)
        self.E_detail = ResnetEncoder(outsize=self.n_detail).to(self.device)
        # decoders
        self.flame = FLAME(model_cfg).to(self.device)
        if model_cfg.use_tex:
            self.flametex = FLAMETex(model_cfg).to(self.device)
        self.D_detail = Generator(latent_dim=self.n_detail+self.n_cond, out_channels=1, out_scale=model_cfg.max_z, sample_mode = 'bilinear').to(self.device)
        # resume model
        model_path = self.cfg.pretrained_modelpath
        if os.path.exists(model_path):
            print(f'trained model found. Load {model_path}')
            checkpoint = torch.load(model_path)
            self.checkpoint = checkpoint
            util.copy_state_dict(self.E_flame.state_dict(), checkpoint['E_flame'])
            util.copy_state_dict(self.E_detail.state_dict(), checkpoint['E_detail'])
            util.copy_state_dict(self.D_detail.state_dict(), checkpoint['D_detail'])
        else:
            print(f'please check model path: {model_path}')
            exit()
        # eval mode
        self.E_flame.eval()
        self.E_detail.eval()
        self.D_detail.eval()
    def decompose_code(self, code, num_dict):
        ''' Convert a flattened parameter vector to a dictionary of parameters
        code_dict.keys() = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']
        '''
        code_dict = {}
        start = 0
        for key in num_dict:
            end = start+int(num_dict[key])
            code_dict[key] = code[:, start:end]
            start = end
            if key == 'light':
                # Spherical-harmonics lighting is stored as 9 RGB triplets.
                code_dict[key] = code_dict[key].reshape(code_dict[key].shape[0], 9, 3)
        return code_dict
    def displacement2normal(self, uv_z, coarse_verts, coarse_normals):
        ''' Convert displacement map into detail normal map
        '''
        batch_size = uv_z.shape[0]
        uv_coarse_vertices = self.render.world2uv(coarse_verts).detach()
        uv_coarse_normals = self.render.world2uv(coarse_normals).detach()
        # Restrict displacements to the face/eye region.
        uv_z = uv_z*self.uv_face_eye_mask
        uv_detail_vertices = uv_coarse_vertices + uv_z*uv_coarse_normals + self.fixed_uv_dis[None,None,:,:]*uv_coarse_normals.detach()
        dense_vertices = uv_detail_vertices.permute(0,2,3,1).reshape([batch_size, -1, 3])
        uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1))
        uv_detail_normals = uv_detail_normals.reshape([batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0,3,1,2)
        return uv_detail_normals
    def displacement2vertex(self, uv_z, coarse_verts, coarse_normals):
        ''' Convert displacement map into detail vertices
        '''
        batch_size = uv_z.shape[0]
        uv_coarse_vertices = self.render.world2uv(coarse_verts).detach()
        uv_coarse_normals = self.render.world2uv(coarse_normals).detach()
        uv_z = uv_z*self.uv_face_eye_mask
        uv_detail_vertices = uv_coarse_vertices + uv_z*uv_coarse_normals + self.fixed_uv_dis[None,None,:,:]*uv_coarse_normals.detach()
        dense_vertices = uv_detail_vertices.permute(0,2,3,1).reshape([batch_size, -1, 3])
        # uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1))
        # uv_detail_normals = uv_detail_normals.reshape([batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0,3,1,2)
        detail_faces = self.render.dense_faces
        return dense_vertices, detail_faces
    def visofp(self, normals):
        ''' visibility of keypoints, based on the normal direction
        '''
        normals68 = self.flame.seletec_3d68(normals)
        # A keypoint is visible when its normal faces the camera (z < 0.1).
        vis68 = (normals68[:,:,2:] < 0.1).float()
        return vis68
    # @torch.no_grad()
    def encode(self, images):
        """Run both encoders and return the full coefficient dictionary."""
        batch_size = images.shape[0]
        parameters = self.E_flame(images)
        detailcode = self.E_detail(images)
        codedict = self.decompose_code(parameters, self.param_dict)
        codedict['detail'] = detailcode
        codedict['images'] = images
        return codedict
    @torch.no_grad()
    def decode_deca(self, codedict):
        """Full DECA decode: geometry, texture, rendering and visual dicts.

        Returns (opdict, visdict) with reconstruction tensors and
        visualization images respectively.
        """
        images = codedict['images']
        batch_size = images.shape[0]
        ## decode
        verts, landmarks2d, landmarks3d = self.flame(shape_params=codedict['shape'], expression_params=codedict['exp'], pose_params=codedict['pose'])
        uv_z = self.D_detail(torch.cat([codedict['pose'][:,3:], codedict['exp'], codedict['detail']], dim=1))
        if self.cfg.model.use_tex:
            albedo = self.flametex(codedict['tex'])
        else:
            albedo = torch.zeros([batch_size, 3, self.uv_size, self.uv_size], device=images.device)
        ## projection
        landmarks2d = util.batch_orth_proj(landmarks2d, codedict['cam'])[:,:,:2]; landmarks2d[:,:,1:] = -landmarks2d[:,:,1:]; landmarks2d = landmarks2d*self.image_size/2 + self.image_size/2
        landmarks3d = util.batch_orth_proj(landmarks3d, codedict['cam']); landmarks3d[:,:,1:] = -landmarks3d[:,:,1:]; landmarks3d = landmarks3d*self.image_size/2 + self.image_size/2
        trans_verts = util.batch_orth_proj(verts, codedict['cam']); trans_verts[:,:,1:] = -trans_verts[:,:,1:]
        ## rendering
        ops = self.render(verts, trans_verts, albedo, codedict['light'])
        uv_detail_normals = self.displacement2normal(uv_z, verts, ops['normals'])
        uv_shading = self.render.add_SHlight(uv_detail_normals, codedict['light'])
        uv_texture = albedo*uv_shading
        landmarks3d_vis = self.visofp(ops['transformed_normals'])
        landmarks3d = torch.cat([landmarks3d, landmarks3d_vis], dim=2)
        ## render shape
        shape_images = self.render.render_shape(verts, trans_verts)
        detail_normal_images = F.grid_sample(uv_detail_normals, ops['grid'], align_corners=False)*ops['alpha_images']
        shape_detail_images = self.render.render_shape(verts, trans_verts, detail_normal_images=detail_normal_images)
        ## extract texture
        ## TODO: current resolution 256x256, support higher resolution, and add visibility
        uv_pverts = self.render.world2uv(trans_verts)
        uv_gt = F.grid_sample(images, uv_pverts.permute(0,2,3,1)[:,:,:,:2], mode='bilinear')
        if self.cfg.model.use_tex:
            ## TODO: poisson blending should give better-looking results
            uv_texture_gt = uv_gt[:,:3,:,:]*self.uv_face_eye_mask + (uv_texture[:,:3,:,:]*(1-self.uv_face_eye_mask)*0.7)
        else:
            uv_texture_gt = uv_gt[:,:3,:,:]*self.uv_face_eye_mask + (torch.ones_like(uv_gt[:,:3,:,:])*(1-self.uv_face_eye_mask)*0.7)
        ## output
        opdict = {
            'vertices': verts,
            'normals': ops['normals'],
            'transformed_vertices': trans_verts,
            'landmarks2d': landmarks2d,
            'landmarks3d': landmarks3d,
            'uv_detail_normals': uv_detail_normals,
            'uv_texture_gt': uv_texture_gt,
            'displacement_map': uv_z+self.fixed_uv_dis[None,None,:,:],
        }
        if self.cfg.model.use_tex:
            opdict['albedo'] = albedo
            opdict['uv_texture'] = uv_texture
        visdict = {
            'inputs': images,
            'landmarks2d': util.tensor_vis_landmarks(images, landmarks2d, isScale=False),
            'landmarks3d': util.tensor_vis_landmarks(images, landmarks3d, isScale=False),
            'shape_images': shape_images,
            'shape_detail_images': shape_detail_images
        }
        if self.cfg.model.use_tex:
            visdict['rendered_images'] = ops['images']
        return opdict, visdict
    def decode(self, codedict):
        """Lightweight decode: projected 2D/3D landmarks and vertices only.

        NOTE(review): `images`/`batch_size` are derived from codedict['shape']
        but unused — looks like a leftover from decode_deca.
        """
        images = codedict['shape']
        batch_size = images.shape[0]
        verts, landmarks2d, landmarks3d = self.flame(shape_params=codedict['shape'], expression_params=codedict['exp'], pose_params=codedict['pose'])
        landmarks2d = util.batch_orth_proj(landmarks2d, codedict['cam'])[:,:,:2]; landmarks2d[:,:,1:] = -landmarks2d[:,:,1:]; landmarks2d = landmarks2d*self.image_size/2 + self.image_size/2
        trans_verts = util.batch_orth_proj(verts, codedict['cam']); trans_verts[:,:,1:] = -trans_verts[:,:,1:]; trans_verts = trans_verts*self.image_size/2 + self.image_size/2
        landmarks3d = util.batch_orth_proj(landmarks3d, codedict['cam']); landmarks3d[:,:,1:] = -landmarks3d[:,:,1:]; landmarks3d = landmarks3d*self.image_size/2 + self.image_size/2
        return landmarks2d, landmarks3d, trans_verts
    def visualize(self, visdict, size=None):
        """Tile every entry of *visdict* into one BGR uint8 image grid."""
        grids = {}
        if size is None:
            size = self.image_size
        for key in visdict:
            grids[key] = torchvision.utils.make_grid(F.interpolate(visdict[key], [size, size])).detach().cpu()
        grid = torch.cat(list(grids.values()), 2)
        # RGB -> BGR and [0,1] -> [0,255] for OpenCV-style output.
        grid_image = (grid.numpy().transpose(1,2,0).copy()*255)[:,:,[2,1,0]]
        grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8)
        return grid_image
    def save_obj(self, filename, opdict):
        '''
        vertices: [nv, 3], tensor
        texture: [3, h, w], tensor
        '''
        # Only the first item of the batch is exported.
        i = 0
        vertices = opdict['vertices'][i].cpu().numpy()
        faces = self.render.faces[0].cpu().numpy()
        texture = util.tensor2image(opdict['uv_texture_gt'][i])
        uvcoords = self.render.raw_uvcoords[0].cpu().numpy()
        uvfaces = self.render.uvfaces[0].cpu().numpy()
        # save coarse mesh, with texture and normal map
        normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5)
        util.write_obj(filename, vertices, faces,
                        texture=texture,
                        uvcoords=uvcoords,
                        uvfaces=uvfaces,
                        normal_map=normal_map)
        # upsample mesh, save detailed mesh
        texture = texture[:,:,[2,1,0]]
        normals = opdict['normals'][i].cpu().numpy()
        displacement_map = opdict['displacement_map'][i].cpu().numpy().squeeze()
        dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, self.dense_template)
        util.write_obj(filename.replace('.obj', '_detail.obj'),
                        dense_vertices,
                        dense_faces,
                        colors = dense_colors,
                        inverse_face_order=True)
    def save_ply(self, filename, opdict):
        '''
        vertices: [nv, 3], tensor
        texture: [3, h, w], tensor
        '''
        header_temp = """ply
format ascii 1.0
element vertex {}
property float x
property float y
property float z
element face {}
property list uchar int vertex_indices
end_header
"""
        i = 0
        vertices = opdict['vertices'].squeeze(0).cpu().numpy()
        print(vertices.shape)
        faces = self.render.faces[0].cpu().numpy()
        print(faces.shape)
        n_vertex = vertices.shape[0]
        n_face = faces.shape[0]
        header = header_temp.format(n_vertex, n_face)
        print(header)
        with open(filename, 'w') as f:
            f.write(header + '\n')
            for i in range(n_vertex):
                x, y, z = vertices[i, :]
                # if reverse:
                #     f.write(f'{x:.2f} {height-y:.2f} {z:.2f}\n')
                # else:
                f.write(f'{x:.2f} {y:.2f} {z:.2f}\n')
            for i in range(n_face):
                idx1, idx2, idx3 = faces[i]  # m x 3
                # if reverse:
                #     f.write(f'3 {idx3} {idx2} {idx1}\n')
                # else:
                f.write(f'3 {idx1} {idx2} {idx3}\n')
        # NOTE(review): this message looks truncated — presumably it should
        # include the output filename; confirm against upstream DECA.
        print(f'Dump tp (unknown)')
StyleMask | StyleMask-master/libs/DECA/decalib/__init__.py | 0 | 0 | 0 | py | |
StyleMask | StyleMask-master/libs/DECA/decalib/models/resnet.py | """
Author: Soubhik Sanyal
Copyright (c) 2019, Soubhik Sanyal
All rights reserved.
Loads different resnet models
"""
'''
file: Resnet.py
date: 2018_05_02
author: zhangxiong(1025679612@qq.com)
mark: copied from pytorch source code
'''
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.nn.parameter import Parameter
import torch.optim as optim
import numpy as np
import math
import torchvision
class ResNet(nn.Module):
    """Headless ResNet backbone: the classifier `fc` layer is removed and
    `forward` returns the pooled 512*expansion-dim feature vector.

    NOTE(review): `num_classes` is unused because the `fc` layer below is
    commented out — kept only for signature compatibility.
    """
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        # Stem: 7x7 stride-2 conv + maxpool, as in the reference ResNet.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Kaiming-style init for convs, constant init for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks; the first may downsample."""
        downsample = None
        # A 1x1 projection is needed when spatial or channel dims change.
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the pooled feature vector [bz, 512*expansion]."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x1 = self.layer4(x)
        x2 = self.avgpool(x1)
        x2 = x2.view(x2.size(0), -1)
        # x = self.fc(x)
        ## x2: [bz, 2048] for shape
        ## x1: [bz, 2048, 7, 7] for texture
        return x2
class Bottleneck(nn.Module):
    """ResNet bottleneck block: 1x1 reduce -> 3x3 -> 1x1 expand (x4)."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # Module creation order is kept identical to the reference so that
        # parameter registration and RNG-based init line up.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, optionally projected to the output shape.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """ResNet basic block: two 3x3 convolutions with an identity shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        # Creation order kept identical to the reference implementation so
        # parameter registration and RNG-based init line up.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Identity shortcut, optionally projected to the output shape.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def copy_parameter_from_resnet(model, resnet_dict):
    """Copy matching weights from a torchvision ResNet state dict into *model*.

    Entries whose names are missing from *model*'s state dict are skipped
    silently (e.g. the 'fc.*' head, which the reconstructed ResNet drops);
    entries that cannot be copied (shape/dtype mismatch) are reported and
    skipped so the load is best-effort rather than aborting.

    Args:
        model: target ``nn.Module`` whose state-dict tensors receive the data.
        resnet_dict: source mapping of parameter name -> tensor/Parameter.
    """
    cur_state_dict = model.state_dict()
    for name, param in resnet_dict.items():
        if name not in cur_state_dict:
            # e.g. classifier weights that do not exist in the headless model
            continue
        if isinstance(param, Parameter):
            param = param.data
        try:
            cur_state_dict[name].copy_(param)
        # Narrow handling: a bare `except:` previously swallowed everything,
        # including KeyboardInterrupt. copy_ failures surface as
        # RuntimeError (shape/dtype) or TypeError (non-tensor source).
        except (RuntimeError, TypeError):
            print(name, ' is inconsistent!')
            continue
def load_ResNet50Model():
    """Build a headless ResNet-50 and copy in ImageNet-pretrained
    torchvision weights (downloads them on first use)."""
    model = ResNet(Bottleneck, [3, 4, 6, 3])
    copy_parameter_from_resnet(model, torchvision.models.resnet50(pretrained = True).state_dict())
    return model
def load_ResNet101Model():
    """Build a headless ResNet-101 and copy in ImageNet-pretrained
    torchvision weights (downloads them on first use)."""
    model = ResNet(Bottleneck, [3, 4, 23, 3])
    copy_parameter_from_resnet(model, torchvision.models.resnet101(pretrained = True).state_dict())
    return model
def load_ResNet152Model():
    """Build a headless ResNet-152 and copy in ImageNet-pretrained
    torchvision weights (downloads them on first use)."""
    model = ResNet(Bottleneck, [3, 8, 36, 3])
    copy_parameter_from_resnet(model, torchvision.models.resnet152(pretrained = True).state_dict())
    return model
# model.load_state_dict(checkpoint['model_state_dict'])
######## Unet
class DoubleConv(nn.Module):
    """Two (Conv3x3 => BatchNorm => ReLU) stages at a fixed resolution."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Apply both conv stages; spatial size is preserved."""
        return self.double_conv(x)
class Down(nn.Module):
    """Encoder stage: 2x2 max-pool downscaling followed by DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels),
        )

    def forward(self, x):
        """Halve the spatial resolution and transform channels."""
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder stage: upscale, pad to the skip connection, concat, DoubleConv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Bilinear upsampling keeps the channel count; a transposed conv
        # halves it while learning the upsampling kernel.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_channels // 2, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample *x1*, pad it to match skip tensor *x2*, fuse and convolve."""
        x1 = self.up(x1)
        # Input is CHW: pad the upsampled map so odd-sized skips still align.
        diff_h = x2.size()[2] - x1.size()[2]
        diff_w = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2,
                        diff_h // 2, diff_h - diff_h // 2])
        merged = torch.cat([x2, x1], dim=1)
        return self.conv(merged)
class OutConv(nn.Module):
    """Final 1x1 projection to the requested number of output channels."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
class UNet(nn.Module):
    """U-Net encoder/decoder whose output activations are L2-normalized."""

    def __init__(self, n_channels, n_classes, bilinear=True):
        super().__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        # Module creation order preserved so parameter init matches the
        # reference layout.
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 512)
        self.up1 = Up(1024, 256, bilinear)
        self.up2 = Up(512, 128, bilinear)
        self.up3 = Up(256, 64, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        enc0 = self.inc(x)
        enc1 = self.down1(enc0)
        enc2 = self.down2(enc1)
        enc3 = self.down3(enc2)
        bottleneck = self.down4(enc3)
        dec = self.up1(bottleneck, enc3)
        dec = self.up2(dec, enc2)
        dec = self.up3(dec, enc1)
        dec = self.up4(dec, enc0)
        # NOTE(review): self.outc is registered but never applied here; the
        # 64-channel decoder output is L2-normalized and returned directly —
        # confirm this is intentional before relying on n_classes.
        return F.normalize(dec)
StyleMask | StyleMask-master/libs/DECA/decalib/models/lbs.py | # -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: ps-license@tuebingen.mpg.de
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import numpy as np
import torch
import torch.nn.functional as F
def rot_mat_to_euler(rot_mats):
    """Recover the y-axis rotation angle from batched 3x3 rotation matrices.

    Only reliable away from gimbal-lock configurations such as
    [0.0, pi, 0.0].
    """
    r00 = rot_mats[:, 0, 0]
    r10 = rot_mats[:, 1, 0]
    sy = torch.sqrt(r00 * r00 + r10 * r10)
    return torch.atan2(-rot_mats[:, 2, 0], sy)
def find_dynamic_lmk_idx_and_bcoords(vertices, pose, dynamic_lmk_faces_idx,
                                     dynamic_lmk_b_coords,
                                     neck_kin_chain, dtype=torch.float32):
    ''' Compute the faces, barycentric coordinates for the dynamic landmarks
        To do so, we first compute the rotation of the neck around the y-axis
        and then use a pre-computed look-up table to find the faces and the
        barycentric coordinates that will be used.
        Special thanks to Soubhik Sanyal (soubhik.sanyal@tuebingen.mpg.de)
        for providing the original TensorFlow implementation and for the LUT.
        Parameters
        ----------
        vertices: torch.tensor BxVx3, dtype = torch.float32
            The tensor of input vertices
        pose: torch.tensor Bx(Jx3), dtype = torch.float32
            The current pose of the body model
        dynamic_lmk_faces_idx: torch.tensor L, dtype = torch.long
            The look-up table from neck rotation to faces
        dynamic_lmk_b_coords: torch.tensor Lx3, dtype = torch.float32
            The look-up table from neck rotation to barycentric coordinates
        neck_kin_chain: list
            A python list that contains the indices of the joints that form the
            kinematic chain of the neck.
        dtype: torch.dtype, optional
        Returns
        -------
        dyn_lmk_faces_idx: torch.tensor, dtype = torch.long
            A tensor of size BxL that contains the indices of the faces that
            will be used to compute the current dynamic landmarks.
        dyn_lmk_b_coords: torch.tensor, dtype = torch.float32
            A tensor of size BxL that contains the indices of the faces that
            will be used to compute the current dynamic landmarks.
    '''
    batch_size = vertices.shape[0]
    # Gather the axis-angle poses of every joint along the neck chain.
    aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
                                 neck_kin_chain)
    rot_mats = batch_rodrigues(
        aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)
    # Compose the chain rotations into one relative rotation of the head.
    rel_rot_mat = torch.eye(3, device=vertices.device,
                            dtype=dtype).unsqueeze_(dim=0)
    for idx in range(len(neck_kin_chain)):
        rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)
    # Quantize the y-rotation to whole degrees, clamped to +/-39 — the
    # range the precomputed LUT covers.
    y_rot_angle = torch.round(
        torch.clamp(-rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                    max=39)).to(dtype=torch.long)
    # Map negative angles into the second half of the LUT (indices 40..78).
    neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
    mask = y_rot_angle.lt(-39).to(dtype=torch.long)
    neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
    y_rot_angle = (neg_mask * neg_vals +
                   (1 - neg_mask) * y_rot_angle)
    dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                           0, y_rot_angle)
    dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                          0, y_rot_angle)
    return dyn_lmk_faces_idx, dyn_lmk_b_coords
def vertices2landmarks(vertices, faces, lmk_faces_idx, lmk_bary_coords):
    """Interpolate landmark positions from mesh vertices.

    Each landmark lives on a mesh face and is expressed in barycentric
    coordinates; its position is the bary-weighted combination of that
    face's three vertex positions.

    Args:
        vertices: float tensor [B, V, 3], mesh vertex positions.
        faces: long tensor [F, 3], vertex indices of each face.
        lmk_faces_idx: long tensor with B*L entries, face index per landmark.
        lmk_bary_coords: float tensor [B, L, 3], barycentric weights.

    Returns:
        Float tensor [B, L, 3] with the landmark coordinates per batch item.
    """
    batch_size, num_verts = vertices.shape[:2]
    device = vertices.device
    # Vertex indices of each landmark's face: [B, L, 3].
    lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
        batch_size, -1, 3)
    # Offset indices per batch item so one flat gather covers the batch.
    batch_offset = torch.arange(
        batch_size, dtype=torch.long, device=device).view(-1, 1, 1) * num_verts
    lmk_faces = lmk_faces + batch_offset
    lmk_vertices = vertices.view(-1, 3)[lmk_faces].view(
        batch_size, -1, 3, 3)
    # Barycentric combination over the face axis.
    return torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
def lbs(betas, pose, v_template, shapedirs, posedirs, J_regressor, parents,
        lbs_weights, pose2rot=True, dtype=torch.float32):
    ''' Performs Linear Blend Skinning with the given shape and pose parameters
        Parameters
        ----------
        betas : torch.tensor BxNB
            The tensor of shape parameters
        pose : torch.tensor Bx(J + 1) * 3
            The pose parameters in axis-angle format
        v_template torch.tensor BxVx3
            The template mesh that will be deformed
        shapedirs : torch.tensor 1xNB
            The tensor of PCA shape displacements
        posedirs : torch.tensor Px(V * 3)
            The pose PCA coefficients
        J_regressor : torch.tensor JxV
            The regressor array that is used to calculate the joints from
            the position of the vertices
        parents: torch.tensor J
            The array that describes the kinematic tree for the model
        lbs_weights: torch.tensor N x V x (J + 1)
            The linear blend skinning weights that represent how much the
            rotation matrix of each part affects each vertex
        pose2rot: bool, optional
            Flag on whether to convert the input pose tensor to rotation
            matrices. The default value is True. If False, then the pose tensor
            should already contain rotation matrices and have a size of
            Bx(J + 1)x9
        dtype: torch.dtype, optional
        Returns
        -------
        verts: torch.tensor BxVx3
            The vertices of the mesh after applying the shape and pose
            displacements.
        joints: torch.tensor BxJx3
            The joints of the model
    '''
    batch_size = max(betas.shape[0], pose.shape[0])
    device = betas.device
    # Add shape contribution
    v_shaped = v_template + blend_shapes(betas, shapedirs)
    # Get the joints
    # NxJx3 array
    J = vertices2joints(J_regressor, v_shaped)
    # 3. Add pose blend shapes
    # N x J x 3 x 3
    ident = torch.eye(3, dtype=dtype, device=device)
    if pose2rot:
        # Axis-angle pose -> rotation matrices; the root joint (index 0) is
        # excluded from the pose-corrective feature.
        rot_mats = batch_rodrigues(
            pose.view(-1, 3), dtype=dtype).view([batch_size, -1, 3, 3])
        pose_feature = (rot_mats[:, 1:, :, :] - ident).view([batch_size, -1])
        # (N x P) x (P, V * 3) -> N x V x 3
        pose_offsets = torch.matmul(pose_feature, posedirs) \
            .view(batch_size, -1, 3)
    else:
        # Pose already given as rotation matrices.
        pose_feature = pose[:, 1:].view(batch_size, -1, 3, 3) - ident
        rot_mats = pose.view(batch_size, -1, 3, 3)
        pose_offsets = torch.matmul(pose_feature.view(batch_size, -1),
                                    posedirs).view(batch_size, -1, 3)
    v_posed = pose_offsets + v_shaped
    # 4. Get the global joint location
    J_transformed, A = batch_rigid_transform(rot_mats, J, parents, dtype=dtype)
    # 5. Do skinning:
    # W is N x V x (J + 1)
    W = lbs_weights.unsqueeze(dim=0).expand([batch_size, -1, -1])
    # (N x V x (J + 1)) x (N x (J + 1) x 16)
    num_joints = J_regressor.shape[0]
    T = torch.matmul(W, A.view(batch_size, num_joints, 16)) \
        .view(batch_size, -1, 4, 4)
    # Apply the blended 4x4 transforms to homogeneous vertex coordinates.
    homogen_coord = torch.ones([batch_size, v_posed.shape[1], 1],
                               dtype=dtype, device=device)
    v_posed_homo = torch.cat([v_posed, homogen_coord], dim=2)
    v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, dim=-1))
    verts = v_homo[:, :, :3, 0]
    return verts, J_transformed
def vertices2joints(J_regressor, vertices):
    """Regress 3D joint locations from batched mesh vertices.

    Args:
        J_regressor: float tensor [J, V]; per-joint vertex weights.
        vertices: float tensor [B, V, 3]; mesh vertex positions.

    Returns:
        Float tensor [B, J, 3] where
        ``joints[b, j] = sum_v J_regressor[j, v] * vertices[b, v]``.
    """
    return torch.einsum('bik,ji->bjk', [vertices, J_regressor])
def blend_shapes(betas, shape_disps):
    """Compute per-vertex displacements from blend-shape coefficients.

    Each shape displacement basis vector is scaled by its coefficient and
    the results are summed:
    ``out[b, m, k] = sum_l betas[b, l] * shape_disps[m, k, l]``.

    Args:
        betas: float tensor [B, num_betas], blend-shape coefficients.
        shape_disps: float tensor [V, 3, num_betas], blend-shape basis.

    Returns:
        Float tensor [B, V, 3] of per-vertex displacements.
    """
    return torch.einsum('bl,mkl->bmk', [betas, shape_disps])
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' Calculates the rotation matrices for a batch of rotation vectors.

    Parameters
    ----------
    rot_vecs: torch.tensor Nx3
        array of N axis-angle vectors
    epsilon: float
        numerical-stability offset added before taking the norm so a zero
        vector does not divide by zero (was hard-coded to 1e-8, ignoring
        this parameter; same default value, so behavior is unchanged)
    dtype: torch.dtype
        data type of the intermediate tensors

    Returns
    -------
    R: torch.tensor Nx3x3
        The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device

    angle = torch.norm(rot_vecs + epsilon, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle

    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)

    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)

    # Skew-symmetric cross-product matrix K of the rotation axis.
    # (A dead `K = torch.zeros(...)` assignment that was immediately
    # overwritten has been removed.)
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))

    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    # Rodrigues' rotation formula: R = I + sin(a) K + (1 - cos(a)) K^2
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
def transform_mat(R, t):
    ''' Creates a batch of homogeneous transformation matrices.

    Args:
        - R: Bx3x3 array of a batch of rotation matrices
        - t: Bx3x1 array of a batch of translation vectors
    Returns:
        - T: Bx4x4 Transformation matrix
    '''
    # Pad the rotation with a zero bottom row (Bx4x3) and the translation
    # with a trailing 1 (Bx4x1), then glue them side by side.
    rot_h = F.pad(R, [0, 0, 0, 1])
    trans_h = F.pad(t, [0, 0, 0, 1], value=1)
    return torch.cat([rot_h, trans_h], dim=2)
def batch_rigid_transform(rot_mats, joints, parents, dtype=torch.float32):
    """
    Applies a batch of rigid transformations to the joints

    Parameters
    ----------
    rot_mats : torch.tensor BxNx3x3
        Tensor of rotation matrices
    joints : torch.tensor BxNx3
        Locations of joints
    parents : torch.tensor BxN
        The kinematic tree of each object
    dtype : torch.dtype, optional:
        The data type of the created tensors, the default is torch.float32

    Returns
    -------
    posed_joints : torch.tensor BxNx3
        The locations of the joints after applying the pose rotations
    rel_transforms : torch.tensor BxNx4x4
        The relative (with respect to the root joint) rigid transformations
        for all the joints
    """
    joints = torch.unsqueeze(joints, dim=-1)

    # Joint offsets relative to their parent; the root keeps its absolute
    # position (parents[0] is expected to be -1).
    rel_joints = joints.clone()
    rel_joints[:, 1:] -= joints[:, parents[1:]]

    # Per-joint local 4x4 transforms. `reshape` (not `view`) is needed because
    # rel_joints is non-contiguous after the in-place subtraction above.
    transforms_mat = transform_mat(
        rot_mats.view(-1, 3, 3),
        rel_joints.reshape(-1, 3, 1)).reshape(-1, joints.shape[1], 4, 4)

    # Walk the kinematic chain, accumulating each parent's transform.
    transform_chain = [transforms_mat[:, 0]]
    for i in range(1, parents.shape[0]):
        curr_res = torch.matmul(transform_chain[parents[i]],
                                transforms_mat[:, i])
        transform_chain.append(curr_res)

    transforms = torch.stack(transform_chain, dim=1)

    # The last column of the transformations contains the posed joints.
    # (A duplicated assignment of posed_joints has been removed.)
    posed_joints = transforms[:, :, :3, 3]

    # Make the transforms relative to the rest pose:
    # T_rel = T - pad(T @ j_rest), which subtracts the rest-pose joint
    # location from the translation column only.
    joints_homogen = F.pad(joints, [0, 0, 0, 1])
    rel_transforms = transforms - F.pad(
        torch.matmul(transforms, joints_homogen), [3, 0, 0, 0, 0, 0, 0, 0])

    return posed_joints, rel_transforms
StyleMask | StyleMask-master/libs/DECA/decalib/models/FLAME.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import torch
import torch.nn as nn
import numpy as np
import pickle
import torch.nn.functional as F
from .lbs import lbs, batch_rodrigues, vertices2landmarks, rot_mat_to_euler
def to_tensor(array, dtype=torch.float32):
    """Convert *array* to a torch tensor of the given dtype.

    Bug fix: the original string-based type check compared against
    'torch.tensor' while the actual repr is "<class 'torch.Tensor'>", so the
    branch never matched -- and when it would have matched, the function fell
    through and implicitly returned None. Tensors are now passed through
    (cast to *dtype*); everything else is converted with torch.tensor.
    """
    if torch.is_tensor(array):
        return array.to(dtype)
    return torch.tensor(array, dtype=dtype)
def to_np(array, dtype=np.float32):
    """Convert *array* to a numpy array, densifying scipy sparse matrices first."""
    dense = array.todense() if 'scipy.sparse' in str(type(array)) else array
    return np.array(dense, dtype=dtype)
class Struct(object):
    """Plain attribute container: Struct(a=1).a == 1."""

    def __init__(self, **kwargs):
        # Expose every keyword argument as an instance attribute.
        for name in kwargs:
            setattr(self, name, kwargs[name])
class FLAME(nn.Module):
    """
    borrowed from https://github.com/soubhiksanyal/FLAME_PyTorch/blob/master/FLAME.py
    Given flame parameters this class generates a differentiable FLAME function
    which outputs a mesh and 2D/3D facial landmarks
    """
    def __init__(self, config):
        super(FLAME, self).__init__()
        print("creating the FLAME Decoder")
        with open(config.flame_model_path, 'rb') as f:
            ss = pickle.load(f, encoding='latin1')
            flame_model = Struct(**ss)

        self.dtype = torch.float32
        self.register_buffer('faces_tensor', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))
        # The vertices of the template model
        self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))
        # The shape components and expression: the first config.n_shape
        # identity components, then config.n_exp expression components
        # (expression components start at index 300 in the FLAME model).
        shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)
        shapedirs = torch.cat([shapedirs[:,:,:config.n_shape], shapedirs[:,:,300:300+config.n_exp]], 2)
        self.register_buffer('shapedirs', shapedirs)
        # The pose components, flattened to (num_pose_basis, V*3)
        num_pose_basis = flame_model.posedirs.shape[-1]
        posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T
        self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))
        # Joint regressor and kinematic tree
        self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))
        parents = to_tensor(to_np(flame_model.kintree_table[0])).long(); parents[0] = -1
        self.register_buffer('parents', parents)
        self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))

        # Fixing Eyeball and neck rotation (non-trainable parameters)
        default_eyball_pose = torch.zeros([1, 6], dtype=self.dtype, requires_grad=False)
        self.register_parameter('eye_pose', nn.Parameter(default_eyball_pose,
                                                         requires_grad=False))
        default_neck_pose = torch.zeros([1, 3], dtype=self.dtype, requires_grad=False)
        self.register_parameter('neck_pose', nn.Parameter(default_neck_pose,
                                                          requires_grad=False))

        # Static and Dynamic Landmark embeddings for FLAME.
        # np.save of a dict produces a 0-d object array; `[()]` unpacks it.
        lmk_embeddings = np.load(config.flame_lmk_embedding_path, allow_pickle=True, encoding='latin1')
        lmk_embeddings = lmk_embeddings[()]
        self.register_buffer('lmk_faces_idx', torch.from_numpy(lmk_embeddings['static_lmk_faces_idx']).long())
        self.register_buffer('lmk_bary_coords', torch.from_numpy(lmk_embeddings['static_lmk_bary_coords']).to(self.dtype))
        self.register_buffer('dynamic_lmk_faces_idx', lmk_embeddings['dynamic_lmk_faces_idx'].long())
        self.register_buffer('dynamic_lmk_bary_coords', lmk_embeddings['dynamic_lmk_bary_coords'].to(self.dtype))
        self.register_buffer('full_lmk_faces_idx', torch.from_numpy(lmk_embeddings['full_lmk_faces_idx']).long())
        self.register_buffer('full_lmk_bary_coords', torch.from_numpy(lmk_embeddings['full_lmk_bary_coords']).to(self.dtype))

        # Kinematic chain from the neck joint up to the root, used to compute
        # the head rotation for dynamic contour landmarks.
        neck_kin_chain = []; NECK_IDX=1
        curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)
        while curr_idx != -1:
            neck_kin_chain.append(curr_idx)
            curr_idx = self.parents[curr_idx]
        self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))

    def _find_dynamic_lmk_idx_and_bcoords(self, pose, dynamic_lmk_faces_idx,
                                          dynamic_lmk_b_coords,
                                          neck_kin_chain, dtype=torch.float32):
        """
        Selects the face contour depending on the relative position of the head
        Input:
            vertices: N X num_of_vertices X 3
            pose: N X full pose
            dynamic_lmk_faces_idx: The list of contour face indexes
            dynamic_lmk_b_coords: The list of contour barycentric weights
            neck_kin_chain: The tree to consider for the relative rotation
            dtype: Data type
        return:
            The contour face indexes and the corresponding barycentric weights
        """
        batch_size = pose.shape[0]

        # Accumulate the rotation along the neck kinematic chain.
        aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,
                                     neck_kin_chain)
        rot_mats = batch_rodrigues(
            aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)

        rel_rot_mat = torch.eye(3, device=pose.device,
                                dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)
        for idx in range(len(neck_kin_chain)):
            rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)

        # Quantize the yaw angle to the [-39, 39] degree range used to index
        # the dynamic contour tables.
        y_rot_angle = torch.round(
            torch.clamp(rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,
                        max=39)).to(dtype=torch.long)

        neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)
        mask = y_rot_angle.lt(-39).to(dtype=torch.long)
        neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)
        y_rot_angle = (neg_mask * neg_vals +
                       (1 - neg_mask) * y_rot_angle)

        dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,
                                               0, y_rot_angle)
        dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,
                                              0, y_rot_angle)
        return dyn_lmk_faces_idx, dyn_lmk_b_coords

    def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):
        """
        Calculates landmarks by barycentric interpolation
        Input:
            vertices: torch.tensor NxVx3, dtype = torch.float32
                The tensor of input vertices
            faces: torch.tensor (N*F)x3, dtype = torch.long
                The faces of the mesh
            lmk_faces_idx: torch.tensor N X L, dtype = torch.long
                The tensor with the indices of the faces used to calculate the
                landmarks.
            lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32
                The tensor of barycentric coordinates that are used to interpolate
                the landmarks
        Returns:
            landmarks: torch.tensor NxLx3, dtype = torch.float32
                The coordinates of the landmarks for each mesh in the batch
        """
        # Extract the indices of the vertices for each face
        # NxLx3
        # Bug fix: was `vertices.shape[:dd2]` with `dd2` undefined (NameError).
        batch_size, num_verts = vertices.shape[:2]
        lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(
            1, -1, 3).view(batch_size, lmk_faces_idx.shape[1], -1)

        # Offset the face indices so they address the flattened (N*V, 3) buffer.
        lmk_faces += torch.arange(batch_size, dtype=torch.long).view(-1, 1, 1).to(
            device=vertices.device) * num_verts

        lmk_vertices = vertices.view(-1, 3)[lmk_faces]
        # Barycentric interpolation of the three face vertices per landmark.
        landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])
        return landmarks

    def seletec_3d68(self, vertices):
        # NOTE: method name kept as-is (typo included) for backward
        # compatibility with existing callers.
        landmarks3d = vertices2landmarks(vertices, self.faces_tensor,
                                         self.full_lmk_faces_idx.repeat(vertices.shape[0], 1),
                                         self.full_lmk_bary_coords.repeat(vertices.shape[0], 1, 1))
        return landmarks3d

    def forward(self, shape_params=None, expression_params=None, pose_params=None, eye_pose_params=None):
        """
        Input:
            shape_params: N X number of shape parameters
            expression_params: N X number of expression parameters
            pose_params: N X number of pose parameters (6)
        return:
            vertices: N X V X 3
            landmarks: N X number of landmarks X 3
        """
        batch_size = shape_params.shape[0]
        if eye_pose_params is None:
            eye_pose_params = self.eye_pose.expand(batch_size, -1)
        betas = torch.cat([shape_params, expression_params], dim=1)
        # Full pose: global rotation, fixed neck, jaw, fixed/given eye poses.
        full_pose = torch.cat([pose_params[:, :3], self.neck_pose.expand(batch_size, -1), pose_params[:, 3:], eye_pose_params], dim=1)
        template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)

        vertices, _ = lbs(betas, full_pose, template_vertices,
                          self.shapedirs, self.posedirs,
                          self.J_regressor, self.parents,
                          self.lbs_weights, dtype=self.dtype)

        # Static 68-landmark embedding, with the contour part replaced by the
        # pose-dependent (dynamic) embedding.
        lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)
        lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)

        dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(
            full_pose, self.dynamic_lmk_faces_idx,
            self.dynamic_lmk_bary_coords,
            self.neck_kin_chain, dtype=self.dtype)
        lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)
        lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)

        landmarks2d = vertices2landmarks(vertices, self.faces_tensor,
                                         lmk_faces_idx,
                                         lmk_bary_coords)
        bz = vertices.shape[0]
        landmarks3d = vertices2landmarks(vertices, self.faces_tensor,
                                         self.full_lmk_faces_idx.repeat(bz, 1),
                                         self.full_lmk_bary_coords.repeat(bz, 1, 1))
        return vertices, landmarks2d, landmarks3d
class FLAMETex(nn.Module):
    """
    FLAME texture:
    https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64
    FLAME texture converted from BFM:
    https://github.com/TimoBolkart/BFM_to_FLAME
    """
    def __init__(self, config):
        super(FLAMETex, self).__init__()
        # Select the statistics of the chosen texture space (BFM or FLAME).
        if config.tex_type == 'BFM':
            mu_key = 'MU'
            pc_key = 'PC'
            n_pc = 199
            tex_path = config.tex_path
            tex_space = np.load(tex_path)
            texture_mean = tex_space[mu_key].reshape(1, -1)
            texture_basis = tex_space[pc_key].reshape(-1, n_pc)

        elif config.tex_type == 'FLAME':
            mu_key = 'mean'
            pc_key = 'tex_dir'
            n_pc = 200
            tex_path = config.flame_tex_path
            tex_space = np.load(tex_path)
            # FLAME texture statistics are stored in [0, 255]; scale to [0, 1].
            texture_mean = tex_space[mu_key].reshape(1, -1)/255.
            texture_basis = tex_space[pc_key].reshape(-1, n_pc)/255.
        else:
            print('texture type ', config.tex_type, 'not exist!')
            raise NotImplementedError

        # Keep only the first n_tex principal components.
        # (Removed the unused `num_components` local.)
        n_tex = config.n_tex
        texture_mean = torch.from_numpy(texture_mean).float()[None,...]
        texture_basis = torch.from_numpy(texture_basis[:,:n_tex]).float()[None,...]
        self.register_buffer('texture_mean', texture_mean)
        self.register_buffer('texture_basis', texture_basis)

    def forward(self, texcode):
        '''
        texcode: [batchsize, n_tex]
        texture: [bz, 3, 256, 256], range: 0-1
        '''
        # mean + linear combination of the basis, reshaped to a 512x512 UV map,
        # downsampled to 256 and reordered from BGR to RGB channel order.
        texture = self.texture_mean + (self.texture_basis*texcode[:,None,:]).sum(-1)
        texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0,3,1,2)
        texture = F.interpolate(texture, [256, 256])
        texture = texture[:,[2,1,0], :,:]
        return texture
StyleMask | StyleMask-master/libs/DECA/decalib/models/decoders.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import torch
import torch.nn as nn
class Generator(nn.Module):
    """Small DCGAN-style generator: latent vector -> (out_channels, 256, 256) map.

    The output is passed through tanh and scaled by `out_scale`, so values
    lie in [-out_scale, out_scale].
    """

    def __init__(self, latent_dim=100, out_channels=1, out_scale=0.01, sample_mode = 'bilinear'):
        super(Generator, self).__init__()
        self.out_scale = out_scale

        self.init_size = 32 // 4  # spatial size of the first feature map (8x8)
        self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))

        # Five upsampling stages: 8 -> 16 -> 32 -> 64 -> 128 -> 256, each one
        # Upsample / Conv / BatchNorm / LeakyReLU, followed by the output conv.
        layers = [nn.BatchNorm2d(128)]
        for in_ch, out_ch in [(128, 128), (128, 64), (64, 64), (64, 32), (32, 16)]:
            layers += [
                nn.Upsample(scale_factor=2, mode=sample_mode),
                nn.Conv2d(in_ch, out_ch, 3, stride=1, padding=1),
                nn.BatchNorm2d(out_ch, 0.8),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        layers += [
            nn.Conv2d(16, out_channels, 3, stride=1, padding=1),
            nn.Tanh(),
        ]
        self.conv_blocks = nn.Sequential(*layers)

    def forward(self, noise):
        hidden = self.l1(noise)
        hidden = hidden.view(hidden.shape[0], 128, self.init_size, self.init_size)
        return self.conv_blocks(hidden) * self.out_scale
StyleMask | StyleMask-master/libs/DECA/decalib/models/encoders.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch.nn as nn
import torch
import torch.nn.functional as F
from . import resnet
class ResnetEncoder(nn.Module):
    """ResNet-50 backbone followed by a two-layer MLP regressor.

    Maps an input image to `outsize` parameters; `last_op`, when given, is
    applied to the regressed parameters (e.g. a squashing nonlinearity).
    """

    def __init__(self, outsize, last_op=None):
        super(ResnetEncoder, self).__init__()
        feature_size = 2048
        # backbone producing 2048-d global features
        self.encoder = resnet.load_ResNet50Model()
        # regression head
        self.layers = nn.Sequential(
            nn.Linear(feature_size, 1024),
            nn.ReLU(),
            nn.Linear(1024, outsize),
        )
        self.last_op = last_op

    def forward(self, inputs):
        parameters = self.layers(self.encoder(inputs))
        if self.last_op:
            parameters = self.last_op(parameters)
        return parameters
| 1,424 | 33.756098 | 78 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/datasets/detectors.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch
# from libs.pose_estimation.fan_model.models import FAN, ResNetDepth
# from libs.pose_estimation.fan_model.utils import *
from enum import Enum
# from libs.pose_estimation.sfd.sfd_detector import SFDDetector as FaceDetector
class FAN(object):
    """Face "detector" built on the face-alignment FAN landmark model:
    the bounding box is the tight box around the 68 predicted landmarks.
    """

    def __init__(self):
        import face_alignment
        self.model = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=False)

    def run(self, image):
        '''
        image: 0-255, uint8, rgb, [h, w, 3]
        return: detected box list
        '''
        landmarks = self.model.get_landmarks(image)
        if landmarks is None:
            return [0], 'error'
        kpt = landmarks[0].squeeze()
        xs = kpt[:, 0]
        ys = kpt[:, 1]
        # [left, top, right, bottom] around the landmarks
        bbox = [np.min(xs), np.min(ys), np.max(xs), np.max(ys)]
        return bbox, 'kpt68'
class MTCNN(object):
    """Thin wrapper around facenet-pytorch's MTCNN face detector."""

    def __init__(self, device = 'cpu'):
        '''
        https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb
        '''
        from facenet_pytorch import MTCNN as mtcnn
        self.device = device
        self.model = mtcnn(keep_all=True)

    def run(self, input):
        '''
        image: 0-255, uint8, rgb, [h, w, 3]
        return: (bbox, 'bbox') on success, ([0], 'error') when no face is found
        '''
        out = self.model.detect(input[None,...])
        if out[0][0] is None:
            # Bug fix: previously returned a bare [0] while the success path
            # returns a tuple, so callers unpacking the result would crash.
            # Now mirrors FAN.run's error convention.
            return [0], 'error'
        bbox = out[0][0].squeeze()
        return bbox, 'bbox'
| 1,983 | 28.61194 | 95 | py |
StyleMask | StyleMask-master/libs/DECA/decalib/datasets/datasets.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import os, sys
import torch
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
import numpy as np
import cv2
import scipy
from skimage.io import imread, imsave
from skimage.transform import estimate_transform, warp, resize, rescale
from glob import glob
import scipy.io
import torch
import kornia
from . import detectors
class TestData(Dataset):
    """Dataset helper that detects a face in an image tensor and warps a
    square crop of it to `crop_size` x `crop_size` for DECA.
    """

    def __init__(self, iscrop=True, crop_size=224, scale=1.25):
        '''
        testpath: folder, imagepath_list, image path, video path
        '''
        self.crop_size = crop_size
        self.scale = scale          # padding factor around the detected box
        self.iscrop = iscrop
        self.resolution_inp = crop_size
        self.face_detector = detectors.FAN() # CHANGE

    def bbox2point(self, left, right, top, bottom, type='bbox'):
        ''' bbox from detector and landmarks are different

        Returns (old_size, center): the (scalar) box size and its center,
        using the convention appropriate for the box source -- landmark
        boxes are enlarged by 1.1, detector boxes are shifted down by 12%.
        '''
        if type=='kpt68':
            old_size = (right - left + bottom - top)/2*1.1
            center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 ])
        elif type=='bbox':
            old_size = (right - left + bottom - top)/2
            center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size*0.12])
        else:
            raise NotImplementedError
        return old_size, center

    def get_image_tensor(self, image):
        " image: tensor 3x256x256"
        img_tmp = image.clone()
        img_tmp = img_tmp.permute(1,2,0)   # -> (H, W, 3) for the detector
        # Bug fix: h and w were previously undefined (NameError) in the
        # no-face fallback below; derive them from the image itself.
        h, w = img_tmp.shape[0], img_tmp.shape[1]
        bbox, bbox_type = self.face_detector.run(img_tmp)
        if bbox_type != 'error':
            if len(bbox) < 4:
                print('no face detected! run original image')
                # Bug fix: width/height were also swapped (right used h,
                # bottom used w); use the full image as the box.
                left = 0; right = w-1; top = 0; bottom = h-1
            else:
                left = bbox[0]; right = bbox[2]
                top = bbox[1]; bottom = bbox[3]
            old_size, center = self.bbox2point(left, right, top, bottom, type=bbox_type)
            size = int(old_size*self.scale)
            # Similarity transform mapping the padded square crop to the
            # network input resolution.
            src_pts = np.array([[center[0]-size/2, center[1]-size/2], [center[0] - size/2, center[1]+size/2], [center[0]+size/2, center[1]-size/2]])
            DST_PTS = np.array([[0,0], [0,self.resolution_inp - 1], [self.resolution_inp - 1, 0]])
            tform = estimate_transform('similarity', src_pts, DST_PTS)

            theta = torch.tensor(tform.params, dtype=torch.float32).unsqueeze(0).cuda()
            image_tensor = image.clone()
            image_tensor = image_tensor.unsqueeze(0)
            # NOTE(review): `kornia.warp_affine` moved to
            # kornia.geometry.transform in newer kornia releases -- confirm
            # against the pinned version.
            dst_image = kornia.warp_affine(image_tensor, theta[:,:2,:], dsize=(224, 224))
            dst_image = dst_image.div(255.)
            return dst_image.squeeze(0), False
        else:
            # Detection failed entirely: hand back the untouched image and
            # flag the error to the caller.
            return image, True
StyleMask | StyleMask-master/libs/DECA/decalib/datasets/detectors_2.py | """
Calculate euler angles yaw pitch roll using deep network HopeNet
https://github.com/natanielruiz/deep-head-pose
The face detector used is SFD (taken from face-alignment FAN) https://github.com/1adrianb/face-alignment
"""
import os
import numpy as np
import sys
from matplotlib import pyplot as plt
import cv2
from enum import Enum
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.model_zoo import load_url
from torchvision import transforms
import torch.backends.cudnn as cudnn
import torchvision
import torch.nn.functional as F
from PIL import Image
# from .image_utils import imshow, imshow_nparray, image_resize
# from .visualization import print_values , draw_detected_face
from libs.pose_estimation.sfd.sfd_detector import SFDDetector as FaceDetector
from libs.pose_estimation.fan_model.models import FAN, ResNetDepth
from libs.pose_estimation.fan_model.utils import *
class LandmarksType(Enum):
    """Enum class defining the type of landmarks to detect.

    ``_2D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face
    ``_2halfD`` - these points represent the projection of the 3D points into the 2D image plane
    ``_3D`` - detect the points ``(x,y,z)`` in a 3D space

    """
    _2D = 1
    _2halfD = 2
    _3D = 3
class NetworkSize(Enum):
    """Depth (number of stacked hourglass modules) of the FAN network;
    only the 4-stack (LARGE) variant is used here."""
    # TINY = 1
    # SMALL = 2
    # MEDIUM = 3
    LARGE = 4

    def __new__(cls, value):
        # Plain object.__new__ so the enum's value is the raw integer.
        member = object.__new__(cls)
        member._value_ = value
        return member
    def __int__(self):
        # Allows int(NetworkSize.LARGE) == 4 when building model names.
        return self.value
# Download URLs for the pretrained face-alignment models: the 2D/3D FAN
# landmark networks and the depth-regression network.
models_urls = {
    '2DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/2DFAN4-11f355bf06.pth.tar',
    '3DFAN-4': 'https://www.adrianbulat.com/downloads/python-fan/3DFAN4-7835d9f11d.pth.tar',
    'depth': 'https://www.adrianbulat.com/downloads/python-fan/depth-2a464da4ea.pth.tar',
}
def get_preds_fromhm(hm, center=None, scale=None):
    """Obtain (x,y) coordinates given a set of N heatmaps. If the center
    and the scale is provided the function will return the points also in
    the original coordinate frame.

    Arguments:
        hm {torch.tensor} -- the predicted heatmaps, of shape [B, N, W, H]

    Keyword Arguments:
        center {torch.tensor} -- the center of the bounding box (default: {None})
        scale {float} -- face scale (default: {None})

    Returns:
        (preds, preds_orig) -- landmark coordinates in heatmap space and,
        when center/scale are given, in the original image frame (otherwise
        preds_orig is all zeros).
    """
    # Argmax over each flattened heatmap.
    # (Renamed `max` -> `max_vals` to stop shadowing the builtin.)
    max_vals, idx = torch.max(
        hm.view(hm.size(0), hm.size(1), hm.size(2) * hm.size(3)), 2)
    idx = idx + 1  # 1-based indices, matching the original FAN code
    preds = idx.view(idx.size(0), idx.size(1), 1).repeat(1, 1, 2).float()
    # Unravel the flat index into (x, y), still 1-based.
    preds[..., 0].apply_(lambda x: (x - 1) % hm.size(3) + 1)
    preds[..., 1].add_(-1).div_(hm.size(2)).floor_().add_(1)

    # Quarter-pixel refinement towards the larger neighbouring activation.
    # NOTE: the hard-coded 63 bound assumes 64x64 heatmaps (FAN upstream).
    for i in range(preds.size(0)):
        for j in range(preds.size(1)):
            hm_ = hm[i, j, :]
            pX, pY = int(preds[i, j, 0]) - 1, int(preds[i, j, 1]) - 1
            if pX > 0 and pX < 63 and pY > 0 and pY < 63:
                diff = torch.FloatTensor(
                    [hm_[pY, pX + 1] - hm_[pY, pX - 1],
                     hm_[pY + 1, pX] - hm_[pY - 1, pX]])
                preds[i, j].add_(diff.sign_().mul_(.25))

    preds.add_(-.5)

    preds_orig = torch.zeros(preds.size())
    if center is not None and scale is not None:
        for i in range(hm.size(0)):
            for j in range(hm.size(1)):
                # `transform` comes from fan_model.utils (star import above).
                preds_orig[i, j] = transform(
                    preds[i, j], center, scale, hm.size(2), True)

    return preds, preds_orig
def draw_detected_face(img, face):
    """Draw the bounding box of *face* onto *img* (in place).

    Arguments:
        img -- image as a numpy array (H, W, 3)
        face -- detection as [x_min, y_min, x_max, y_max, ...]; extra
                entries (e.g. the confidence score) are ignored

    Returns:
        the same image array with a 2-px rectangle drawn on it

    (Removed a large block of commented-out crop/margin code that was
    never executed.)
    """
    x_min = int(face[0])
    y_min = int(face[1])
    x_max = int(face[2])
    y_max = int(face[3])
    cv2.rectangle(img, (x_min, y_min), (x_max, y_max), (255, 0, 0), 2)
    return img
from os.path import abspath, dirname
# Absolute path of the directory containing this source file.
current_file_directory = dirname(abspath(__file__))
class LandmarksEstimation():
def __init__(self):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load all needed models - Face detector and Pose detector
network_size = NetworkSize.LARGE
network_size = int(network_size)
self.landmarks_type = LandmarksType._2D
self.flip_input = False
# SFD face detection
path_to_detector = './libs/pose_estimation/sfd/model/s3fd-619a316812.pth'
if not os.path.exists(path_to_detector):
'Search on scratch'
path_to_detector = '../../../scratch/k2033759/Finding_directions/pretrained_models/s3fd-619a316812.pth'
face_detector = 'sfd'
self.face_detector = FaceDetector(device='cuda', verbose=False,path_to_detector = path_to_detector)
self.transformations_image = transforms.Compose([transforms.Resize(224),
transforms.CenterCrop(224), transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
self.transformations = transforms.Compose([transforms.Resize(224),
transforms.CenterCrop(224),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
# Initialise the face alignemnt networks
self.face_alignment_net = FAN(network_size)
network_name = '2DFAN-' + str(network_size)
fan_weights = load_url(models_urls[network_name], map_location=lambda storage, loc: storage)
self.face_alignment_net.load_state_dict(fan_weights)
self.face_alignment_net.to(self.device)
self.face_alignment_net.eval()
def detect_landmarks_torch(self, images):
"""
images: torch Tensor B x C x W x H
detected_faces: B X 1 x 5
"""
detected_faces, error, error_index = self.face_detector.detect_from_batch(images)
faces = []
for i in range(images.shape[0]):
box = detected_faces[i]
if len(box) > 1:
max_conf = -1
max_ind = -1
for j in range(len(box)):
conf = box[j][4]
if conf > max_conf:
max_conf = conf
max_ind = j
box_new = box[max_ind]
box = box_new
faces.append(box)
else:
faces.append(box[0])
faces = np.asarray(faces)
bboxes = []
for i in range(faces.shape[0]):
kpt = self.find_landmarks_torch(faces[i], images[i])
kpt = kpt[0].detach().cpu().numpy()
left = np.min(kpt[:,0])
right = np.max(kpt[:,0])
top = np.min(kpt[:,1])
bottom = np.max(kpt[:,1])
bbox = [left, top, right, bottom]
bboxes.append(bbox)
return bboxes, 'kpt68'
def find_landmarks_torch(self, face, image):
center = torch.FloatTensor(
[(face[2] + face[0]) / 2.0,
(face[3] + face[1]) / 2.0])
center[1] = center[1] - (face[3] - face[1]) * 0.12
scale = (face[2] - face[0] + face[3] - face[1]) / self.face_detector.reference_scale
inp = crop_torch(image.unsqueeze(0), center, scale).float().cuda()
# print(inp.shape)
# imshow(inp.squeeze(0))
inp = inp.div(255.0)
out = self.face_alignment_net(inp)[-1]
if self.flip_input:
out = out + flip(self.face_alignment_net(flip(inp))
[-1], is_label=True) # patched inp_batch undefined variable error
out = out.cpu()
pts, pts_img = get_preds_fromhm(out, center, scale)
pts, pts_img = pts.view(-1, 68, 2) * 4, pts_img.view(-1, 68, 2)
return pts_img
    def find_landmarks(self, face, image):
        # Predict 68 facial landmarks inside the detected *face* box of *image*.
        # Returns (pts_img, out): landmark coordinates in image space and the
        # raw FAN heatmaps (moved back to GPU).
        # face = face[0]
        # Crop centre, shifted up by 12% of the box height (FAN convention).
        center = torch.FloatTensor(
            [(face[2] + face[0]) / 2.0,
             (face[3] + face[1]) / 2.0])
        center[1] = center[1] - (face[3] - face[1]) * 0.12
        scale = (face[2] - face[0] + face[3] - face[1]) / self.face_detector.reference_scale

        inp = crop_torch(image.unsqueeze(0), center, scale).float().cuda()
        # print(inp.shape)
        # imshow(inp.squeeze(0))
        inp = inp.div(255.0)  # network expects inputs in [0, 1]

        out = self.face_alignment_net(inp)[-1]
        if self.flip_input:
            # Average with the prediction on the horizontally flipped crop.
            out = out + flip(self.face_alignment_net(flip(inp))
                             [-1], is_label=True) # patched inp_batch undefined variable error
        out = out.cpu()
        pts, pts_img = get_preds_fromhm(out, center, scale)
        out = out.cuda()
        # Added 3D landmark support
        if self.landmarks_type == LandmarksType._3D:
            # NOTE(review): this branch uses self.depth_prediciton_net, which
            # is never created in the visible __init__ -- confirm it is set
            # elsewhere before enabling _3D landmarks.
            pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)
            # Render a Gaussian heatmap per landmark and feed image+heatmaps
            # to the depth network to regress the z coordinate.
            heatmaps = torch.zeros((68,256,256), dtype=torch.float32)
            for i in range(68):
                if pts[i, 0] > 0:
                    heatmaps[i] = draw_gaussian(
                        heatmaps[i], pts[i], 2)
            heatmaps = heatmaps.unsqueeze(0)
            heatmaps = heatmaps.to(self.device)
            if inp.shape[2] != heatmaps.shape[2] or inp.shape[3] != heatmaps.shape[3]:
                print(inp.shape)
                print(heatmaps.shape)
            depth_pred = self.depth_prediciton_net(
                torch.cat((inp, inp.shape[2] == heatmaps.shape[2] and heatmaps or heatmaps), 1)).view(68, 1) #.data.cpu().view(68, 1)
            # print(depth_pred.view(68, 1).shape)
            pts_img = pts_img.cuda()
            # Rescale the predicted depth to image units.
            pts_img = torch.cat(
                (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1)
        else:
            pts, pts_img = pts.view(-1, 68, 2) * 4, pts_img.view(-1, 68, 2)
        # if pts_img.requires_grad:
        #     pts_img.register_hook(lambda grad: print('pts_img',grad))
        # print(pts_img.requires_grad)
        return pts_img, out
    def face_detection(self, image, save_path, image_path):
        # Detect faces in *image* (H x W x 3 numpy array), take a square box
        # around each confident detection (conf > 0.9), pad it by 1.6x, crop
        # and resize to 256x256. Returns the crop of the LAST confident face,
        # or the original image when no face at all is detected.
        # NOTE(review): if faces are detected but none has conf > 0.9, `crop`
        # is never assigned and the final `return crop` raises NameError.
        # NOTE(review): `save_path` and `image_path` are unused (the writing
        # code was commented out).
        image_tensor = torch.tensor(np.transpose(image,(2,0,1))).float().cuda()
        if len(image_tensor.shape) == 3:
            image_tensor = image_tensor.unsqueeze(0).cuda()
            detected_faces,error,error_index = self.face_detector.detect_from_batch(image_tensor)
        else:
            detected_faces,error,error_index = self.face_detector.detect_from_batch(image_tensor)
        faces_num = 0
        if len(detected_faces[0]) == 0:
            # no face found: hand back the untouched image
            return image
        for face in detected_faces[0]:
            conf = face[4]
            # print('Conf {:.2f}'.format(conf))
            if conf > 0.9:
                x1 = face[0]
                y1 = face[1]
                x2 = face[2]
                y2 = face[3]
                w = x2-x1
                h = y2-y1
                cx = int(x1+w/2)
                cy = int(y1+h/2)
                # Make the box square by growing the smaller side.
                # NOTE(review): the *_hat values computed in both branches
                # below are dead -- they are recomputed from w_hat/h_hat
                # right after.
                if h>w:
                    w = h
                    x1_hat = cx - int(w/2)
                    if x1_hat < 0:
                        x1_hat = 0
                    x2_hat = x1_hat + w
                else:
                    h = w
                    y1_hat = cy - int(h/2)
                    if y1_hat < 0:
                        y1_hat = 0
                    y2_hat = y1_hat + h
                # print(int(w), int(h))
                # quit()
                # w = 100
                # h = 100
                # Pad the square box by a factor of 1.6 around its centre,
                # clamped to the top-left image border.
                w_hat = int(w*1.6)
                h_hat = int(h*1.6)
                x1_hat = cx - int(w_hat/2)
                if x1_hat < 0:
                    x1_hat = 0
                y1_hat = cy - int(h_hat/2)
                if y1_hat < 0:
                    y1_hat = 0
                x2_hat = x1_hat + w_hat
                y2_hat = y1_hat + h_hat
                crop = image.copy()
                # print(y1_hat, y2_hat, x1_hat, x2_hat)
                crop = crop[ y1_hat:y2_hat, x1_hat:x2_hat]
                # print(w_hat, h_hat)
                # `image_resize` is expected from the fan_model.utils star
                # import -- presumably returns (resized_image, scale).
                crop, scale = image_resize(crop, 256, 256)
                # (removed a long block of commented-out crop/save debug code
                # that duplicated the logic above)
        return crop
@torch.no_grad()
def detect_landmarks(self, image, detected_faces = None, draw_face = False):
    """Detect 68 facial landmarks for the single largest confident face.

    Args:
        image: image tensor; a 3D tensor is unsqueezed to a batch of one and
            moved to CUDA before detection. Only batch element 0 is used.
            (assumes CHW float layout as expected by self.face_detector —
            TODO confirm with callers)
        detected_faces: optional pre-computed detections; when None,
            self.face_detector.detect_from_batch(image) is called.
        draw_face: accepted for API compatibility; unused in this body.

    Returns:
        (landmarks, twoface, detected_faces) where landmarks is a
        [1, 68, 3] tensor for LandmarksType._3D, else [1, 68, 2];
        twoface is set True only if more than one face was filled in
        (cannot happen here: at most one face matches counter == index_face).
    """
    twoface = False
    if detected_faces is None:
        if len(image.shape) == 3:
            image = image.unsqueeze(0).cuda()
            detected_faces,error,error_index = self.face_detector.detect_from_batch(image)
        else:
            detected_faces,error,error_index = self.face_detector.detect_from_batch(image)
    twoface = False  # re-initialized; the assignment above is redundant
    batch = 0
    num_faces = 0
    em_max = -1
    index_face = 0
    # Pick the face with the largest bounding-box area (w*h).
    for face in detected_faces[0]:
        conf = face[4]
        w = face[2] - face[0]
        h = face[3] - face[1]
        em = w*h
        if em>em_max:
            em_max = em
            index_face = num_faces
        num_faces += 1
    size = len(detected_faces[0])  # NOTE(review): unused local, kept as-is
    # Landmark buffer: 3 coords for 3D landmarks, 2 otherwise.
    if self.landmarks_type == LandmarksType._3D:
        landmarks = torch.empty((1, 68, 3), requires_grad=True).cuda()
    else:
        landmarks = torch.empty((1, 68, 2), requires_grad=True).cuda()
    counter = 0
    # Fill landmarks only for the selected (largest) face, and only when its
    # detector confidence exceeds 0.99.
    for face in detected_faces[0]:
        conf = face[4]
        if conf > 0.99 and counter == index_face:
            pts_img, heatmaps = self.find_landmarks(face, image[0])
            landmarks[batch] = pts_img.cuda()
            batch += 1
        counter += 1
    if batch > 1:
        twoface = True
    return landmarks, twoface, detected_faces
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from skimage.io import imread
import imageio
from pytorch3d.structures import Meshes
from pytorch3d.io import load_obj
from pytorch3d.renderer.mesh import rasterize_meshes
from . import util
# from .rasterizer.standard_rasterize_cuda import standard_rasterize
class Pytorch3dRasterizer(nn.Module):
    """ Borrowed from https://github.com/facebookresearch/pytorch3d
    Notice:
        x,y,z are in image space, normalized
        can only render squared image now

    Rasterizes a mesh and interpolates arbitrary per-face-vertex attributes
    at each covered pixel, appending a visibility-mask channel.
    """
    def __init__(self, image_size=224):
        """
        use fixed raster_settings for rendering faces

        Args:
            image_size: side length of the square output raster.
        """
        super().__init__()
        # Hard-coded settings: no blur, a single face per pixel, no
        # perspective correction (orthographic-style interpolation).
        raster_settings = {
            'image_size': image_size,
            'blur_radius': 0.0,
            'faces_per_pixel': 1,
            'bin_size': None,
            'max_faces_per_bin':  None,
            'perspective_correct': False,
        }
        raster_settings = util.dict2obj(raster_settings)
        self.raster_settings = raster_settings

    def forward(self, vertices, faces, attributes=None):
        """Rasterize and interpolate attributes.

        Args:
            vertices: [bz, nv, 3] vertices in normalized image space.
            faces: [bz, nf, 3] vertex indices.
            attributes: [bz, nf, 3, D] per-face per-corner attributes.

        Returns:
            [bz, D+1, H, W] tensor: interpolated attributes plus a final
            visibility-mask channel (1 where a face covers the pixel).
        """
        fixed_vertices = vertices.clone()
        # Negate x,y to match pytorch3d's screen-coordinate convention.
        fixed_vertices[...,:2] = -fixed_vertices[...,:2]
        meshes_screen = Meshes(verts=fixed_vertices.float(), faces=faces.long())
        raster_settings = self.raster_settings
        pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
            meshes_screen,
            image_size=raster_settings.image_size,
            blur_radius=raster_settings.blur_radius,
            faces_per_pixel=raster_settings.faces_per_pixel,
            bin_size=raster_settings.bin_size,
            max_faces_per_bin=raster_settings.max_faces_per_bin,
            perspective_correct=raster_settings.perspective_correct,
        )
        # pix_to_face == -1 marks background pixels.
        vismask = (pix_to_face > -1).float()
        D = attributes.shape[-1]
        attributes = attributes.clone(); attributes = attributes.view(attributes.shape[0]*attributes.shape[1], 3, attributes.shape[-1])
        N, H, W, K, _ = bary_coords.shape
        mask = pix_to_face == -1
        pix_to_face = pix_to_face.clone()
        pix_to_face[mask] = 0  # dummy index so gather stays in-bounds
        idx = pix_to_face.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, D)
        pixel_face_vals = attributes.gather(0, idx).view(N, H, W, K, 3, D)
        # Barycentric interpolation of the three corner attribute values.
        pixel_vals = (bary_coords[..., None] * pixel_face_vals).sum(dim=-2)
        pixel_vals[mask] = 0  # Replace masked values in output.
        pixel_vals = pixel_vals[:,:,:,0].permute(0,3,1,2)
        pixel_vals = torch.cat([pixel_vals, vismask[:,:,:,0][:,None,:,:]], dim=1)
        return pixel_vals
class SRenderY(nn.Module):
    """Differentiable renderer for a fixed-topology (FLAME) head mesh.

    Loads topology and UV layout from ``obj_filename``, registers them as
    buffers, and provides textured/shape/depth/normal rendering plus
    spherical-harmonic, point and directional shading.
    """
    def __init__(self, image_size, obj_filename, uv_size=256, rasterizer_type='pytorch3d'):
        """
        Args:
            image_size: output raster side length.
            obj_filename: template mesh with UV coordinates.
            uv_size: resolution of the UV-space rasterizer.
            rasterizer_type: only 'pytorch3d' creates rasterizers here.
        """
        super(SRenderY, self).__init__()
        self.image_size = image_size
        self.uv_size = uv_size

        verts, faces, aux = load_obj(obj_filename)
        uvcoords = aux.verts_uvs[None, ...]  # (N, V, 2)
        uvfaces = faces.textures_idx[None, ...]  # (N, F, 3)
        faces = faces.verts_idx[None,...]
        if rasterizer_type == 'pytorch3d':
            self.rasterizer = Pytorch3dRasterizer(image_size)
            self.uv_rasterizer = Pytorch3dRasterizer(uv_size)

        # faces
        dense_triangles = util.generate_triangles(uv_size, uv_size)
        self.register_buffer('dense_faces', torch.from_numpy(dense_triangles).long()[None,:,:])
        self.register_buffer('faces', faces)
        self.register_buffer('raw_uvcoords', uvcoords)

        # uv coords: append homogeneous 1, map to [-1, 1], flip v axis
        uvcoords = torch.cat([uvcoords, uvcoords[:,:,0:1]*0.+1.], -1) #[bz, ntv, 3]
        uvcoords = uvcoords*2 - 1; uvcoords[...,1] = -uvcoords[...,1]
        face_uvcoords = util.face_vertices(uvcoords, uvfaces)
        self.register_buffer('uvcoords', uvcoords)
        self.register_buffer('uvfaces', uvfaces)
        self.register_buffer('face_uvcoords', face_uvcoords)

        # shape colors, for rendering shape overlay (uniform light gray)
        colors = torch.tensor([180, 180, 180])[None, None, :].repeat(1, faces.max()+1, 1).float()/255.
        face_colors = util.face_vertices(colors, faces)
        self.register_buffer('face_colors', face_colors)

        ## SH factors for lighting (order-2 spherical harmonics constants)
        pi = np.pi
        constant_factor = torch.tensor([1/np.sqrt(4*pi), ((2*pi)/3)*(np.sqrt(3/(4*pi))), ((2*pi)/3)*(np.sqrt(3/(4*pi))),\
                           ((2*pi)/3)*(np.sqrt(3/(4*pi))), (pi/4)*(3)*(np.sqrt(5/(12*pi))), (pi/4)*(3)*(np.sqrt(5/(12*pi))),\
                           (pi/4)*(3)*(np.sqrt(5/(12*pi))), (pi/4)*(3/2)*(np.sqrt(5/(12*pi))), (pi/4)*(1/2)*(np.sqrt(5/(4*pi)))]).float()
        self.register_buffer('constant_factor', constant_factor)

    def forward(self, vertices, transformed_vertices, albedos, lights=None, light_type='point'):
        '''
        -- Texture Rendering
        vertices: [batch_size, V, 3], vertices in world space, for calculating normals, then shading
        transformed_vertices: [batch_size, V, 3], range:normalized to [-1,1], projected vertices in image space (that is aligned to the iamge pixel), for rasterization
        albedos: [batch_size, 3, h, w], uv map
        lights:
            spherical homarnic: [N, 9(shcoeff), 3(rgb)]
            points/directional lighting: [N, n_lights, 6(xyzrgb)]
        light_type:
            point or directional

        NOTE: transformed_vertices is modified in place (z shifted by +10).
        '''
        batch_size = vertices.shape[0]
        ## rasterizer near 0 far 100. move mesh so minz larger than 0
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] + 10

        # attributes: UV coords | screen-space normals | world positions | world normals
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1)); face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1)); transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))
        attributes = torch.cat([self.face_uvcoords.expand(batch_size, -1, -1, -1),
                                transformed_face_normals.detach(),
                                face_vertices.detach(),
                                face_normals],
                               -1)
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)

        ####
        # vis mask (last channel appended by the rasterizer)
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()

        # albedo: sample the UV texture at the rasterized UV coordinates
        uvcoords_images = rendering[:, :3, :, :]; grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]
        albedo_images = F.grid_sample(albedos, grid, align_corners=False)

        # visible mask for pixels with positive normal direction
        transformed_normal_map = rendering[:, 3:6, :, :].detach()
        pos_mask = (transformed_normal_map[:, 2:, :, :] < -0.05).float()

        # shading: SH if lights has 9 coefficients, else point/directional
        normal_images = rendering[:, 9:12, :, :]
        if lights is not None:
            if lights.shape[1] == 9:
                shading_images = self.add_SHlight(normal_images, lights)
            else:
                if light_type=='point':
                    vertice_images = rendering[:, 6:9, :, :].detach()
                    shading = self.add_pointlight(vertice_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), normal_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), lights)
                    shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0,3,1,2)
                else:
                    shading = self.add_directionlight(normal_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), lights)
                    shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0,3,1,2)
            images = albedo_images*shading_images
        else:
            images = albedo_images
            shading_images = images.detach()*0.

        outputs = {
            'images': images*alpha_images,
            'albedo_images': albedo_images*alpha_images,
            'alpha_images': alpha_images,
            'pos_mask': pos_mask,
            'shading_images': shading_images,
            'grid': grid,
            'normals': normals,
            'normal_images': normal_images*alpha_images,
            'transformed_normals': transformed_normals,
        }

        return outputs

    def add_SHlight(self, normal_images, sh_coeff):
        '''
        Shade with order-2 spherical harmonics.
        sh_coeff: [bz, 9, 3]
        normal_images: [bz, 3, h, w]
        Returns shading: [bz, 3, h, w]
        '''
        N = normal_images
        # Nine SH basis functions evaluated at the per-pixel normals.
        sh = torch.stack([
                N[:,0]*0.+1., N[:,0], N[:,1], \
                N[:,2], N[:,0]*N[:,1], N[:,0]*N[:,2],
                N[:,1]*N[:,2], N[:,0]**2 - N[:,1]**2, 3*(N[:,2]**2) - 1
                ],
                1) # [bz, 9, h, w]
        sh = sh*self.constant_factor[None,:,None,None]
        shading = torch.sum(sh_coeff[:,:,:,None,None]*sh[:,:,None,:,:], 1) # [bz, 9, 3, h, w]
        return shading

    def add_pointlight(self, vertices, normals, lights):
        '''
        vertices: [bz, nv, 3]
        lights: [bz, nlight, 6]
        returns:
            shading: [bz, nv, 3]

        NOTE(review): unlike add_directionlight, the dot product here is NOT
        clamped to [0, 1], so back-facing points get negative shading.
        '''
        light_positions = lights[:,:,:3]; light_intensities = lights[:,:,3:]
        directions_to_lights = F.normalize(light_positions[:,:,None,:] - vertices[:,None,:,:], dim=3)
        # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)
        normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)
        shading = normals_dot_lights[:,:,:,None]*light_intensities[:,:,None,:]
        return shading.mean(1)

    def add_directionlight(self, normals, lights):
        '''
        normals: [bz, nv, 3]
        lights: [bz, nlight, 6]
        returns:
            shading: [bz, nv, 3]
        '''
        light_direction = lights[:,:,:3]; light_intensities = lights[:,:,3:]
        directions_to_lights = F.normalize(light_direction[:,:,None,:].expand(-1,-1,normals.shape[1],-1), dim=3)
        # Lambertian term, clamped so back-facing vertices receive no light.
        normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)
        shading = normals_dot_lights[:,:,:,None]*light_intensities[:,:,None,:]
        return shading.mean(1)

    def render_shape(self, vertices, transformed_vertices, images=None, detail_normal_images=None, lights=None):
        '''
        -- rendering shape with detail normal map

        Renders a gray shaded version of the mesh; if ``images`` is given,
        the shape is composited over it using the rasterized alpha.
        NOTE: transformed_vertices is modified in place (z shifted by +10).
        '''
        batch_size = vertices.shape[0]
        # set default lighting: four corner lights plus one frontal
        if lights is None:
            light_positions = torch.tensor(
                [
                [-1,1,1],
                [1,1,1],
                [-1,-1,1],
                [1,-1,1],
                [0,0,1]
                ]
            )[None,:,:].expand(batch_size, -1, -1).float()
            light_intensities = torch.ones_like(light_positions).float()*1.7
            lights = torch.cat((light_positions, light_intensities), 2).to(vertices.device)
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] + 10

        # Attributes: gray color | screen normals | positions | world normals
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1)); face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1)); transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))
        attributes = torch.cat([self.face_colors.expand(batch_size, -1, -1, -1),
                                transformed_face_normals.detach(),
                                face_vertices.detach(),
                                face_normals],
                               -1)
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)

        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()

        # albedo (here: the constant gray face color)
        albedo_images = rendering[:, :3, :, :]
        # mask out pixels whose screen-space normal faces away
        transformed_normal_map = rendering[:, 3:6, :, :].detach()
        pos_mask = (transformed_normal_map[:, 2:, :, :] < 0.15).float()

        # shading with directional lights; detail normals override if given
        normal_images = rendering[:, 9:12, :, :].detach()
        vertice_images = rendering[:, 6:9, :, :].detach()
        if detail_normal_images is not None:
            normal_images = detail_normal_images

        shading = self.add_directionlight(normal_images.permute(0,2,3,1).reshape([batch_size, -1, 3]), lights)
        shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0,3,1,2).contiguous()
        shaded_images = albedo_images*shading_images

        alpha_images = alpha_images*pos_mask
        if images is None:
            shape_images = shaded_images*alpha_images + torch.zeros_like(shaded_images).to(vertices.device)*(1-alpha_images)
        else:
            shape_images = shaded_images*alpha_images + images*(1-alpha_images)
        return shape_images

    def render_depth(self, transformed_vertices):
        '''
        -- rendering depth

        Returns a single-channel depth image normalized to [0, 1].
        NOTE: transformed_vertices is modified in place.
        '''
        batch_size = transformed_vertices.shape[0]

        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] - transformed_vertices[:,:,2].min()
        z = -transformed_vertices[:,:,2:].repeat(1,1,3).clone()
        z = z-z.min()
        z = z/z.max()
        # Attributes: per-vertex normalized depth, replicated to 3 channels
        attributes = util.face_vertices(z, self.faces.expand(batch_size, -1, -1))
        # rasterize
        transformed_vertices[:,:,2] = transformed_vertices[:,:,2] + 10
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)

        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        depth_images = rendering[:, :1, :, :]
        return depth_images

    def render_normal(self, transformed_vertices, normals):
        '''
        -- rendering normal

        Rasterizes the given per-vertex normals into a [bz, 3, h, w] image.
        '''
        batch_size = normals.shape[0]

        # Attributes
        attributes = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))
        # rasterize
        rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)

        ####
        alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()
        normal_images = rendering[:, :3, :, :]
        return normal_images

    def world2uv(self, vertices):
        '''
        warp vertices from world space to uv space
        vertices: [bz, V, 3]
        uv_vertices: [bz, 3, h, w]
        '''
        batch_size = vertices.shape[0]
        face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))
        uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1), self.uvfaces.expand(batch_size, -1, -1), face_vertices)[:, :3]
        return uv_vertices
'''
Default config for DECA
'''
from yacs.config import CfgNode as CN
import argparse
import yaml
import os
# Root configuration node with DECA's default settings.
cfg = CN()

# Project root: two directories above this file (libs/DECA).
abs_deca_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
cfg.deca_dir = abs_deca_dir
cfg.device = 'cuda'
cfg.device_id = '0'

cfg.pretrained_modelpath = os.path.join(cfg.deca_dir, 'data', 'deca_model.tar')

# ---------------------------------------------------------------------------- #
# Options for Face model
# ---------------------------------------------------------------------------- #
cfg.model = CN()
cfg.model.topology_path = os.path.join(cfg.deca_dir, 'data', 'head_template.obj')
# texture data original from http://files.is.tue.mpg.de/tbolkart/FLAME/FLAME_texture_data.zip
cfg.model.dense_template_path = os.path.join(cfg.deca_dir, 'data', 'texture_data_256.npy')
cfg.model.fixed_displacement_path = os.path.join(cfg.deca_dir, 'data', 'fixed_displacement_256.npy')
cfg.model.flame_model_path = os.path.join(cfg.deca_dir, 'data', 'generic_model.pkl')
cfg.model.flame_lmk_embedding_path = os.path.join(cfg.deca_dir, 'data', 'landmark_embedding.npy')
cfg.model.face_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_mask.png')
cfg.model.face_eye_mask_path = os.path.join(cfg.deca_dir, 'data', 'uv_face_eye_mask.png')
cfg.model.mean_tex_path = os.path.join(cfg.deca_dir, 'data', 'mean_texture.jpg')
cfg.model.tex_path = os.path.join(cfg.deca_dir, 'data', 'FLAME_albedo_from_BFM.npz')
cfg.model.tex_type = 'BFM' # BFM, FLAME, albedoMM
cfg.model.uv_size = 256
cfg.model.param_list = ['shape', 'tex', 'exp', 'pose', 'cam', 'light']
# Dimensions of each FLAME/DECA parameter group.
cfg.model.n_shape = 100
cfg.model.n_tex = 50
cfg.model.n_exp = 50
cfg.model.n_cam = 3
cfg.model.n_pose = 6
cfg.model.n_light = 27
cfg.model.use_tex = False
cfg.model.jaw_type = 'aa' # default use axis angle, another option: euler

## details branch (displacement map latent size and max displacement)
cfg.model.n_detail = 128
cfg.model.max_z = 0.01

# ---------------------------------------------------------------------------- #
# Options for Dataset
# ---------------------------------------------------------------------------- #
cfg.dataset = CN()
cfg.dataset.batch_size = 24
cfg.dataset.num_workers = 2
cfg.dataset.image_size = 224
def get_cfg_defaults():
    """Return a fresh copy of the default DECA configuration.

    A clone of the module-level ``cfg`` node is returned so callers can
    mutate their copy without altering the shared defaults.
    """
    defaults = cfg.clone()
    return defaults
def update_cfg(cfg, cfg_file):
    """Merge the YAML file at ``cfg_file`` into ``cfg`` and return a clone.

    Note: ``cfg`` itself is mutated by ``merge_from_file``; the returned
    node is an independent copy of the merged state.
    """
    cfg.merge_from_file(cfg_file)
    merged = cfg.clone()
    return merged
def parse_args():
    """Parse command-line arguments and build the runtime configuration.

    Recognizes one optional ``--cfg`` flag pointing to a YAML file; when
    given, its values are merged over the defaults and the path is recorded
    on the returned node as ``cfg_file``. The parsed namespace is echoed to
    stdout.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--cfg', type=str, help='cfg file path')
    parsed = arg_parser.parse_args()
    print(parsed, end='\n\n')

    run_cfg = get_cfg_defaults()
    if parsed.cfg is not None:
        run_cfg = update_cfg(run_cfg, parsed.cfg)
        run_cfg.cfg_file = parsed.cfg

    return run_cfg
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import numpy as np
import torch
import torch.nn.functional as F
import math
from collections import OrderedDict
import os
from scipy.ndimage import morphology
from skimage.io import imsave
import cv2
def upsample_mesh(vertices, normals, faces, displacement_map, texture_map, dense_template):
    ''' upsampling coarse mesh (with displacment map)
        vertices: vertices of coarse mesh, [nv, 3]
        normals: vertex normals, [nv, 3]
        faces: faces of coarse mesh, [nf, 3] (unused; dense faces come from the template)
        texture_map: texture map, [256, 256, 3]
        displacement_map: displacment map, [256, 256]
        dense_template: dict with the dense-mesh layout (pixel/face/barycentric data)
    Returns:
        dense_vertices: upsampled vertices with details, [number of dense vertices, 3]
        dense_colors: vertex color, [number of dense vertices, 3]
        dense_faces: [number of dense faces, 3]
    '''
    img_size = dense_template['img_size']  # unused; read kept for schema parity
    dense_faces = dense_template['f']
    x_coords = dense_template['x_coords']
    y_coords = dense_template['y_coords']
    valid_pixel_ids = dense_template['valid_pixel_ids']
    tri = dense_template['valid_pixel_3d_faces']
    bary = dense_template['valid_pixel_b_coords']

    def _interp(attr):
        # Barycentric combination of the three corner attributes per pixel.
        return (attr[tri[:, 0], :] * bary[:, 0][:, np.newaxis]
                + attr[tri[:, 1], :] * bary[:, 1][:, np.newaxis]
                + attr[tri[:, 2], :] * bary[:, 2][:, np.newaxis])

    surface_points = _interp(vertices)
    surface_normals = _interp(normals)
    surface_normals = surface_normals / np.linalg.norm(surface_normals, axis=-1)[:, np.newaxis]

    rows = y_coords[valid_pixel_ids].astype(int)
    cols = x_coords[valid_pixel_ids].astype(int)
    displacements = displacement_map[rows, cols]
    dense_colors = texture_map[rows, cols]

    # Offset each surface point along its (unit) normal by the displacement.
    offsets = np.einsum('i,ij->ij', displacements, surface_normals)
    dense_vertices = surface_points + offsets
    return dense_vertices, dense_colors, dense_faces
# borrowed from https://github.com/YadiraF/PRNet/blob/master/utils/write.py
def write_obj(obj_name,
              vertices,
              faces,
              colors=None,
              texture=None,
              uvcoords=None,
              uvfaces=None,
              inverse_face_order=False,
              normal_map=None,
              ):
    ''' Save 3D face model with texture.
    Ref: https://github.com/patrikhuber/eos/blob/bd00155ebae4b1a13b08bf5a991694d682abbada/include/eos/core/Mesh.hpp
    Args:
        obj_name: str, output path; '.obj' is appended if missing.
        vertices: shape = (nver, 3)
        colors: shape = (nver, 3), optional per-vertex RGB written inline.
        faces: shape = (ntri, 3), 0-based indices (converted to 1-based).
        texture: shape = (uv_size, uv_size, 3); when given, a sibling .mtl
            and .png are also written (uvcoords/uvfaces are then required —
            NOTE(review): passing texture without uvcoords raises here).
        uvcoords: shape = (nver, 2) max value<=1
        uvfaces: UV-space face indices matching faces.
        inverse_face_order: reverse winding of each face before writing.
        normal_map: optional image written as '<name>_normals.png' and
            referenced as a displacement map in the .mtl.
    '''
    if obj_name.split('.')[-1] != 'obj':
        obj_name = obj_name + '.obj'
    mtl_name = obj_name.replace('.obj', '.mtl')
    texture_name = obj_name.replace('.obj', '.png')
    material_name = 'FaceTexture'

    faces = faces.copy()
    # mesh lab start with 1, python/c++ start from 0
    faces += 1
    if inverse_face_order:
        faces = faces[:, [2, 1, 0]]
        if uvfaces is not None:
            uvfaces = uvfaces[:, [2, 1, 0]]

    # write obj
    with open(obj_name, 'w') as f:
        # first line: write mtlib(material library)
        # f.write('# %s\n' % os.path.basename(obj_name))
        # f.write('#\n')
        # f.write('\n')
        if texture is not None:
            f.write('mtllib %s\n\n' % os.path.basename(mtl_name))

        # write vertices (with optional inline per-vertex color)
        if colors is None:
            for i in range(vertices.shape[0]):
                f.write('v {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2]))
        else:
            for i in range(vertices.shape[0]):
                f.write('v {} {} {} {} {} {}\n'.format(vertices[i, 0], vertices[i, 1], vertices[i, 2], colors[i, 0], colors[i, 1], colors[i, 2]))

        # write uv coords
        if texture is None:
            # untextured: faces only, written in reversed corner order
            for i in range(faces.shape[0]):
                f.write('f {} {} {}\n'.format(faces[i, 2], faces[i, 1], faces[i, 0]))
        else:
            for i in range(uvcoords.shape[0]):
                f.write('vt {} {}\n'.format(uvcoords[i,0], uvcoords[i,1]))
            f.write('usemtl %s\n' % material_name)
            # write f: ver ind/ uv ind
            uvfaces = uvfaces + 1
            for i in range(faces.shape[0]):
                f.write('f {}/{} {}/{} {}/{}\n'.format(
                    #  faces[i, 2], uvfaces[i, 2],
                    #  faces[i, 1], uvfaces[i, 1],
                    #  faces[i, 0], uvfaces[i, 0]
                    faces[i, 0], uvfaces[i, 0],
                    faces[i, 1], uvfaces[i, 1],
                    faces[i, 2], uvfaces[i, 2]
                )
                )
            # write mtl (material file referencing the texture image)
            with open(mtl_name, 'w') as f:
                f.write('newmtl %s\n' % material_name)
                s = 'map_Kd {}\n'.format(os.path.basename(texture_name)) # map to image
                f.write(s)

                if normal_map is not None:
                    name, _ = os.path.splitext(obj_name)
                    normal_name = f'{name}_normals.png'
                    f.write(f'disp {normal_name}')
                    # out_normal_map = normal_map / (np.linalg.norm(
                    #     normal_map, axis=-1, keepdims=True) + 1e-9)
                    # out_normal_map = (out_normal_map + 1) * 0.5

                    cv2.imwrite(
                        normal_name,
                        # (out_normal_map * 255).astype(np.uint8)[:, :, ::-1]
                        normal_map
                    )
            cv2.imwrite(texture_name, texture)
# ---------------------------- process/generate vertices, normals, faces
def generate_triangles(h, w, margin_x=2, margin_y=5, mask = None):
    """Triangulate a regular h x w vertex grid.

    Vertices are indexed row-major (index = y*w + x). Each grid quad inside
    the margins produces two triangles; the final column swap flips the
    winding of every triangle. ``mask`` is accepted for API compatibility
    but unused.

    Returns an (n_triangles, 3) integer array of vertex indices.
    """
    # quad layout:
    # 0 1 ... w-1
    # w w+1
    # .
    # w*h
    quads = []
    for x in range(margin_x, w - 1 - margin_x):
        for y in range(margin_y, h - 1 - margin_y):
            top_left = y * w + x
            below = (y + 1) * w + x
            quads.append([top_left, top_left + 1, below])
            quads.append([top_left + 1, below + 1, below])
    tris = np.array(quads)
    return tris[:, [0, 2, 1]]
# borrowed from https://github.com/daniilidis-group/neural_renderer/blob/master/neural_renderer/vertices_to_faces.py
def face_vertices(vertices, faces):
    """Gather the corner coordinates of every face.

    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3] integer vertex indices
    :return: [batch size, number of faces, 3, 3]
    """
    assert vertices.ndimension() == 3 and faces.ndimension() == 3
    assert vertices.shape[0] == faces.shape[0]
    assert vertices.shape[2] == 3 and faces.shape[2] == 3

    bs, nv = vertices.shape[:2]
    # Offset each batch's indices by batch*nv so one flat gather suffices.
    offsets = (torch.arange(bs, dtype=torch.int32).to(vertices.device) * nv)[:, None, None]
    flat_verts = vertices.reshape((bs * nv, 3))
    # pytorch only supports long and byte tensors for indexing
    return flat_verts[(faces + offsets).long()]
def vertex_normals(vertices, faces):
    """Compute per-vertex unit normals, area-weighted over incident faces.

    :param vertices: [batch size, number of vertices, 3]
    :param faces: [batch size, number of faces, 3]
    :return: [batch size, number of vertices, 3]

    Each face contributes an (unnormalized, hence area-weighted) cross
    product to each of its three vertices; the sums are then normalized.
    Vertices referenced by no face end up with a (near-)zero normal thanks
    to F.normalize's eps guard.
    """
    assert (vertices.ndimension() == 3)
    assert (faces.ndimension() == 3)
    assert (vertices.shape[0] == faces.shape[0])
    assert (vertices.shape[2] == 3)
    assert (faces.shape[2] == 3)
    bs, nv = vertices.shape[:2]
    bs, nf = faces.shape[:2]
    device = vertices.device
    normals = torch.zeros(bs * nv, 3).to(device)

    faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None] # expanded faces
    vertices_faces = vertices.reshape((bs * nv, 3))[faces.long()]

    faces = faces.reshape(-1, 3)
    vertices_faces = vertices_faces.reshape(-1, 3, 3)

    # dim=-1 is passed explicitly: calling torch.cross without `dim` is
    # deprecated (it silently picks the first size-3 dimension). The inputs
    # here are (N, 3), so dim=-1 preserves the original behavior.
    normals.index_add_(0, faces[:, 1].long(),
                       torch.cross(vertices_faces[:, 2] - vertices_faces[:, 1], vertices_faces[:, 0] - vertices_faces[:, 1], dim=-1))
    normals.index_add_(0, faces[:, 2].long(),
                       torch.cross(vertices_faces[:, 0] - vertices_faces[:, 2], vertices_faces[:, 1] - vertices_faces[:, 2], dim=-1))
    normals.index_add_(0, faces[:, 0].long(),
                       torch.cross(vertices_faces[:, 1] - vertices_faces[:, 0], vertices_faces[:, 2] - vertices_faces[:, 0], dim=-1))

    normals = F.normalize(normals, eps=1e-6, dim=1)
    normals = normals.reshape((bs, nv, 3))
    # pytorch only supports long and byte tensors for indexing
    return normals
def batch_orth_proj(X, camera):
    ''' orthgraphic (weak-perspective) projection
        X: 3d vertices, [bz, n_point, 3]
        camera: scale and translation, [bz, 3], [scale, tx, ty]
    Returns:
        [bz, n_point, 3]: x,y translated by (tx, ty), then ALL three
        coordinates (z included) multiplied by scale — this matches the
        original behavior, where the scaled z is kept for depth ordering.
    '''
    camera = camera.clone().view(-1, 1, 3)
    X_trans = X[:, :, :2] + camera[:, :, 1:]
    X_trans = torch.cat([X_trans, X[:, :, 2:]], 2)
    # (dead locals `shape` and the `Xn` temp removed; math unchanged)
    return camera[:, :, 0:1] * X_trans
# -------------------------------------- image processing
# borrowed from: https://torchgeometry.readthedocs.io/en/latest/_modules/kornia/filters
def gaussian(window_size, sigma):
    """Return a normalized 1D Gaussian window of length ``window_size``."""
    center = window_size // 2
    denom = float(2 * sigma ** 2)
    weights = [torch.exp(torch.tensor(-(i - center) ** 2 / denom))
               for i in range(window_size)]
    window = torch.stack(weights)
    return window / window.sum()
def get_gaussian_kernel(kernel_size: int, sigma: float):
    r"""Return 1D Gaussian filter coefficients.

    Args:
        kernel_size (int): filter size; must be an odd positive integer.
        sigma (float): gaussian standard deviation.

    Returns:
        Tensor: ``(kernel_size,)`` tensor of normalized coefficients.

    Raises:
        TypeError: if ``kernel_size`` is not an odd positive integer.

    Examples::
        >>> get_gaussian_kernel(3, 2.5)
        tensor([0.3243, 0.3513, 0.3243])
    """
    valid = isinstance(kernel_size, int) and kernel_size > 0 and kernel_size % 2 == 1
    if not valid:
        raise TypeError("kernel_size must be an odd positive integer. "
                        "Got {}".format(kernel_size))
    return gaussian(kernel_size, sigma)
def get_gaussian_kernel2d(kernel_size, sigma):
    r"""Return a separable 2D Gaussian kernel as the outer product of two
    1D Gaussian windows.

    Args:
        kernel_size (Tuple[int, int]): (x, y) filter sizes; each must be
            odd and positive.
        sigma (Tuple[float, float]): (x, y) standard deviations.

    Returns:
        Tensor: ``(kernel_size[0], kernel_size[1])`` coefficient matrix.

    Raises:
        TypeError: if either argument is not a 2-tuple.

    Examples::
        >>> get_gaussian_kernel2d((3, 3), (1.5, 1.5))
        tensor([[0.0947, 0.1183, 0.0947],
                [0.1183, 0.1478, 0.1183],
                [0.0947, 0.1183, 0.0947]])
    """
    if not (isinstance(kernel_size, tuple) and len(kernel_size) == 2):
        raise TypeError("kernel_size must be a tuple of length two. Got {}"
                        .format(kernel_size))
    if not (isinstance(sigma, tuple) and len(sigma) == 2):
        raise TypeError("sigma must be a tuple of length two. Got {}"
                        .format(sigma))
    kernel_x = get_gaussian_kernel(kernel_size[0], sigma[0])
    kernel_y = get_gaussian_kernel(kernel_size[1], sigma[1])
    # Outer product: column vector times row vector.
    return torch.matmul(kernel_x.unsqueeze(-1), kernel_y.unsqueeze(-1).t())
def gaussian_blur(x, kernel_size=(3,3), sigma=(0.8,0.8)):
    """Depthwise Gaussian blur of a batched image tensor [b, c, h, w]."""
    b, c, h, w = x.shape
    weight = get_gaussian_kernel2d(kernel_size, sigma).to(x.device).to(x.dtype)
    # One copy of the kernel per channel; groups=c keeps channels separate.
    weight = weight.repeat(c, 1, 1, 1)
    pad = [(k - 1) // 2 for k in kernel_size]
    return F.conv2d(x, weight, padding=pad, stride=1, groups=c)
def _compute_binary_kernel(window_size):
r"""Creates a binary kernel to extract the patches. If the window size
is HxW will create a (H*W)xHxW kernel.
"""
window_range = window_size[0] * window_size[1]
kernel: torch.Tensor = torch.zeros(window_range, window_range)
for i in range(window_range):
kernel[i, i] += 1.0
return kernel.view(window_range, 1, window_size[0], window_size[1])
def median_blur(x, kernel_size=(3,3)):
    """Per-channel median filter over sliding ``kernel_size`` windows.

    x: [b, c, h, w]; borders are zero-padded before taking the median.
    """
    b, c, h, w = x.shape
    weight = _compute_binary_kernel(kernel_size).to(x.device).to(x.dtype)
    weight = weight.repeat(c, 1, 1, 1)
    pad = [(k - 1) // 2 for k in kernel_size]
    # Each output channel of the conv holds one window position's pixels.
    patches = F.conv2d(x, weight, padding=pad, stride=1, groups=c)
    patches = patches.view(b, c, -1, h, w)
    return torch.median(patches, dim=2)[0]
def get_laplacian_kernel2d(kernel_size: int):
    r"""Return a square Laplacian filter: all ones, with the center set to
    ``1 - kernel_size**2`` so the kernel sums to zero.

    Args:
        kernel_size (int): filter size; must be odd and positive.

    Returns:
        Tensor: ``(kernel_size, kernel_size)`` coefficient matrix.

    Raises:
        TypeError: if ``kernel_size`` is not an odd positive integer.

    Examples::
        >>> get_laplacian_kernel2d(3)
        tensor([[ 1.,  1.,  1.],
                [ 1., -8.,  1.],
                [ 1.,  1.,  1.]])
    """
    if not (isinstance(kernel_size, int) and kernel_size > 0 and kernel_size % 2 == 1):
        raise TypeError("ksize must be an odd positive integer. Got {}"
                        .format(kernel_size))
    kernel = torch.ones((kernel_size, kernel_size))
    center = kernel_size // 2
    kernel[center, center] = 1 - kernel_size ** 2
    return kernel
def laplacian(x):
    """Apply a depthwise 3x3 Laplacian filter to a [b, c, h, w] tensor.

    Based on the kornia laplacian filter implementation.
    """
    b, c, h, w = x.shape
    ksize = 3
    weight = get_laplacian_kernel2d(ksize).to(x.device).to(x.dtype)
    weight = weight.repeat(c, 1, 1, 1)
    return F.conv2d(x, weight, padding=(ksize - 1) // 2, stride=1, groups=c)
def angle2matrix(angles):
    ''' get rotation matrix from three rotation angles(degree). right-handed.
    Args:
        angles: [batch_size, 3] tensor containing X, Y, and Z angles.
            x: pitch. positive for looking down.
            y: yaw. positive for looking left.
            z: roll. positive for tilting head right.
    Returns:
        R: [batch_size, 3, 3]. rotation matrices (Rz.dot(Ry.dot(Rx))).
    '''
    # degrees -> radians
    angles = angles * (np.pi) / 180.
    s = torch.sin(angles)
    c = torch.cos(angles)
    cx, cy, cz = (c[:, 0], c[:, 1], c[:, 2])
    sx, sy, sz = (s[:, 0], s[:, 1], s[:, 2])
    # Row-major entries of Rz.dot(Ry.dot(Rx)).
    # BUGFIX: stack along dim=1 so the result is [batch_size, 9]. The
    # original stacked along dim=0 ([9, batch_size]), which scrambled the
    # reshape below for batch_size > 1 (result unchanged for batch_size == 1).
    R_flattened = torch.stack(
        [
            cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx,
            sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx,
            -sy, cy * sx, cy * cx,
        ],
        dim=1)  # [batch_size, 9]
    R = torch.reshape(R_flattened, (-1, 3, 3))  # [batch_size, 3, 3]
    return R
def binary_erosion(tensor, kernel_size=5):
    """Erode a batch of binary masks with a square structuring element.

    tensor: [bz, 1, h, w]; processed per-sample on the CPU via
    ``morphology.binary_erosion`` (imported elsewhere in this module --
    presumably scipy.ndimage or skimage.morphology, TODO confirm), then
    moved back to the input's device as float32.
    """
    # tensor: [bz, 1, h, w].
    device = tensor.device
    mask = tensor.cpu().numpy()
    structure=np.ones((kernel_size,kernel_size))
    new_mask = mask.copy()
    for i in range(mask.shape[0]):
        new_mask[i,0] = morphology.binary_erosion(mask[i,0], structure)
    return torch.from_numpy(new_mask.astype(np.float32)).to(device)
def flip_image(src_image, kps):
    '''
    purpose:
        flip a image given by src_image and the 2d keypoints
    flip_mode:
        0: horizontal flip
        >0: vertical flip
        <0: horizontal & vertical flip
    NOTE(review): despite the docstring above, only a horizontal flip
    (cv2.flip(..., 1)) is ever performed; there is no flip_mode parameter.
    '''
    h, w = src_image.shape[0], src_image.shape[1]
    src_image = cv2.flip(src_image, 1)
    if kps is not None:
        # mirror x coordinates
        kps[:, 0] = w - 1 - kps[:, 0]
        # swap left/right joints -- hard-coded for a 14-point skeleton
        # (assumes this keypoint ordering; TODO confirm against the dataset)
        kp_map = [5, 4, 3, 2, 1, 0, 11, 10, 9, 8, 7, 6, 12, 13]
        kps[:, :] = kps[kp_map]

    return src_image, kps
# -------------------------------------- io
def copy_state_dict(cur_state_dict, pre_state_dict, prefix='', load_name=None):
    """Copy matching parameters from ``pre_state_dict`` into ``cur_state_dict`` in place.

    Args:
        cur_state_dict: destination state dict; tensors are updated via ``copy_``.
        pre_state_dict: source state dict.
        prefix: optional prefix prepended to a destination key when looking
            it up in the source dict.
        load_name: if given, only destination keys containing this substring
            are considered.

    Missing keys and shape/dtype mismatches are skipped silently (best effort),
    matching the original behaviour.
    """
    def _get_params(key):
        # Look up the (possibly prefixed) key in the source dict.
        key = prefix + key
        if key in pre_state_dict:
            return pre_state_dict[key]
        return None

    for k in cur_state_dict:
        if load_name is not None and load_name not in k:
            continue
        v = _get_params(k)
        if v is None:
            # parameter not found in the source dict
            continue
        try:
            cur_state_dict[k].copy_(v)
        except Exception:
            # shape/dtype mismatch: keep the current value. (Was a bare
            # ``except:`` that also swallowed KeyboardInterrupt/SystemExit.)
            continue
def check_mkdir(path):
    """Create ``path`` (including parents) if it does not exist yet."""
    if os.path.exists(path):
        return
    print('creating %s' % path)
    os.makedirs(path)
def check_mkdirlist(pathlist):
    """Create every missing directory in ``pathlist`` (including parents)."""
    for p in pathlist:
        if os.path.exists(p):
            continue
        print('creating %s' % p)
        os.makedirs(p)
def tensor2image(tensor):
    """Convert a (3, H, W) tensor in [0, 1] RGB to an (H, W, 3) uint8 BGR image."""
    arr = tensor.detach().cpu().numpy() * 255.
    arr = np.clip(arr, 0, 255)
    arr = arr.transpose(1, 2, 0)[:, :, [2, 1, 0]]
    return arr.astype(np.uint8).copy()
def dict2obj(d):
    """Recursively turn a (possibly nested) dict into an attribute-access object.

    Non-dict values are returned unchanged.
    """
    if not isinstance(d, dict):
        return d

    class _Namespace(object):
        pass

    obj = _Namespace()
    for key, value in d.items():
        obj.__dict__[key] = dict2obj(value)
    return obj
class Struct(object):
    """Simple bag of attributes initialised from keyword arguments."""

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
# original saved file with DataParallel
def remove_module(state_dict):
    """Strip the ``module.`` prefix that ``nn.DataParallel`` adds to keys.

    Returns a new OrderedDict; keys without the prefix are kept unchanged.
    (The original sliced the first 7 characters off unconditionally, which
    mangled any key that was not actually prefixed.)
    """
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:] if k.startswith('module.') else k
        new_state_dict[name] = v
    return new_state_dict
def dict_tensor2npy(tensor_dict):
    """Take the first (batch) element of every tensor and convert it to numpy."""
    return {key: value[0].cpu().numpy() for key, value in tensor_dict.items()}
# ---------------------------------- visualization
# Last landmark index (0-based, inclusive) of each segment of the 68-point
# facial landmark layout; plot_kpts skips the connecting line after these
# indices so contours are not joined across segment borders.
end_list = np.array([17, 22, 27, 42, 48, 31, 36, 68], dtype = np.int32) - 1
def plot_kpts(image, kpts, color = 'r'):
    ''' Draw 68 key points
    Args:
        image: the input image
        kpt: (68, 2/3) landmark coordinates; if (68, 4), column 3 is a
            per-point confidence that overrides the colour (green if > 0.5,
            red otherwise).
    Returns:
        a copy of ``image`` with points and contour segments drawn on it.
    '''
    # BUGFIX: 'b' used to map to (255, 0, 0) -- i.e. not blue -- an obvious
    # copy/paste slip (cf. plot_verts). Unknown colour codes now fall back to
    # red instead of raising UnboundLocalError.
    if color == 'g':
        c = (0, 255, 0)
    elif color == 'b':
        c = (0, 0, 255)
    else:
        c = (255, 0, 0)

    image = image.copy()
    kpts = kpts.copy()

    for i in range(kpts.shape[0]):
        st = kpts[i, :2]
        if kpts.shape[1] == 4:
            # confidence-coloured point
            if kpts[i, 3] > 0.5:
                c = (0, 255, 0)
            else:
                c = (0, 0, 255)
        image = cv2.circle(image, (st[0], st[1]), 1, c, 2)
        if i in end_list:
            # last point of a contour segment: no connecting line
            continue
        ed = kpts[i + 1, :2]
        image = cv2.line(image, (st[0], st[1]), (ed[0], ed[1]), (255, 255, 255), 1)
    return image
def plot_verts(image, kpts, color = 'r'):
    ''' Draw key points as small circles.
    Args:
        image: the input image
        kpts: (N, >=2) array of pixel coordinates.
    Returns:
        a copy of ``image`` with one circle per point.
    '''
    if color == 'r':
        c = (255, 0, 0)
    elif color == 'g':
        c = (0, 255, 0)
    elif color == 'b':
        c = (0, 0, 255)
    elif color == 'y':
        c = (0, 255, 255)
    canvas = image.copy()
    for point in kpts:
        canvas = cv2.circle(canvas, (point[0], point[1]), 1, c, 2)
    return canvas
def tensor_vis_landmarks(images, landmarks, gt_landmarks=None, color = 'g', isScale=True):
    """Overlay predicted (and optional ground-truth) landmarks on a batch of images.

    Args:
        images: (B, 3, H, W) tensor, RGB in [0, 1] -- assumed square
            (scaling uses image.shape[0] only); TODO confirm.
        landmarks: (B, N, 2+) predicted landmarks; in [-1, 1] when
            ``isScale`` is True, otherwise already in pixels.
        gt_landmarks: optional ground-truth landmarks, drawn in red.
        color: colour code passed to plot_kpts / plot_verts.
    Returns:
        (B, 3, H, W) float tensor in [0, 1] with the drawings burnt in.
    """
    # visualize landmarks
    vis_landmarks = []
    images = images.cpu().numpy()
    predicted_landmarks = landmarks.detach().cpu().numpy()
    if gt_landmarks is not None:
        gt_landmarks_np = gt_landmarks.detach().cpu().numpy()
    for i in range(images.shape[0]):
        image = images[i]
        # CHW RGB [0,1] -> HWC BGR [0,255] for the cv2-based drawing helpers
        image = image.transpose(1,2,0)[:,:,[2,1,0]].copy(); image = (image*255)
        if isScale:
            # map [-1, 1] coordinates to pixel coordinates
            predicted_landmark = predicted_landmarks[i]*image.shape[0]/2 + image.shape[0]/2
        else:
            predicted_landmark = predicted_landmarks[i]
        if predicted_landmark.shape[0] == 68:
            # 68-point layout: draw contour segments as well
            image_landmarks = plot_kpts(image, predicted_landmark, color)
            if gt_landmarks is not None:
                image_landmarks = plot_verts(image_landmarks, gt_landmarks_np[i]*image.shape[0]/2 + image.shape[0]/2, 'r')
        else:
            image_landmarks = plot_verts(image, predicted_landmark, color)
            if gt_landmarks is not None:
                image_landmarks = plot_verts(image_landmarks, gt_landmarks_np[i]*image.shape[0]/2 + image.shape[0]/2, 'r')
        vis_landmarks.append(image_landmarks)
    vis_landmarks = np.stack(vis_landmarks)
    # BHWC BGR [0,255] -> BCHW RGB [0,1]
    vis_landmarks = torch.from_numpy(vis_landmarks[:,:,:,[2,1,0]].transpose(0,3,1,2))/255.#, dtype=torch.float32)
    return vis_landmarks
StyleMask | StyleMask-master/libs/DECA/decalib/utils/rotation_converter.py | # -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at deca@tue.mpg.de
# For commercial licensing contact, please contact ps-license@tuebingen.mpg.de
import torch
''' Rotation Converter
Representations:
euler angle(3), angle axis(3), rotation matrix(3x3), quaternion(4), continous repre
Ref:
https://kornia.readthedocs.io/en/v0.1.2/_modules/torchgeometry/core/conversions.html#
smplx/lbs
'''
# Module-level pi as a 1-element tensor; moved to the caller's device/dtype
# at the point of use (see rad2deg / deg2rad).
pi = torch.Tensor([3.14159265358979323846])
def rad2deg(tensor):
    """Convert angles from radians to degrees.

    Args:
        tensor (Tensor): tensor of arbitrary shape.
    Returns:
        Tensor: same shape as the input.
    Raises:
        TypeError: if ``tensor`` is not a ``torch.Tensor``.
    Example:
        >>> output = rad2deg(tgm.pi * torch.rand(1, 3, 3))
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    factor = pi.to(tensor.device).type(tensor.dtype)
    return 180. * tensor / factor
def deg2rad(tensor):
    """Convert angles from degrees to radians.

    Args:
        tensor (Tensor): tensor of arbitrary shape.
    Returns:
        Tensor: same shape as the input.
    Raises:
        TypeError: if ``tensor`` is not a ``torch.Tensor``.
    Example:
        >>> output = deg2rad(360. * torch.rand(1, 3, 3))
    """
    if not torch.is_tensor(tensor):
        raise TypeError("Input type is not a torch.Tensor. Got {}"
                        .format(type(tensor)))
    factor = pi.to(tensor.device).type(tensor.dtype)
    return tensor * factor / 180.
######### to quaternion
def euler_to_quaternion(r):
    """Convert Euler angles (radians, [..., 3] = x, y, z) to quaternions [..., 4].

    Output order is (w, x, y, z), consistent with
    quaternion_to_rotation_matrix below.
    """
    x = r[..., 0]
    y = r[..., 1]
    z = r[..., 2]

    # half angles
    z = z/2.0
    y = y/2.0
    x = x/2.0
    cz = torch.cos(z)
    sz = torch.sin(z)
    cy = torch.cos(y)
    sy = torch.sin(y)
    cx = torch.cos(x)
    sx = torch.sin(x)
    # Allocate a [..., 4] zero tensor shaped like the input widened to 4
    # columns. NOTE(review): ``r.repeat(1, 2)`` only works for a 2-D
    # [batch, 3] input -- confirm callers never pass higher-rank tensors.
    quaternion = torch.zeros_like(r.repeat(1,2))[..., :4].to(r.device)
    quaternion[..., 0] += cx*cy*cz - sx*sy*sz
    quaternion[..., 1] += cx*sy*sz + cy*cz*sx
    quaternion[..., 2] += cx*cz*sy - sx*cy*sz
    quaternion[..., 3] += cx*cy*sz + sx*cz*sy
    return quaternion
def rotation_matrix_to_quaternion(rotation_matrix, eps=1e-6):
    """Convert 3x4 rotation matrix to 4d quaternion vector
    This algorithm is based on algorithm described in
    https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py#L201
    Args:
        rotation_matrix (Tensor): the rotation matrix to convert.
    Return:
        Tensor: the rotation in quaternion
    Shape:
        - Input: :math:`(N, 3, 4)`
        - Output: :math:`(N, 4)`
    Example:
        >>> input = torch.rand(4, 3, 4)  # Nx3x4
        >>> output = tgm.rotation_matrix_to_quaternion(input)  # Nx4
    """
    if not torch.is_tensor(rotation_matrix):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(rotation_matrix)))

    if len(rotation_matrix.shape) > 3:
        raise ValueError(
            "Input size must be a three dimensional tensor. Got {}".format(
                rotation_matrix.shape))
    # if not rotation_matrix.shape[-2:] == (3, 4):
    #     raise ValueError(
    #         "Input size must be a N x 3 x 4  tensor. Got {}".format(
    #             rotation_matrix.shape))

    rmat_t = torch.transpose(rotation_matrix, 1, 2)

    # Per-matrix branch conditions of the Shepperd-style algorithm: pick
    # whichever of the four trace combinations below is numerically stable.
    mask_d2 = rmat_t[:, 2, 2] < eps

    mask_d0_d1 = rmat_t[:, 0, 0] > rmat_t[:, 1, 1]
    mask_d0_nd1 = rmat_t[:, 0, 0] < -rmat_t[:, 1, 1]

    # Four candidate quaternions (q0..q3), one per branch, each scaled by
    # its own normaliser t0..t3.
    t0 = 1 + rmat_t[:, 0, 0] - rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q0 = torch.stack([rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      t0, rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2]], -1)
    t0_rep = t0.repeat(4, 1).t()

    t1 = 1 - rmat_t[:, 0, 0] + rmat_t[:, 1, 1] - rmat_t[:, 2, 2]
    q1 = torch.stack([rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] + rmat_t[:, 1, 0],
                      t1, rmat_t[:, 1, 2] + rmat_t[:, 2, 1]], -1)
    t1_rep = t1.repeat(4, 1).t()

    t2 = 1 - rmat_t[:, 0, 0] - rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q2 = torch.stack([rmat_t[:, 0, 1] - rmat_t[:, 1, 0],
                      rmat_t[:, 2, 0] + rmat_t[:, 0, 2],
                      rmat_t[:, 1, 2] + rmat_t[:, 2, 1], t2], -1)
    t2_rep = t2.repeat(4, 1).t()

    t3 = 1 + rmat_t[:, 0, 0] + rmat_t[:, 1, 1] + rmat_t[:, 2, 2]
    q3 = torch.stack([t3, rmat_t[:, 1, 2] - rmat_t[:, 2, 1],
                      rmat_t[:, 2, 0] - rmat_t[:, 0, 2],
                      rmat_t[:, 0, 1] - rmat_t[:, 1, 0]], -1)
    t3_rep = t3.repeat(4, 1).t()

    # Exactly one of the four masks is 1 per matrix; select that candidate.
    mask_c0 = mask_d2 * mask_d0_d1.float()
    mask_c1 = mask_d2 * (1 - mask_d0_d1.float())
    mask_c2 = (1 - mask_d2.float()) * mask_d0_nd1
    mask_c3 = (1 - mask_d2.float()) * (1 - mask_d0_nd1.float())
    mask_c0 = mask_c0.view(-1, 1).type_as(q0)
    mask_c1 = mask_c1.view(-1, 1).type_as(q1)
    mask_c2 = mask_c2.view(-1, 1).type_as(q2)
    mask_c3 = mask_c3.view(-1, 1).type_as(q3)

    q = q0 * mask_c0 + q1 * mask_c1 + q2 * mask_c2 + q3 * mask_c3
    q /= torch.sqrt(t0_rep * mask_c0 + t1_rep * mask_c1 +  # noqa
                    t2_rep * mask_c2 + t3_rep * mask_c3)  # noqa
    q *= 0.5
    return q
# def angle_axis_to_quaternion(theta):
# batch_size = theta.shape[0]
# l1norm = torch.norm(theta + 1e-8, p=2, dim=1)
# angle = torch.unsqueeze(l1norm, -1)
# normalized = torch.div(theta, angle)
# angle = angle * 0.5
# v_cos = torch.cos(angle)
# v_sin = torch.sin(angle)
# quat = torch.cat([v_cos, v_sin * normalized], dim=1)
# return quat
def angle_axis_to_quaternion(angle_axis: torch.Tensor) -> torch.Tensor:
    """Convert an angle axis to a quaternion.
    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        angle_axis (torch.Tensor): tensor with angle axis.
    Return:
        torch.Tensor: tensor with quaternion (w, x, y, z).
    Shape:
        - Input: :math:`(*, 3)` where `*` means, any number of dimensions
        - Output: :math:`(*, 4)`
    Example:
        >>> angle_axis = torch.rand(2, 4)  # Nx4
        >>> quaternion = tgm.angle_axis_to_quaternion(angle_axis)  # Nx3
    """
    if not torch.is_tensor(angle_axis):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(angle_axis)))

    if not angle_axis.shape[-1] == 3:
        raise ValueError("Input must be a tensor of shape Nx3 or 3. Got {}"
                         .format(angle_axis.shape))
    # unpack input and compute conversion
    a0: torch.Tensor = angle_axis[..., 0:1]
    a1: torch.Tensor = angle_axis[..., 1:2]
    a2: torch.Tensor = angle_axis[..., 2:3]
    theta_squared: torch.Tensor = a0 * a0 + a1 * a1 + a2 * a2

    theta: torch.Tensor = torch.sqrt(theta_squared)
    half_theta: torch.Tensor = theta * 0.5

    mask: torch.Tensor = theta_squared > 0.0
    ones: torch.Tensor = torch.ones_like(half_theta)

    # k = sin(theta/2)/theta, with the small-angle limit 1/2 where theta == 0.
    # NOTE(review): sin/theta is still evaluated at theta == 0 (NaN) before
    # torch.where masks it out -- the value is correct, but gradients through
    # that branch may be NaN; confirm if backprop through zero rotations is
    # required.
    k_neg: torch.Tensor = 0.5 * ones
    k_pos: torch.Tensor = torch.sin(half_theta) / theta
    k: torch.Tensor = torch.where(mask, k_pos, k_neg)
    w: torch.Tensor = torch.where(mask, torch.cos(half_theta), ones)

    quaternion: torch.Tensor = torch.zeros_like(angle_axis)
    quaternion[..., 0:1] += a0 * k
    quaternion[..., 1:2] += a1 * k
    quaternion[..., 2:3] += a2 * k
    # print(quaternion)
    return torch.cat([w, quaternion], dim=-1)
#### quaternion to
def quaternion_to_rotation_matrix(quat):
    """Convert quaternion coefficients to rotation matrix.
    Args:
        quat: size = [B, 4]  4 <===>(w, x, y, z)
    Returns:
        Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
    """
    # Normalise so non-unit quaternions still yield proper rotations.
    norm_quat = quat
    norm_quat = norm_quat / norm_quat.norm(p=2, dim=1, keepdim=True)
    w, x, y, z = norm_quat[:, 0], norm_quat[:, 1], norm_quat[:, 2], norm_quat[:, 3]

    B = quat.size(0)

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    # Standard quaternion -> rotation matrix formula, row-major.
    rotMat = torch.stack([w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz,
                          2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx,
                          2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
    return rotMat
def quaternion_to_angle_axis(quaternion: torch.Tensor):
    """Convert quaternion vector to angle axis of rotation. TODO: CORRECT
    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h
    Args:
        quaternion (torch.Tensor): tensor with quaternions (w, x, y, z).
    Return:
        torch.Tensor: tensor with angle axis of rotation.
    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`
    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = tgm.quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))

    if not quaternion.shape[-1] == 4:
        raise ValueError("Input must be a tensor of shape Nx4 or 4. Got {}"
                         .format(quaternion.shape))
    # unpack input and compute conversion
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3

    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # atan2 on the (sign-adjusted) pair handles the q/-q double cover so the
    # returned angle is always taken from the shorter rotation.
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0,
        torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))

    # k = 2*theta / (2*sin(theta/2)...), with the small-angle limit 2.0.
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta).to(quaternion.device)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)

    angle_axis: torch.Tensor = torch.zeros_like(quaternion).to(quaternion.device)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
#### batch converter
def batch_euler2axis(r):
    """Euler angles (radians) -> axis-angle representation, batched."""
    quat = euler_to_quaternion(r)
    return quaternion_to_angle_axis(quat)
def batch_euler2matrix(r):
    """Euler angles (radians) -> rotation matrices, batched."""
    quat = euler_to_quaternion(r)
    return quaternion_to_rotation_matrix(quat)
def batch_matrix2euler(rot_mats):
    """Convert a batch of rotation matrices to Euler angles.

    Careful for extreme cases of euler angles like [0.0, pi, 0.0]: near the
    gimbal-lock poles (|R[2, 0]| > 0.998) only the combined y/z rotation can
    be recovered, so z is fixed to 0 there.

    Args:
        rot_mats: [N, 3, 3] rotation matrices.
    Returns:
        [N, 3] tensor with (x, y, z) angles in radians for every matrix.
        (BUGFIX: the original filled yaw/pitch/roll buffers but returned a
        [1, 3] tensor holding only the LAST matrix's angles; identical
        result for N == 1.)
    """
    import math

    half_pi = math.pi / 2
    angles = torch.zeros(rot_mats.shape[0], 3)
    for index, R in enumerate(rot_mats):
        if R[2, 0] > 0.998:
            # gimbal lock, looking "up": only y + z is observable
            z = 0.
            x = half_pi
            # BUGFIX: was a bare ``atan2`` / ``np.pi`` -- both undefined in
            # this module (only torch is imported).
            y = z + torch.atan2(-R[0, 1], -R[0, 2])
        elif R[2, 0] < -0.998:
            # gimbal lock, looking "down"
            z = 0.
            x = -half_pi
            y = -z + torch.atan2(R[0, 1], R[0, 2])
        else:
            x = torch.asin(R[2, 0])
            y = torch.atan2(R[2, 1] / torch.cos(x), R[2, 2] / torch.cos(x))
            z = torch.atan2(R[1, 0] / torch.cos(x), R[0, 0] / torch.cos(x))
        angles[index, 0] = x
        angles[index, 1] = y
        angles[index, 2] = z
    return angles
def batch_matrix2axis(rot_mats):
    """Rotation matrices -> axis-angle representation, batched."""
    quat = rotation_matrix_to_quaternion(rot_mats)
    return quaternion_to_angle_axis(quat)
def batch_axis2matrix(theta):
    """Axis-angle (N x 3) -> rotation matrices (N x 3 x 3), batched.

    Equivalent to batch_rodrigues, implemented via the quaternion helpers.
    """
    quat = angle_axis_to_quaternion(theta)
    return quaternion_to_rotation_matrix(quat)
def batch_axis2euler(theta):
    """Axis-angle -> Euler angles, batched."""
    mats = batch_axis2matrix(theta)
    return batch_matrix2euler(mats)
def batch_orth_proj(X, camera):
    """Scaled orthographic projection.

    Args:
        X: [N, num_points, 3] points.
        camera: per-sample (scale, tx, ty), reshaped to [N, 1, 3].
    Returns:
        [N, num_points, 3]: x/y shifted by (tx, ty), z kept, everything
        multiplied by the scale.
    """
    cam = camera.clone().view(-1, 1, 3)
    scale, trans = cam[:, :, 0:1], cam[:, :, 1:]
    shifted_xy = X[:, :, :2] + trans
    shifted = torch.cat([shifted_xy, X[:, :, 2:]], 2)
    return scale * shifted
def batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):
    ''' same as batch_axis2matrix
        Calculates the rotation matrices for a batch of rotation vectors
        Parameters
        ----------
        rot_vecs: torch.tensor Nx3
            array of N axis-angle vectors
        Returns
        -------
        R: torch.tensor Nx3x3
            The rotation matrices for the given axis-angle parameters
    '''
    batch_size = rot_vecs.shape[0]
    device = rot_vecs.device

    # Rotation angle = vector norm; +1e-8 avoids a zero division for the
    # identity rotation (note the ``epsilon`` parameter is unused here).
    angle = torch.norm(rot_vecs + 1e-8, dim=1, keepdim=True)
    rot_dir = rot_vecs / angle

    cos = torch.unsqueeze(torch.cos(angle), dim=1)
    sin = torch.unsqueeze(torch.sin(angle), dim=1)

    # Bx1 arrays
    rx, ry, rz = torch.split(rot_dir, 1, dim=1)
    K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)

    # K is the skew-symmetric cross-product matrix of the rotation axis.
    zeros = torch.zeros((batch_size, 1), dtype=dtype, device=device)
    K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \
        .view((batch_size, 3, 3))

    # Rodrigues formula: R = I + sin(a) K + (1 - cos(a)) K^2
    ident = torch.eye(3, dtype=dtype, device=device).unsqueeze(dim=0)
    rot_mat = ident + sin * K + (1 - cos) * torch.bmm(K, K)
    return rot_mat
| 12,670 | 30.132678 | 87 | py |
hedgecut | hedgecut-master/python/prepare_propublica.py | import pandas as pd
from experimentation.encoding import discretize, ordinalize, binarize
from sklearn.model_selection import train_test_split
"""The custom pre-processing function is adapted from
https://github.com/IBM/AIF360/blob/master/aif360/algorithms/preprocessing/optim_preproc_helpers/data_preproc_functions.py
https://github.com/fair-preprocessing/nips2017/blob/master/compas/code/Generate_Compas_Data.ipynb
"""
# Load the raw ProPublica COMPAS data and keep the columns of interest.
df = pd.read_csv('datasets/propublica-recidivism.csv', na_values='?', sep=',')
df = df[['age', 'c_charge_degree', 'race', 'age_cat', 'score_text',
         'sex', 'priors_count', 'days_b_screening_arrest', 'decile_score',
         'is_recid', 'two_year_recid', 'c_jail_in', 'c_jail_out']]
# Standard COMPAS filtering: screening within +/-100 days of arrest, a known
# recidivism flag, an ordinary charge degree and a valid COMPAS score.
ix = df['days_b_screening_arrest'] <= 100
ix = (df['days_b_screening_arrest'] >= -100) & ix
ix = (df['is_recid'] != -1) & ix
ix = (df['c_charge_degree'] != "O") & ix
ix = (df['score_text'] != 'N/A') & ix
df = df.loc[ix, :]
# Length of the jail stay in days (computed but not selected below).
df['length_of_stay'] = (pd.to_datetime(df['c_jail_out']) - pd.to_datetime(df['c_jail_in'])).apply(
    lambda x: x.days)
# NOTE(review): 'decile_score' appears twice in this selection -- likely
# unintentional duplication; verify downstream column handling.
df = df[['age', 'decile_score', 'priors_count', 'days_b_screening_arrest', 'decile_score', 'is_recid',
         'c_charge_degree', 'age_cat', 'score_text', 'sex', 'race', 'two_year_recid']]
# 80/20 train/test split.
train_samples, test_samples = train_test_split(df, test_size=0.2)
# Fit the discretizers/encoders on the training split only; the fitted
# transformers are reused further down to transform the held-out split.
age, age_discretizer = discretize(train_samples, 'age')
decile_score, decile_score_discretizer = discretize(train_samples, 'decile_score')
priors_count, priors_count_discretizer = discretize(train_samples, 'priors_count')
days_b_screening_arrest, days_b_screening_arrest_discretizer = discretize(train_samples, 'days_b_screening_arrest')
is_recid = train_samples['is_recid'].values
c_charge_degree = train_samples.apply(lambda row: binarize(row, 'c_charge_degree', 'F'), axis=1).values
sex = train_samples.apply(lambda row: binarize(row, 'sex', 'Female'), axis=1).values
age_cat, age_cat_encoder = ordinalize(df, train_samples, 'age_cat')
score_text, score_text_encoder = ordinalize(df, train_samples, 'score_text')
race, race_encoder = ordinalize(df, train_samples, 'race')
labels = train_samples['two_year_recid'].values
# Write the encoded training split as a tab-separated file; record_id is the
# 0-based row index within the training split.
with open('datasets/propublica-train.csv', 'w') as file:

    file.write(f'record_id\tage\tdecile_score\tpriors_count\tdays_b_screening_arrest\tis_recid\tc_charge_degree\t' +
               'sex\tage_cat\tscore_text\trace\tlabel\n')

    for i in range(0, len(train_samples)):
        line = '\t'.join([
            str(i),
            str(int(age[i][0])),
            str(int(decile_score[i][0])),
            str(int(priors_count[i][0])),
            str(int(days_b_screening_arrest[i][0])),
            str(is_recid[i]),
            str(c_charge_degree[i]),
            str(sex[i]),
            str(age_cat[i]),
            str(score_text[i]),
            str(race[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
# Transform the held-out split with the encoders fitted on the training data.
# BUGFIX: the original used ``age, age_discretizer.transform(...)`` (a comma
# instead of ``=``) on several lines below, which built and discarded a tuple
# -- so the variables kept their *training* values and the test CSV was
# written with training-split encodings. Fixed to proper assignments.
age = age_discretizer.transform(test_samples['age'].values.reshape(-1, 1))
decile_score = decile_score_discretizer.transform(test_samples['decile_score'].values.reshape(-1, 1))
priors_count = priors_count_discretizer.transform(test_samples['priors_count'].values.reshape(-1, 1))
days_b_screening_arrest = days_b_screening_arrest_discretizer\
    .transform(test_samples['days_b_screening_arrest'].values.reshape(-1, 1))
is_recid = test_samples['is_recid'].values
c_charge_degree = test_samples.apply(lambda row: binarize(row, 'c_charge_degree', 'F'), axis=1).values
sex = test_samples.apply(lambda row: binarize(row, 'sex', 'Female'), axis=1).values
age_cat = age_cat_encoder.transform(test_samples['age_cat'].values.reshape(-1, 1))
score_text = score_text_encoder.transform(test_samples['score_text'].values.reshape(-1, 1))
race = race_encoder.transform(test_samples['race'].values.reshape(-1, 1))
labels = test_samples['two_year_recid'].values
# Write the encoded test split; record ids continue after the training ids.
with open('datasets/propublica-test.csv', 'w') as file:

    file.write(f'record_id\tage\tdecile_score\tpriors_count\tdays_b_screening_arrest\tis_recid\tc_charge_degree\t' +
               'sex\tage_cat\tscore_text\trace\tlabel\n')

    for i in range(0, len(test_samples)):
        line = '\t'.join([
            str(i + len(train_samples)),
            str(int(age[i][0])),
            str(int(decile_score[i][0])),
            str(int(priors_count[i][0])),
            str(int(days_b_screening_arrest[i][0])),
            str(is_recid[i]),
            str(c_charge_degree[i]),
            str(sex[i]),
            str(age_cat[i]),
            str(score_text[i]),
            str(race[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
| 4,586 | 45.806122 | 121 | py |
hedgecut | hedgecut-master/python/prepare_shopping.py | import numpy as np
import pandas as pd
from experimentation.encoding import discretize, ordinalize, binarize
from sklearn.model_selection import train_test_split
# Load the online-shoppers dataset, drop incomplete rows and normalise the
# boolean columns to strings before the 80/20 split.
raw_data = pd.read_csv('datasets/shopping.csv', sep=',', index_col=False)
raw_data = raw_data.dropna()
raw_data['Weekend'] = raw_data['Weekend'].astype(str)
raw_data['Revenue'] = raw_data['Revenue'].astype(str)

train_samples, test_samples = train_test_split(raw_data, test_size=0.2)

# Fit the discretizers/encoders on the training split only; they are reused
# below to transform the held-out split.
administrative, administrative_discretizer = discretize(train_samples, 'Administrative')
administrative_duration, administrative_duration_discretizer = discretize(train_samples, 'Administrative_Duration')
informational, informational_discretizer = discretize(train_samples, 'Informational')
informational_duration, informational_duration_discretizer = discretize(train_samples, 'Informational_Duration')
product_related, product_related_discretizer = discretize(train_samples, 'ProductRelated')
product_related_duration, product_related_duration_discretizer = discretize(train_samples, 'ProductRelated_Duration')
bounce_rates, bounce_rates_discretizer = discretize(train_samples, 'BounceRates')
exit_rates, exit_rates_discretizer = discretize(train_samples, 'ExitRates')
page_values, page_values_discretizer = discretize(train_samples, 'PageValues')
special_day, special_day_discretizer = discretize(train_samples, 'SpecialDay')
month, month_encoder = ordinalize(raw_data, train_samples, 'Month')
# These columns are 1-based categorical codes in the raw data; shift to 0-based.
operating_systems = train_samples['OperatingSystems'].values - 1
browser = train_samples['Browser'].values - 1
region = train_samples['Region'].values - 1
traffic_type = train_samples['TrafficType'].values - 1
visitor_type, visitor_type_encoder = ordinalize(raw_data, train_samples, 'VisitorType')
weekend = train_samples.apply(lambda row: binarize(row, 'Weekend', 'True'), axis=1).values
labels = train_samples.apply(lambda row: binarize(row, 'Revenue', 'True'), axis=1).values
# Write the encoded training split as a tab-separated file; record_id is the
# 0-based row index within the training split.
with open('datasets/shopping-train.csv', 'w') as file:

    file.write(f'record_id\tadministrative\tadministrative_duration\tinformational\tinformational_duration' +
               f'\tproduct_related\tproduct_related_duration\tbounce_rates\texit_rates\tpage_values\t' +
               f'special_day\tmonth\toperating_systems\tbrowser\tregion\ttraffic_type\tvisitor_type\t' +
               f'weekend\tlabel\n')

    for i in range(0, len(train_samples)):
        line = '\t'.join([
            str(i),
            str(int(administrative[i][0])),
            str(int(administrative_duration[i][0])),
            str(int(informational[i][0])),
            str(int(informational_duration[i][0])),
            str(int(product_related[i][0])),
            str(int(product_related_duration[i][0])),
            str(int(bounce_rates[i][0])),
            str(int(exit_rates[i][0])),
            str(int(page_values[i][0])),
            str(int(special_day[i][0])),
            str(month[i]),
            str(operating_systems[i]),
            str(browser[i]),
            str(region[i]),
            str(traffic_type[i]),
            str(visitor_type[i]),
            str(weekend[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
# Transform the held-out split with the encoders fitted on the training data.
administrative = administrative_discretizer.transform(test_samples['Administrative'].values.reshape(-1, 1))
administrative_duration = administrative_duration_discretizer.transform(test_samples['Administrative_Duration']
                                                                        .values.reshape(-1, 1))
informational = informational_discretizer.transform(test_samples['Informational'].values.reshape(-1, 1))
informational_duration = informational_duration_discretizer.transform(test_samples['Informational_Duration']
                                                                      .values.reshape(-1, 1))
product_related = product_related_discretizer.transform(test_samples['ProductRelated'].values.reshape(-1, 1))
product_related_duration = product_related_duration_discretizer.transform(test_samples['ProductRelated_Duration']
                                                                          .values.reshape(-1, 1))
bounce_rates = bounce_rates_discretizer.transform(test_samples['BounceRates'].values.reshape(-1, 1))
exit_rates = exit_rates_discretizer.transform(test_samples['ExitRates'].values.reshape(-1, 1))
page_values = page_values_discretizer.transform(test_samples['PageValues'].values.reshape(-1, 1))
special_day = special_day_discretizer.transform(test_samples['SpecialDay'].values.reshape(-1, 1))
month = month_encoder.transform(test_samples['Month'].values.reshape(-1, 1))
# BUGFIX: this line used train_samples, so training-split values were written
# into the test file's operating_systems column.
operating_systems = test_samples['OperatingSystems'].values - 1
browser = test_samples['Browser'].values - 1
region = test_samples['Region'].values - 1
traffic_type = test_samples['TrafficType'].values - 1
visitor_type = visitor_type_encoder.transform(test_samples['VisitorType'].values.reshape(-1, 1))
weekend = test_samples.apply(lambda row: binarize(row, 'Weekend', 'True'), axis=1).values
labels = test_samples.apply(lambda row: binarize(row, 'Revenue', 'True'), axis=1).values
# Write the encoded test split; record ids continue after the training ids.
with open('datasets/shopping-test.csv', 'w') as file:

    file.write(f'record_id\tadministrative\tadministrative_duration\tinformational\tinformational_duration' +
               f'\tproduct_related\tproduct_related_duration\tbounce_rates\texit_rates\tpage_values\t' +
               f'special_day\tmonth\toperating_systems\tbrowser\tregion\ttraffic_type\tvisitor_type\t' +
               f'weekend\tlabel\n')

    for i in range(0, len(test_samples)):
        line = '\t'.join([
            str(i + len(train_samples)),
            str(int(administrative[i][0])),
            str(int(administrative_duration[i][0])),
            str(int(informational[i][0])),
            str(int(informational_duration[i][0])),
            str(int(product_related[i][0])),
            str(int(product_related_duration[i][0])),
            str(int(bounce_rates[i][0])),
            str(int(exit_rates[i][0])),
            str(int(page_values[i][0])),
            str(int(special_day[i][0])),
            str(month[i]),
            str(operating_systems[i]),
            str(browser[i]),
            str(region[i]),
            str(traffic_type[i]),
            str(visitor_type[i]),
            str(weekend[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
hedgecut | hedgecut-master/python/train_time.py | import pandas as pd
from experimentation.baseline import train_time
# Benchmark training time on each pre-processed dataset: load the training
# split and hand it, together with the candidate feature columns, to
# experimentation.baseline.train_time. All splits share the 'label' column.
label_attribute = 'label'

train_samples = pd.read_csv('datasets/adult-train.csv', sep='\t')
attribute_candidates = ['age', 'workclass', 'fnlwgt', 'education', 'marital_status', 'occupation', 'relationship',
                        'race', 'sex', 'capital_gain', 'hours_per_week', 'native_country']
train_time('adult', train_samples, attribute_candidates, label_attribute)

train_samples = pd.read_csv('datasets/cardio-train.csv', sep='\t')
attribute_candidates = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'glucose', 'smoke',
                        'alcohol', 'active']
train_time('cardio', train_samples, attribute_candidates, label_attribute)

train_samples = pd.read_csv('datasets/givemesomecredit-train.csv', sep='\t')
attribute_candidates = ['revolving_util', 'age', 'past_due', 'debt_ratio', 'income', 'lines',
                        'real_estate', 'dependents']
train_time('givemesomecredit', train_samples, attribute_candidates, label_attribute)

train_samples = pd.read_csv('datasets/propublica-train.csv', sep='\t')
attribute_candidates = ['age', 'decile_score', 'priors_count', 'days_b_screening_arrest', 'is_recid',
                        'c_charge_degree', 'sex', 'age_cat', 'score_text', 'race']
train_time('propublica', train_samples, attribute_candidates, label_attribute)

train_samples = pd.read_csv('datasets/shopping-train.csv', sep='\t')
attribute_candidates = ['administrative', 'administrative_duration', 'informational', 'informational_duration',
                        'product_related', 'product_related_duration', 'bounce_rates', 'exit_rates', 'page_values',
                        'special_day', 'month', 'operating_systems', 'browser', 'region', 'traffic_type',
                        'visitor_type', 'weekend']
train_time('shopping', train_samples, attribute_candidates, label_attribute)
| 1,931 | 46.121951 | 115 | py |
hedgecut | hedgecut-master/python/prepare_cardio.py | import pandas as pd
from experimentation.encoding import discretize, ordinalize, binarize
from sklearn.model_selection import train_test_split
# Load the cardio dataset, drop incomplete rows and do an 80/20 split.
raw_data = pd.read_csv('datasets/cardio.csv', sep=';')
raw_data = raw_data.dropna()

train_samples, test_samples = train_test_split(raw_data, test_size=0.2)

# Fit the discretizers on the training split only; 1-based categorical codes
# (gender, cholesterol, gluc) are shifted to 0-based.
age, age_discretizer = discretize(train_samples, 'age')
gender = train_samples['gender'].values - 1
height, height_discretizer = discretize(train_samples, 'height')
weight, weight_discretizer = discretize(train_samples, 'weight')
ap_hi, ap_hi_discretizer = discretize(train_samples, 'ap_hi')
ap_lo, ap_lo_discretizer = discretize(train_samples, 'ap_lo')
cholesterol = train_samples['cholesterol'].values - 1
glucose = train_samples['gluc'].values - 1
smoke = train_samples['smoke'].values
alcohol = train_samples['alco'].values
active = train_samples['active'].values
labels = train_samples['cardio'].values

# Write the encoded training split; record_id is the 0-based row index.
with open('datasets/cardio-train.csv', 'w') as file:

    file.write(f'record_id\tage\tgender\theight\tweight\tap_hi\tap_lo\tcholesterol\tglucose\tsmoke\talcohol\t' +
               'active\tlabel\n')

    for i in range(0, len(train_samples)):
        line = '\t'.join([
            str(i),
            str(int(age[i][0])),
            str(gender[i]),
            str(int(height[i][0])),
            str(int(weight[i][0])),
            str(int(ap_hi[i][0])),
            str(int(ap_lo[i][0])),
            str(cholesterol[i]),
            str(glucose[i]),
            str(smoke[i]),
            str(alcohol[i]),
            str(active[i]),
            str(labels[i])
        ])
        file.write(line + '\n')

# Transform the held-out split with the discretizers fitted above.
age = age_discretizer.transform(test_samples['age'].values.reshape(-1, 1))
gender = test_samples['gender'].values - 1
height = height_discretizer.transform(test_samples['height'].values.reshape(-1, 1))
weight = weight_discretizer.transform(test_samples['weight'].values.reshape(-1, 1))
ap_hi = ap_hi_discretizer.transform(test_samples['ap_hi'].values.reshape(-1, 1))
ap_lo = ap_lo_discretizer.transform(test_samples['ap_lo'].values.reshape(-1, 1))
cholesterol = test_samples['cholesterol'].values - 1
glucose = test_samples['gluc'].values - 1
smoke = test_samples['smoke'].values
alcohol = test_samples['alco'].values
active = test_samples['active'].values
labels = test_samples['cardio'].values

# Write the encoded test split; record ids continue after the training ids.
with open('datasets/cardio-test.csv', 'w') as file:

    file.write(f'record_id\tage\tgender\theight\tweight\tap_hi\tap_lo\tcholesterol\tglucose\tsmoke\talcohol\t' +
               'active\tlabel\n')

    for i in range(0, len(test_samples)):
        line = '\t'.join([
            str(i + len(train_samples)),
            str(int(age[i][0])),
            str(gender[i]),
            str(int(height[i][0])),
            str(int(weight[i][0])),
            str(int(ap_hi[i][0])),
            str(int(ap_lo[i][0])),
            str(cholesterol[i]),
            str(glucose[i]),
            str(smoke[i]),
            str(alcohol[i]),
            str(active[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
| 3,052 | 35.783133 | 112 | py |
hedgecut | hedgecut-master/python/prepare_adult.py | import pandas as pd
from experimentation.encoding import discretize, ordinalize, binarize
from sklearn.model_selection import train_test_split
# Column names for the header-less raw UCI Adult CSV.
names = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship',
         'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']
# ', ' separator and '?' missing-value marker require the python engine.
raw_data = pd.read_csv('datasets/adult.csv', sep=', ', na_values='?', names=names, index_col=False, engine='python')
raw_data = raw_data.dropna()
train_samples, test_samples = train_test_split(raw_data, test_size=0.2)
# Discretizers are fitted on the training split only; label encoders are
# fitted on the full data so that test-only categories are still known.
age, age_discretizer = discretize(train_samples, 'age')
workclass, workclass_encoder = ordinalize(raw_data, train_samples, 'workclass')
fnlwgt, fnlwgt_discretizer = discretize(train_samples, 'fnlwgt')
education, education_encoder = ordinalize(raw_data, train_samples, 'education')
marital_status, marital_status_encoder = ordinalize(raw_data, train_samples, 'marital-status')
occupation, occupation_encoder = ordinalize(raw_data, train_samples, 'occupation')
relationship, relationship_encoder = ordinalize(raw_data, train_samples, 'relationship')
race, race_encoder = ordinalize(raw_data, train_samples, 'race')
sex, sex_encoder = ordinalize(raw_data, train_samples, 'sex')
capital_gain, capital_gain_discretizer = discretize(train_samples, 'capital-gain')
# NOTE(review): capital_loss is discretized but never written to either split
# (its write is commented out in the test loop below) -- dead work unless
# the column is re-enabled.
capital_loss, capital_loss_discretizer = discretize(train_samples, 'capital-loss')
hours_per_week, hours_per_week_discretizer = discretize(train_samples, 'hours-per-week')
native_country, native_country_encoder = ordinalize(raw_data, train_samples, 'native-country')
# Binary label: 1 for income '>50K', 0 otherwise.
labels = train_samples.apply(lambda row: binarize(row, 'income', '>50K'), axis=1).values
with open('datasets/adult-train.csv', 'w') as file:
    file.write(f'record_id\tage\tworkclass\tfnlwgt\teducation\tmarital_status\toccupation\trelationship\trace\tsex'
               f'\tcapital_gain\thours_per_week\tnative_country\tlabel\n')
    for i in range(0, len(train_samples)):
        line = '\t'.join([
            str(i),
            str(int(age[i][0])),
            str(workclass[i]),
            str(int(fnlwgt[i][0])),
            str(education[i]),
            str(marital_status[i]),
            str(occupation[i]),
            str(relationship[i]),
            str(race[i]),
            str(sex[i]),
            str(int(capital_gain[i][0])),
            str(int(hours_per_week[i][0])),
            str(native_country[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
# Re-encode the test split with the fitted transformers.
# NOTE(review): the encoders are fed 2-D column vectors here (reshape(-1, 1))
# while ordinalize fits them on 1-D arrays -- works with the sklearn version
# in use; confirm on upgrade.
age = age_discretizer.transform(test_samples['age'].values.reshape(-1, 1))
workclass = workclass_encoder.transform(test_samples['workclass'].values.reshape(-1, 1))
fnlwgt = fnlwgt_discretizer.transform(test_samples['fnlwgt'].values.reshape(-1, 1))
education = education_encoder.transform(test_samples['education'].values.reshape(-1, 1))
marital_status = marital_status_encoder.transform(test_samples['marital-status'].values.reshape(-1, 1))
occupation = occupation_encoder.transform(test_samples['occupation'].values.reshape(-1, 1))
relationship = relationship_encoder.transform(test_samples['relationship'].values.reshape(-1, 1))
race = race_encoder.transform(test_samples['race'].values.reshape(-1, 1))
sex = sex_encoder.transform(test_samples['sex'].values.reshape(-1, 1))
capital_gain = capital_gain_discretizer.transform(test_samples['capital-gain'].values.reshape(-1, 1))
capital_loss = capital_loss_discretizer.transform(test_samples['capital-loss'].values.reshape(-1, 1))
hours_per_week = hours_per_week_discretizer.transform(test_samples['hours-per-week'].values.reshape(-1, 1))
native_country = native_country_encoder.transform(test_samples['native-country'].values.reshape(-1, 1))
labels = test_samples.apply(lambda row: binarize(row, 'income', '>50K'), axis=1).values
with open('datasets/adult-test.csv', 'w') as file:
    file.write(f'record_id\tage\tworkclass\tfnlwgt\teducation\tmarital_status\toccupation\trelationship\trace\tsex'
               f'\tcapital_gain\thours_per_week\tnative_country\tlabel\n')
    for i in range(0, len(test_samples)):
        line = '\t'.join([
            # Continue record ids after the training split.
            str(i + len(train_samples)),
            str(int(age[i][0])),
            str(workclass[i]),
            str(int(fnlwgt[i][0])),
            str(education[i]),
            str(marital_status[i]),
            str(occupation[i]),
            str(relationship[i]),
            str(race[i]),
            str(sex[i]),
            str(int(capital_gain[i][0])),
            #str(int(capital_loss[i][0])),
            str(int(hours_per_week[i][0])),
            str(native_country[i]),
            str(labels[i])
        ])
        file.write(line + '\n')
| 4,670 | 47.65625 | 116 | py |
hedgecut | hedgecut-master/python/sklearn_givemesomecredit.py | import pandas as pd
from experimentation.baseline import run_evaluation
# Load the pre-encoded splits produced by prepare_givemesomecredit.py and
# report sklearn baseline accuracies for the 'givemesomecredit' dataset.
train_samples = pd.read_csv('datasets/givemesomecredit-train.csv', sep='\t')
test_samples = pd.read_csv('datasets/givemesomecredit-test.csv', sep='\t')
label_attribute = 'label'
attribute_candidates = ['revolving_util', 'age', 'past_due', 'debt_ratio', 'income', 'lines',
                        'real_estate', 'dependents']
run_evaluation('givemesomecredit', train_samples, test_samples, attribute_candidates, label_attribute)
| 503 | 41 | 102 | py |
hedgecut | hedgecut-master/python/prepare_givemesomecredit.py | import pandas as pd
from experimentation.encoding import discretize, ordinalize, binarize
from sklearn.model_selection import train_test_split
# Prepare the "Give Me Some Credit" dataset: drop incomplete records,
# split 80/20, quantile-discretize the numeric columns and write both
# splits as tab-separated files with a synthetic record_id column.
# (Fix: a row-delimiter artifact fused onto the final statement has been
# removed; the code is otherwise unchanged.)
df = pd.read_csv('datasets/givemesomecredit.csv', sep=',', na_values='NA')
df = df.dropna()
train_samples, test_samples = train_test_split(df, test_size=0.2)
# Discretizers are fitted on the training split only so that the test split
# reuses the same bin boundaries.
revolving_util, revolving_util_discretizer = discretize(train_samples, 'RevolvingUtilizationOfUnsecuredLines')
age, age_discretizer = discretize(train_samples, 'age')
past_due, past_due_discretizer = discretize(train_samples, 'NumberOfTime30-59DaysPastDueNotWorse')
debt_ratio, debt_ratio_discretizer = discretize(train_samples, 'DebtRatio')
income, income_discretizer = discretize(train_samples, 'MonthlyIncome')
lines, lines_discretizer = discretize(train_samples, 'NumberOfOpenCreditLinesAndLoans')
real_estate, real_estate_discretizer = discretize(train_samples, 'NumberRealEstateLoansOrLines')
dependents, dependents_discretizer = discretize(train_samples, 'NumberOfDependents')
labels = train_samples['SeriousDlqin2yrs'].values
with open('datasets/givemesomecredit-train.csv', 'w') as file:
    file.write(f'record_id\trevolving_util\tage\tpast_due\tdebt_ratio\tincome\tlines\treal_estate\t' +
               f'dependents\tlabel\n')
    for i in range(0, len(train_samples)):
        line = '\t'.join([
            str(i),
            str(int(revolving_util[i][0])),
            str(int(age[i][0])),
            str(int(past_due[i][0])),
            str(int(debt_ratio[i][0])),
            str(int(income[i][0])),
            str(int(lines[i][0])),
            str(int(real_estate[i][0])),
            str(int(dependents[i][0])),
            str(labels[i])
        ])
        file.write(line + '\n')
# Re-encode the test split with the discretizers fitted above.
revolving_util = \
    revolving_util_discretizer.transform(test_samples['RevolvingUtilizationOfUnsecuredLines'].values.reshape(-1, 1))
age = age_discretizer.transform(test_samples['age'].values.reshape(-1, 1))
past_due = past_due_discretizer.transform(test_samples['NumberOfTime30-59DaysPastDueNotWorse'].values.reshape(-1, 1))
debt_ratio = debt_ratio_discretizer.transform(test_samples['DebtRatio'].values.reshape(-1, 1))
income = income_discretizer.transform(test_samples['MonthlyIncome'].values.reshape(-1, 1))
lines = lines_discretizer.transform(test_samples['NumberOfOpenCreditLinesAndLoans'].values.reshape(-1, 1))
real_estate = real_estate_discretizer.transform(test_samples['NumberRealEstateLoansOrLines'].values.reshape(-1, 1))
dependents = dependents_discretizer.transform(test_samples['NumberOfDependents'].values.reshape(-1, 1))
labels = test_samples['SeriousDlqin2yrs'].values
with open('datasets/givemesomecredit-test.csv', 'w') as file:
    file.write(f'record_id\trevolving_util\tage\tpast_due\tdebt_ratio\tincome\tlines\treal_estate\t' +
               f'dependents\tlabel\n')
    for i in range(0, len(test_samples)):
        line = '\t'.join([
            # Continue record ids after the training split so ids are unique.
            str(i + len(train_samples)),
            str(int(revolving_util[i][0])),
            str(int(age[i][0])),
            str(int(past_due[i][0])),
            str(int(debt_ratio[i][0])),
            str(int(income[i][0])),
            str(int(lines[i][0])),
            str(int(real_estate[i][0])),
            str(int(dependents[i][0])),
            str(labels[i])
        ])
        file.write(line + '\n')
hedgecut | hedgecut-master/python/forget.py | import pandas as pd
from experimentation.baseline import forget
# Run the "forget one record, retrain from scratch" benchmark over every
# prepared dataset.  All split files share the same 'label' column name.
label_attribute = 'label'
train_samples = pd.read_csv('datasets/adult-train.csv', sep='\t')
attribute_candidates = ['age', 'workclass', 'fnlwgt', 'education', 'marital_status', 'occupation', 'relationship',
                        'race', 'sex', 'capital_gain', 'hours_per_week', 'native_country']
forget('adult', train_samples, attribute_candidates, label_attribute)
train_samples = pd.read_csv('datasets/cardio-train.csv', sep='\t')
attribute_candidates = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'glucose', 'smoke',
                        'alcohol', 'active']
forget('cardio', train_samples, attribute_candidates, label_attribute)
train_samples = pd.read_csv('datasets/givemesomecredit-train.csv', sep='\t')
attribute_candidates = ['revolving_util', 'age', 'past_due', 'debt_ratio', 'income', 'lines',
                        'real_estate', 'dependents']
forget('givemesomecredit', train_samples, attribute_candidates, label_attribute)
train_samples = pd.read_csv('datasets/propublica-train.csv', sep='\t')
attribute_candidates = ['age', 'decile_score', 'priors_count', 'days_b_screening_arrest', 'is_recid',
                        'c_charge_degree', 'sex', 'age_cat', 'score_text', 'race']
forget('propublica', train_samples, attribute_candidates, label_attribute)
train_samples = pd.read_csv('datasets/shopping-train.csv', sep='\t')
attribute_candidates = ['administrative', 'administrative_duration', 'informational', 'informational_duration',
                        'product_related', 'product_related_duration', 'bounce_rates', 'exit_rates', 'page_values',
                        'special_day', 'month', 'operating_systems', 'browser', 'region', 'traffic_type',
                        'visitor_type', 'weekend']
forget('shopping', train_samples, attribute_candidates, label_attribute)
| 1,907 | 45.536585 | 115 | py |
hedgecut | hedgecut-master/python/sklearn_adult.py | import pandas as pd
from experimentation.baseline import run_evaluation
# Load the pre-encoded splits produced by prepare_adult.py and report
# sklearn baseline accuracies for the 'adult' dataset.
train_samples = pd.read_csv('datasets/adult-train.csv', sep='\t')
test_samples = pd.read_csv('datasets/adult-test.csv', sep='\t')
label_attribute = 'label'
attribute_candidates = ['age', 'workclass', 'fnlwgt', 'education', 'marital_status', 'occupation', 'relationship',
                        'race', 'sex', 'capital_gain', 'hours_per_week', 'native_country']
run_evaluation('adult', train_samples, test_samples, attribute_candidates, label_attribute)
| 529 | 43.166667 | 114 | py |
hedgecut | hedgecut-master/python/sklearn_propublica.py | import pandas as pd
from experimentation.baseline import run_evaluation
# Load the pre-encoded splits and report sklearn baseline accuracies for
# the 'propublica' (COMPAS) dataset.
train_samples = pd.read_csv('datasets/propublica-train.csv', sep='\t')
test_samples = pd.read_csv('datasets/propublica-test.csv', sep='\t')
label_attribute = 'label'
attribute_candidates = ['age', 'decile_score', 'priors_count', 'days_b_screening_arrest', 'is_recid',
                        'c_charge_degree', 'sex', 'age_cat', 'score_text', 'race']
run_evaluation('propublica', train_samples, test_samples, attribute_candidates, label_attribute)
| 523 | 42.666667 | 101 | py |
hedgecut | hedgecut-master/python/sklearn_cardio.py | import pandas as pd
from experimentation.baseline import run_evaluation
# Load the pre-encoded splits produced by the cardio preparation script and
# report sklearn baseline accuracies for the 'cardio' dataset.
train_samples = pd.read_csv('datasets/cardio-train.csv', sep='\t')
test_samples = pd.read_csv('datasets/cardio-test.csv', sep='\t')
label_attribute = 'label'
attribute_candidates = ['age', 'gender', 'height', 'weight', 'ap_hi', 'ap_lo', 'cholesterol', 'glucose', 'smoke',
                        'alcohol', 'active']
run_evaluation('cardio', train_samples, test_samples, attribute_candidates, label_attribute)
| 485 | 39.5 | 113 | py |
hedgecut | hedgecut-master/python/sklearn_shopping.py | import pandas as pd
from experimentation.baseline import run_evaluation
# Load the pre-encoded splits and report sklearn baseline accuracies for
# the 'shopping' (online shoppers intention) dataset.
train_samples = pd.read_csv('datasets/shopping-train.csv', sep='\t')
test_samples = pd.read_csv('datasets/shopping-test.csv', sep='\t')
label_attribute = 'label'
attribute_candidates = ['administrative', 'administrative_duration', 'informational', 'informational_duration',
                        'product_related', 'product_related_duration', 'bounce_rates', 'exit_rates', 'page_values',
                        'special_day', 'month', 'operating_systems', 'browser', 'region', 'traffic_type',
                        'visitor_type', 'weekend']
run_evaluation('shopping', train_samples, test_samples, attribute_candidates, label_attribute)
| 717 | 50.285714 | 115 | py |
hedgecut | hedgecut-master/python/experimentation/baseline.py | import time
import pandas as pd
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn import tree, ensemble
def _fit_seconds(model, features, targets):
    """Fit *model* on the given data and return elapsed process time in seconds."""
    start = time.process_time()
    model.fit(features, targets)
    return time.process_time() - start


def train_time(name, train_samples, attribute_candidates, label_attribute):
    """Benchmark training of three sklearn learners on the full training set.

    Prints one CSV line ``<name>,<learner>,<milliseconds>`` per learner to
    stdout.

    Parameters
    ----------
    name : str
        Dataset name used as the first CSV column.
    train_samples : pandas.DataFrame
        Training data containing the feature and label columns.
    attribute_candidates : list of str
        Names of the feature columns.
    label_attribute : str
        Name of the label column.
    """
    X_train = train_samples[attribute_candidates].values
    y_train = train_samples[label_attribute].values

    # The triplicated time/fit boilerplate is factored into _fit_seconds;
    # learners and hyper-parameters are unchanged.  (Timing now excludes the
    # negligible estimator-construction cost.)
    dt_train_time = _fit_seconds(tree.DecisionTreeClassifier(), X_train, y_train)
    rf_train_time = _fit_seconds(ensemble.RandomForestClassifier(), X_train, y_train)
    etd_train_time = _fit_seconds(ensemble.ExtraTreesClassifier(n_estimators=100,
                                                                criterion='gini',
                                                                min_samples_leaf=2,
                                                                max_features='sqrt'),
                                  X_train, y_train)

    print(f'{name},decision_tree,{int(dt_train_time * 1000)}')
    print(f'{name},random_forest,{int(rf_train_time * 1000)}')
    print(f'{name},extremely_randomized_trees,{int(etd_train_time * 1000)}')
def forget(name, train_samples, attribute_candidates, label_attribute):
    """Benchmark forgetting-by-retraining: repeatedly drop one record and
    retrain three sklearn learners from scratch, printing one CSV line
    ``<name>,<learner>,<microseconds>`` per learner and repetition.
    """
    # Shuffle once so the removed records are randomly chosen.
    shuffled = train_samples.sample(frac=1).reset_index(drop=True)
    removals = int(len(shuffled) / 1000)

    remaining = shuffled
    for _ in range(0, removals):
        # Forget the first remaining record, then retrain from scratch.
        remaining = remaining.iloc[1:]
        features = remaining[attribute_candidates].values
        targets = remaining[label_attribute].values

        started = time.process_time()
        dt_model = tree.DecisionTreeClassifier()
        dt_model.fit(features, targets)
        dt_elapsed = time.process_time() - started

        started = time.process_time()
        rf_model = ensemble.RandomForestClassifier()
        rf_model.fit(features, targets)
        rf_elapsed = time.process_time() - started

        started = time.process_time()
        ert_model = ensemble.ExtraTreesClassifier(n_estimators=100,
                                                  criterion='gini',
                                                  min_samples_leaf=2,
                                                  max_features='sqrt')
        ert_model.fit(features, targets)
        ert_elapsed = time.process_time() - started

        print(f'{name},decision_tree,{int(dt_elapsed * 1000000)}')
        print(f'{name},random_forest,{int(rf_elapsed * 1000000)}')
        print(f'{name},extremely_randomized_trees,{int(ert_elapsed * 1000000)}')
def run_evaluation(name, train_samples, test_samples, attribute_candidates, label_attribute):
    """Train sklearn learners and print test-set accuracies as CSV lines.

    Prints ``<name>,<learner>,<accuracy>`` for the decision tree, the random
    forest and the single-threaded extremely-randomized-trees ensemble.

    Parameters are as in :func:`train_time`, plus ``test_samples`` holding the
    held-out evaluation data.

    (Cleanup: removed commented-out debug prints, unused timing variables and
    a row-delimiter artifact fused onto the final line; the reported numbers
    are unchanged.)
    """
    X_train = train_samples[attribute_candidates].values
    y_train = train_samples[label_attribute].values

    clf_sklearn = tree.DecisionTreeClassifier().fit(X_train, y_train)
    clf_sklearn_rf = ensemble.RandomForestClassifier().fit(X_train, y_train)

    # NOTE: this parallel ExtraTrees model is trained but its accuracy is not
    # reported.  It is kept, in the original order, because fitting it may
    # advance the shared random state and dropping it could therefore change
    # the numbers printed for the model fitted after it.
    etd_sklearn = ensemble.ExtraTreesClassifier(n_estimators=100,
                                                criterion='gini',
                                                min_samples_leaf=2,
                                                max_features='sqrt',
                                                n_jobs=-1).fit(X_train, y_train)

    etd_sklearn_single = ensemble.ExtraTreesClassifier(n_estimators=100,
                                                       criterion='gini',
                                                       min_samples_leaf=2,
                                                       max_features='sqrt').fit(X_train, y_train)

    X_test = test_samples[attribute_candidates].values
    y_true_sklearn = test_samples[label_attribute].values

    y_pred_sklearn = clf_sklearn.predict(X_test)
    y_pred_sklearn_rf = clf_sklearn_rf.predict(X_test)
    y_pred_sklearn_etd_single = etd_sklearn_single.predict(X_test)

    print(f'{name},decision_tree,{accuracy_score(y_true_sklearn, y_pred_sklearn)}')
    print(f'{name},random_forest,{accuracy_score(y_true_sklearn, y_pred_sklearn_rf)}')
    print(f'{name},extremely_randomized_trees,{accuracy_score(y_true_sklearn, y_pred_sklearn_etd_single)}')
hedgecut | hedgecut-master/python/experimentation/encoding.py |
from sklearn.preprocessing import KBinsDiscretizer, LabelEncoder
def discretize(data, attribute):
    """Quantile-bin a numeric column into 16 ordinal buckets.

    Returns the transformed column (as a 2-D array of bin indices) together
    with the fitted discretizer so the same binning can be applied to other
    splits.
    """
    column = data[attribute].values.reshape(-1, 1)
    binner = KBinsDiscretizer(n_bins=16, encode='ordinal', strategy='quantile').fit(column)
    return binner.transform(column), binner
def ordinalize(all_data, data, attribute):
    """Label-encode a categorical column.

    The encoder is fitted on *all_data* so that categories appearing only in
    the other split are still known; only the rows of *data* are transformed.
    Returns the encoded values and the fitted encoder.
    """
    fitted_encoder = LabelEncoder().fit(all_data[attribute].values.reshape(1, -1)[0])
    encoded = fitted_encoder.transform(data[attribute].values.reshape(1, -1)[0])
    return encoded, fitted_encoder
def binarize(row, attribute, positive_value):
    """Map a row's categorical value to 1 if it equals *positive_value*, else 0."""
    return 1 if row[attribute] == positive_value else 0
| 791 | 32 | 85 | py |
pyUSID-legacy | pyUSID-master-legacy/setup.py | from codecs import open
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))

# Read the long description and the version from files so that each is
# maintained in a single place.  Files are decoded as UTF-8 explicitly:
# relying on the platform default encoding is not portable.
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

# __version__.py contains e.g. version = '0.0.10r2'; the split extracts the
# quoted value without importing the (not yet installed) package.
with open(os.path.join(here, 'pyUSID/__version__.py'), encoding='utf-8') as f:
    __version__ = f.read().split("'")[1]

# TODO: Move requirements to requirements.txt
requirements = ['numpy>=1.10',
                'toolz',  # dask installation failing without this
                'cytoolz',  # dask installation failing without this
                'dask>=0.10',
                'h5py>=2.6.0',
                'pillow',  # Remove once ImageReader is in ScopeReaders
                'psutil',
                'six',
                'sidpy>=0.0.2'
                ]

setup(
    name='pyUSID',
    version=__version__,
    description='Framework for storing, visualizing, and processing Universal Spectroscopic and Imaging Data (USID)',
    long_description=long_description,
    long_description_content_type='text/x-rst',  # README.rst is reStructuredText
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Cython',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Scientific/Engineering :: Information Analysis'],
    keywords=['imaging', 'spectra', 'multidimensional', 'data format', 'universal', 'hdf5'],
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    url='https://pycroscopy.github.io/pyUSID/about.html',
    license='MIT',
    author='S. Somnath, C. R. Smith, and contributors',
    author_email='pycroscopy@gmail.com',
    install_requires=requirements,
    setup_requires=['pytest-runner'],
    tests_require=['unittest2;python_version<"3.0"', 'pytest'],
    platforms=['Linux', 'Mac OSX', 'Windows 10/8.1/8/7'],
    test_suite='pytest',
    include_package_data=True,
    # https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-dependencies
    extras_require={
        'MPI': ["mpi4py"],
    },
)
| 3,498 | 39.686047 | 117 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/__version__.py | version = '0.0.10r2'
time = '2021-03-05 18:20:25'
| 50 | 16 | 28 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/__init__.py | """
The pyUSID package.
Submodules
----------
.. autosummary::
:toctree: _autosummary
"""
from . import io
from .io import *
from . import processing
from .processing import *
from .__version__ import version as __version__
# Flat public API: re-export the sub-packages' declared names at the top level.
__all__ = ['__version__']
__all__ += io.__all__
__all__ += processing.__all__
| 310 | 14.55 | 47 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/reg_ref.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 21:14:25 2015
@author: Chris Smith, Suhas Somnath
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import h5py
from sidpy.hdf.reg_ref import *
from .hdf_utils import check_if_main
# Python 2 compatibility: make ``unicode`` an alias of ``str`` on Python 3 so
# isinstance checks against (str, unicode) work on both interpreters.
if sys.version_info.major == 3:
    unicode = str
def copy_region_refs(h5_source, h5_target):
    """
    Check the input dataset for plot groups, copy them if they exist
    Also make references in the Spectroscopic Values and Indices tables

    Parameters
    ----------
    h5_source : HDF5 Dataset
            source dataset to copy references from
    h5_target : HDF5 Dataset
            target dataset the references from h5_source are copied to

    Notes
    -----
    Fixes relative to the previous revision: the isinstance validation now
    runs *before* check_if_main (so non-dataset inputs raise the intended
    TypeError instead of whatever check_if_main does with them), the no-op
    triple-quoted strings were turned into real comments, and a row-delimiter
    artifact fused onto the final line was removed.
    """
    # Validate the inputs first, then determine whether both are Main datasets.
    if not all([isinstance(h5_source, h5py.Dataset), isinstance(h5_target, h5py.Dataset)]):
        raise TypeError('Inputs to copy_region_refs must be HDF5 Datasets')
    are_main = all([check_if_main(h5_source), check_if_main(h5_target)])

    # It is OK if objects are in different files
    if are_main:
        h5_source_inds = h5_source.file[h5_source.attrs['Spectroscopic_Indices']]
        h5_spec_inds = h5_target.file[h5_target.attrs['Spectroscopic_Indices']]
        h5_spec_vals = h5_target.file[h5_target.attrs['Spectroscopic_Values']]

    for key in h5_source.attrs.keys():
        # Only region-reference attributes are copied; everything else is skipped.
        if not isinstance(h5_source.attrs[key], h5py.RegionReference):
            continue

        if are_main:
            if h5_source_inds.shape[0] == h5_spec_inds.shape[0]:
                # Spectroscopic dimensions are identical: do direct copy.
                ref_inds = simple_region_ref_copy(h5_source, h5_target, key)
            else:
                # Spectroscopic dimensions differ: do the dimension-reducing copy.
                ref_inds = copy_reg_ref_reduced_dim(h5_source, h5_target, h5_source_inds, h5_spec_inds, key)

            # Create references for Spectroscopic Indices and Values.
            # Clamp the end-point of each hyperslab in the position dimension
            # to the number of rows in the index array.
            ref_inds[:, 1, 0][ref_inds[:, 1, 0] > h5_spec_inds.shape[0]] = h5_spec_inds.shape[0] - 1
            spec_inds_ref = create_region_reference(h5_spec_inds, ref_inds)
            h5_spec_inds.attrs[key] = spec_inds_ref
            spec_vals_ref = create_region_reference(h5_spec_vals, ref_inds)
            h5_spec_vals.attrs[key] = spec_vals_ref

        else:
            # Not main datasets: only the simple copy can be used.
            simple_region_ref_copy(h5_source, h5_target, key)
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/image.py | """
:class:`~pyUSID.io.image.ImageTranslator` class that translates conventional 2D images to USID HDF5 files
Created on Feb 9, 2016
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import sys
from warnings import warn
import h5py
import numpy as np
from PIL import Image
from sidpy.base.num_utils import contains_integers
from sidpy.hdf.hdf_utils import write_simple_attrs
from .array_translator import ArrayTranslator
from .dimension import Dimension
from enum import Enum
# Python 2/3 compatibility shims: alias ``unicode`` on py3; backfill the
# py3-only exception names with ValueError on py2.
if sys.version_info.major == 3:
    unicode = str
else:
    FileExistsError = ValueError
    FileNotFoundError = ValueError

# The following code will be used to support both the old and new versions of pillow.
# Pillow added a new enum class 'Resampling' in the newer version>=9.0.0
if not hasattr(Image, 'Resampling'):  # pillow<9.0.0
    Image.Resampling = Image

# Maps PIL integer resample codes to human-readable names recorded as HDF5
# metadata by ImageTranslator.translate.
# NOTE(review): code 1 is spelled 'LANCZOS' in PIL; the 'LANCOS' spelling
# below is kept as-is because the string ends up inside written files and
# changing it would alter recorded metadata -- confirm before fixing.
resample_dict = {0: 'NEAREST',
                 1: 'LANCOS',
                 2: 'BILINEAR',
                 3: 'BICUBIC',
                 4: 'BOX',
                 5: 'HAMMING'}
class ImageTranslator(ArrayTranslator):
    """
    Translates data from an image file to an HDF5 file
    """

    def __init__(self, *args, **kwargs):
        super(ImageTranslator, self).__init__(*args, **kwargs)
        # This class is deprecated in favor of SciFiReaders.ImageReader
        warn("pyUSID.ImageTranslator will be removed shortly. Consider using "
             "SciFiReaders.ImageReader instead", FutureWarning)

    @staticmethod
    def _parse_file_path(image_path, h5_path=None):
        """
        Validates the image file path and derives / validates the output HDF5
        file path.

        Parameters
        ---------------
        image_path : str
            absolute path to the image file
        h5_path : str, optional
            absolute path to the desired output HDF5 file. If nothing is provided, a valid file path will be provided

        Returns
        ----------
        image_path : str
            Absolute file path to the image
        h5_path : str
            absolute path to the desired output HDF5 file.

        Raises
        ------
        TypeError : if either argument is not a string
        FileNotFoundError : if the image does not exist
        FileExistsError : if the output HDF5 file already exists
        """
        if not isinstance(image_path, (str, unicode)):
            raise TypeError("'image_path' argument for ImageTranslator should be a str or unicode")
        if not os.path.exists(os.path.abspath(image_path)):
            raise FileNotFoundError('Specified image does not exist.')
        else:
            image_path = os.path.abspath(image_path)

        if h5_path is not None:
            if not isinstance(h5_path, (str, unicode)):
                raise TypeError("'h5_path' argument for ImageTranslator should be a str or unicode (if provided)")
            # NOT checking the extension of the file path for simplicity
        else:
            base_name, _ = os.path.splitext(image_path)
            h5_name = base_name + '.h5'
            # NOTE(review): h5_name is already an absolute path here, so
            # os.path.join discards image_path and returns h5_name -- this
            # works, but only via join's absolute-second-argument behavior.
            h5_path = os.path.join(image_path, h5_name)

        if os.path.exists(os.path.abspath(h5_path)):
            raise FileExistsError("ImageTranslator: There is already a valid (output HDF5) file at:\n{}\n"
                                  "Please consider providing an alternate path or deleting the "
                                  "specified file".format(h5_path))
        return image_path, h5_path

    def translate(self, image_path, h5_path=None, bin_factor=None, interp_func=Image.Resampling.BICUBIC,
                  normalize=False, **image_args):
        """
        Translates the image in the provided file into a USID HDF5 file

        Parameters
        ----------------
        image_path : str
            Absolute path to folder holding the image files
        h5_path : str, optional
            Absolute path to where the HDF5 file should be located.
            Default is None
        bin_factor : uint or array-like of uint, optional
            Down-sampling factor for each dimension. Default is None.
            If specifying different binning for each dimension, please specify as (height binning, width binning)
        interp_func : int, optional.
            Default = :attr:`PIL.Image.BICUBIC` for pillow<9.0.0 or `PIL.Image.Resampling.BICUBIC` for pillow>9.0.0
            How the image will be interpolated to provide the down-sampled or binned image.
            For more information see instructions for the `resample` argument for :meth:`PIL.Image.resize`
        normalize : boolean, optional. Default = False
            Should the raw image be normalized between the values of 0 and 1
        image_args : dict
            Arguments to be passed to read_image. Arguments depend on the type of image.

        Returns
        ----------
        h5_path : str
            Absolute path to the written USID HDF5 file (as returned by
            ArrayTranslator.translate)
        """
        image_path, h5_path = self._parse_file_path(image_path, h5_path=h5_path)

        image = read_image(image_path, **image_args)
        image_parms = dict()
        usize, vsize = image.shape[:2]

        '''
        Check if a bin_factor is given.  Set up binning objects if it is.
        '''
        if bin_factor is not None:
            if isinstance(bin_factor, (list, tuple)):
                if not contains_integers(bin_factor, min_val=1):
                    raise TypeError('bin_factor should contain positive whole integers')
                if len(bin_factor) == 2:
                    bin_factor = tuple(bin_factor)
                else:
                    raise ValueError('Input parameter `bin_factor` must be a length 2 array-like or an integer.\n' +
                                     '{} was given.'.format(bin_factor))
            elif isinstance(bin_factor, int):
                # A single integer means the same binning along both axes
                bin_factor = (bin_factor, bin_factor)
            else:
                raise TypeError('bin_factor should either be an integer or an iterable of positive integers')

            if np.min(bin_factor) < 0:
                raise ValueError('bin_factor must consist of positive factors')

            # pillow<9.0.0 passes resample modes as plain ints
            if isinstance(interp_func, int):
                if interp_func not in resample_dict.keys():
                    raise ValueError("'interp_func' argument for ImageTranslator.translate must be one of "
                                     "PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC, PIL.Image.LANCZOS, "
                                     "PIL.Image.BOX, PIL.Image.HAMMING")
                else:
                    interp_func_name = resample_dict[interp_func]
            # pillow>9.0.0 passes resample modes as Resampling enum members
            elif isinstance(interp_func, Enum):
                if interp_func.value not in resample_dict.keys():
                    raise ValueError("'interp_func' argument for ImageTranslator.translate must be one of "
                                     "PIL.Image.Resampling.NEAREST, PIL.Image.Resampling.BILINEAR, "
                                     "PIL.Image.Resampling.BICUBIC, PIL.Image.Resampling.LANCZOS, "
                                     "PIL.Image.BOX, PIL.Image.HAMMING")
                else:
                    interp_func_name = interp_func.name
            else:
                raise TypeError('interp_func should be an int (pillow<9.0.0) or an enum (pillow>=9.0.0)'
                                ' but received {}'.format(type(interp_func)))

            image_parms.update({'image_binning_size': bin_factor, 'image_PIL_resample_mode': interp_func_name})
            usize = int(usize / bin_factor[0])
            vsize = int(vsize / bin_factor[1])

            # Unfortunately, we need to make a round-trip through PIL for the interpolation. Not possible with numpy
            img_obj = Image.fromarray(image)
            img_obj = img_obj.resize((vsize, usize), resample=interp_func)
            image = np.asarray(img_obj)
            # Working around occasional "cannot modify read-only array" error
            image = image.copy()

        '''
        Normalize Raw Image
        '''
        if normalize:
            image -= np.min(image)
            image = image / np.float32(np.max(image))
            # NOTE(review): image_min / image_max are computed *after*
            # normalization, so they record the normalized range, not the
            # original one -- confirm whether that is intended.
            image_parms.update({'normalized': normalize,
                                'image_min': np.min(image), 'image_max': np.max(image)})

        """
        Enable the line below if there is a need make the image "look" the right side up. This would be manipulation
        # of the original data. Therefore it remains commented
        """
        # image = np.flipud(image)

        '''
        Ready to write to h5
        '''
        pos_dims = [Dimension('Y', 'a.u.', np.arange(usize)), Dimension('X', 'a.u.', np.arange(vsize))]
        spec_dims = Dimension('arb', 'a.u.', 1)

        # Need to transpose to for correct reshaping
        image = image.transpose()

        h5_path = super(ImageTranslator, self).translate(h5_path, 'Raw_Data', image.reshape((-1, 1)),
                                                         'Intensity', 'a.u.', pos_dims, spec_dims,
                                                         translator_name='ImageTranslator', parm_dict=image_parms)

        with h5py.File(h5_path, mode='r+') as h5_f:
            # For legacy reasons:
            write_simple_attrs(h5_f, {'data_type': 'ImageData'})

        return h5_path
def read_image(image_path, as_grayscale=True, as_numpy_array=True, *args, **kwargs):
    """
    Read the image file at `image_path` into a numpy array either via numpy (.txt) or via pillow (.jpg, .tif, etc.)

    Parameters
    ----------
    image_path : str
        Path to the image file
    as_grayscale : bool, optional. Default = True
        Whether or not to read the image as a grayscale image
    as_numpy_array : bool, optional. Default = True
        If set to True, the image is read into a numpy array. If not, it is returned as a pillow Image

    Returns
    -------
    image : :class:`numpy.ndarray` or :class:`PIL.Image.Image`
        if `as_numpy_array` is set to True - Array containing the image from the file `image_path`.
        If `as_numpy_array` is set to False - PIL.Image object containing the image within the file - `image_path`.
    """
    extension = os.path.splitext(image_path)[-1]

    if extension not in ('.txt', '.csv'):
        # Regular image formats go through pillow.
        pil_image = Image.open(image_path)
        if as_grayscale:
            pil_image = pil_image.convert(mode="L", **kwargs)
        return np.asarray(pil_image) if as_numpy_array else pil_image

    # Text-based "images" are parsed by numpy instead of pillow.
    if extension == '.csv':
        # Commas are the default separator for CSV files unless overridden.
        kwargs.setdefault('delimiter', ',')
    pixel_data = np.loadtxt(image_path, *args, **kwargs)
    if as_numpy_array:
        return pixel_data
    pil_image = Image.fromarray(pixel_data)
    return pil_image.convert(mode="L")
| 10,743 | 39.851711 | 117 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/anc_build_utils.py | # -*- coding: utf-8 -*-
"""
Utilities that assist in building ancillary USID datasets manually.
Formerly known as "write_utils"
Created on Thu Sep 7 21:14:25 2017
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import sys
import numpy as np
from sidpy.base.num_utils import contains_integers
from sidpy.base.string_utils import validate_list_of_strings
# For legacy reasons
from .dimension import Dimension, DimType, validate_dimensions
__all__ = ['get_aux_dset_slicing', 'make_indices_matrix', 'INDICES_DTYPE',
'VALUES_DTYPE', 'build_ind_val_matrices', 'calc_chunks',
'create_spec_inds_from_vals',
'Dimension', 'DimType', 'validate_dimensions']
if sys.version_info.major == 3:
unicode = str
# Constants:
INDICES_DTYPE = np.uint32
VALUES_DTYPE = np.float32
def get_aux_dset_slicing(dim_names, last_ind=None, is_spectroscopic=False):
    """
    Build a dictionary mapping each dimension name to a pair of slice objects
    suitable for creating region references in Position or Spectroscopic
    ancillary indices / values datasets.

    Parameters
    ------------
    dim_names : iterable
        Names of the position or spectroscopic dimensions, ordered exactly as
        the rows / columns appear in the ancillary dataset
    last_ind : (Optional) unsigned int, default = None
        Index of the last pixel / spectral step to include. Useful when the
        experiment was truncated or parameters changed mid-acquisition.
    is_spectroscopic : bool, optional. default = False
        True for spectroscopic datasets (dimensions along rows),
        False for position datasets (dimensions along columns)

    Returns
    ------------
    slice_dict : dictionary
        Maps each dimension name to a tuple of two slice objects selecting
        that dimension's indices within the ancillary dataset.
    """
    dim_names = validate_list_of_strings(dim_names, 'dim_names')
    if not len(dim_names):
        raise ValueError('No valid dim_names provided')
    slice_dict = dict()
    for dim_ind, dim_name in enumerate(dim_names):
        along_steps = slice(last_ind)
        along_dims = slice(dim_ind, dim_ind + 1)
        # Position datasets are tall (steps x dims); spectroscopic are wide
        if is_spectroscopic:
            pair = (along_dims, along_steps)
        else:
            pair = (along_steps, along_dims)
        slice_dict[str(dim_name)] = pair
    return slice_dict
def make_indices_matrix(num_steps, is_position=True):
    """
    Makes an ancillary indices matrix given the number of steps in each dimension. In other words, this function builds
    a matrix whose rows correspond to unique combinations of the multiple dimensions provided.

    Parameters
    ------------
    num_steps : List / numpy array / int
        Number of steps in each spatial or spectral dimension
        Note that the axes must be ordered from fastest varying to slowest varying
    is_position : bool, optional, default = True
        Whether the returned matrix is meant for position (True) indices (tall and skinny) or spectroscopic (False)
        indices (short and wide)

    Returns
    --------------
    indices_matrix : 2D unsigned int numpy array
        arranged as [steps, spatial dimension] (transposed if `is_position` is False).
        Note: dimensions of size 1 are dropped from the output.

    Raises
    ------
    TypeError / ValueError
        If `num_steps` is not a sequence of integers >= 2 (or a lone 1)
    """
    if isinstance(num_steps, int):
        num_steps = [num_steps]
    if not isinstance(num_steps, (tuple, list, np.ndarray)):
        raise TypeError('num_steps should be a list / tuple / numpy array')
    if isinstance(num_steps, np.ndarray) and num_steps.ndim < 1:
        # promote a 0-d array to 1-d so len() and iteration work below
        num_steps = np.expand_dims(num_steps, 0)
    if len(num_steps) == 0:
        raise ValueError('num_steps should not be an empty array or list')
    if len(num_steps) == 1 and num_steps[0] == 1:
        # degenerate case: a single empty dimension is allowed
        num_steps = [1]
    elif not contains_integers(num_steps, min_val=1 + int(len(num_steps) > 0)):
        raise ValueError('num_steps should contain integers greater than equal'
                         ' to 1 (empty dimension) or 2')
    num_steps = np.array(num_steps)
    # only dimensions with more than one step get a column; at least one column
    spat_dims = max(1, len(np.where(num_steps > 1)[0]))
    indices_matrix = np.zeros(shape=(np.prod(num_steps), spat_dims), dtype=INDICES_DTYPE)
    dim_ind = 0
    for indx, curr_steps in enumerate(num_steps):
        if curr_steps > 1:
            # part1: combined size of this dimension and all faster ones
            part1 = np.prod(num_steps[:indx + 1])
            # part2: repeat factor = product of all faster dimensions
            if indx > 0:
                part2 = np.prod(num_steps[:indx])
            else:
                part2 = 1
            # part3: tile factor = product of all slower dimensions
            if indx + 1 == len(num_steps):
                part3 = 1
            else:
                part3 = np.prod(num_steps[indx + 1:])
            # each index value is held for part2 rows, and the whole pattern
            # repeats part3 times so every dimension combination appears once
            indices_matrix[:, dim_ind] = np.tile(np.floor(np.arange(part1) / part2), part3)
            dim_ind += 1
    if not is_position:
        indices_matrix = indices_matrix.T
    return indices_matrix
def build_ind_val_matrices(unit_values, is_spectral=True):
    """
    Build ancillary indices and values matrices from per-dimension unit values.
    The unit value vectors must be ordered from fastest varying to slowest
    varying dimension.

    Parameters
    ----------
    unit_values : list / tuple
        One 1D vector of reference values per dimension
    is_spectral : bool (optional), default = True
        True -> matrices shaped for spectroscopic datasets (short and wide);
        False -> transposed for Position datasets (tall and skinny)

    Returns
    -------
    ind_mat : 2D numpy array
        Indices matrix
    val_mat : 2D numpy array
        Values matrix
    """
    if not isinstance(unit_values, (list, tuple)):
        raise TypeError('unit_values should be a list or tuple')
    if not np.all([np.array(x).ndim == 1 for x in unit_values]):
        raise ValueError('unit_values should only contain 1D array')
    dim_sizes = [len(vec) for vec in unit_values]
    num_dims = len(dim_sizes)
    # tile factor: product of all slower dimensions; repeat: all faster ones
    tiles = [np.prod(dim_sizes[pos:]) for pos in range(1, num_dims)] + [1]
    reps = [1] + [np.prod(dim_sizes[:pos]) for pos in range(1, num_dims)]
    val_mat = np.zeros(shape=(num_dims, np.prod(dim_sizes)))
    ind_mat = np.zeros(shape=val_mat.shape, dtype=np.uint32)
    for row, (n_tile, n_rep, vec) in enumerate(zip(tiles, reps, unit_values)):
        val_mat[row] = np.tile(np.repeat(vec, n_rep), n_tile)
        ind_mat[row] = np.tile(np.repeat(np.arange(len(vec)), n_rep), n_tile)
    if not is_spectral:
        val_mat = val_mat.T
        ind_mat = ind_mat.T
    return INDICES_DTYPE(ind_mat), VALUES_DTYPE(val_mat)
def create_spec_inds_from_vals(ds_spec_val_mat):
    """
    Create new Spectroscopic Indices table from the changes in the
    Spectroscopic Values

    Parameters
    ----------
    ds_spec_val_mat : :class:`numpy.ndarray`
        2D array arranged as [dimension, steps] holding the spectroscopic
        values to be indexed

    Returns
    -------
    ds_spec_inds_mat : numpy array of int32 the same shape as ds_spec_val_mat
        Indices corresponding to the values in ds_spec_val_mat

    Raises
    ------
    TypeError
        If `ds_spec_val_mat` is not a numpy array
    ValueError
        If `ds_spec_val_mat` is not 2D
    """
    if not isinstance(ds_spec_val_mat, np.ndarray):
        raise TypeError('ds_spec_val_mat must be a numpy array')
    if ds_spec_val_mat.ndim != 2:
        raise ValueError('ds_spec_val_mat must be a 2D array arranged as [dimension, values]')
    ds_spec_inds_mat = np.zeros_like(ds_spec_val_mat, dtype=np.int32)
    """
    Find how quickly the spectroscopic values are changing in each row
    and the order of row from fastest changing to slowest.
    """
    # NOTE: at i == 0 this compares row[0] against row[-1] (wrap-around);
    # kept as-is since only the *relative* ordering of rows is used below.
    change_count = [len(np.where([row[i] != row[i - 1] for i in range(len(row))])[0]) for row in ds_spec_val_mat]
    change_sort = np.argsort(change_count)[::-1]
    """
    Determine everywhere the spectroscopic values change and build
    index table based on those changed
    """
    indices = np.zeros(ds_spec_val_mat.shape[0])
    for jcol in range(1, ds_spec_val_mat.shape[1]):
        this_col = ds_spec_val_mat[change_sort, jcol]
        last_col = ds_spec_val_mat[change_sort, jcol - 1]
        """
        Check if current column values are different than those
        in last column.
        """
        changed = np.where(this_col != last_col)[0]
        """
        If only one row changed, increment the index for that
        column
        If more than one row has changed, increment the index for
        the last row that changed and set all others to zero
        """
        if len(changed) == 1:
            indices[changed] += 1
        elif len(changed) > 1:
            # BUGFIX: was `len(changed > 1)` - the length of a boolean array,
            # which is truthy for ANY non-empty `changed`. The intent (and,
            # coincidentally, the effective behavior) is "more than one row
            # changed": reset the faster counters and carry into the slowest.
            for change in changed[:-1]:
                indices[change] = 0
            indices[changed[-1]] += 1
        """
        Store the indices for the current column in the dataset
        """
        ds_spec_inds_mat[change_sort, jcol] = indices
    return ds_spec_inds_mat
def calc_chunks(dimensions, dtype_byte_size, unit_chunks=None, max_chunk_mem=10240):
    """
    Compute an HDF5 chunk shape for a dataset of the given dimensions that
    stays as close as possible to `max_chunk_mem` bytes while growing in
    steps of `unit_chunks`.

    Parameters
    ----------
    dimensions : array_like of int
        Shape of the data to be chunked
    dtype_byte_size : unsigned int
        Size of a single element of the data in bytes
    unit_chunks : array_like of int, optional
        Step size of the chunking in each dimension. Must match the shape of
        `dimensions`. Defaults to 1 in every dimension.
    max_chunk_mem : int, optional
        Maximum chunk size in memory in bytes. Default 10240 (10 kB) per
        h5py recommendations.

    Returns
    -------
    chunking : tuple of int
        Chunk shape, clipped so each entry lies between 1 and the
        corresponding dimension size.
    """
    if not isinstance(dimensions, (list, tuple)):
        raise TypeError('dimensions should either be a tuple or list')
    if not isinstance(dtype_byte_size, int):
        raise TypeError('dtype_byte_size should be an integer')
    if unit_chunks is not None and not isinstance(unit_chunks, (tuple, list)):
        raise TypeError('unit_chunks should either be a tuple or list')
    dims = np.asarray(dimensions, dtype=np.uint)
    # Fall back to unit steps when no chunk granularity was requested
    if unit_chunks is None:
        cur_chunks = np.ones_like(dims)
    else:
        cur_chunks = np.asarray(unit_chunks, dtype=np.uint)
        if cur_chunks.shape != dims.shape:
            raise ValueError('Unit chunk size must have the same shape as the input dataset.')
    # Remember the per-dimension increment used when growing the chunk
    step = cur_chunks.copy()
    # Grow until the chunk would exceed the memory budget or covers the data
    while np.prod(cur_chunks) * dtype_byte_size <= max_chunk_mem:
        if np.all(cur_chunks >= dims):
            break
        # Expand the dimension whose chunk currently covers the smallest
        # fraction of its full extent
        grow_dim = np.argmax(dims / cur_chunks)
        cur_chunks[grow_dim] += step[grow_dim]
    # Keep every chunk dimension within [1, dimension size]
    cur_chunks = np.clip(cur_chunks, np.ones_like(cur_chunks), dims)
    return tuple(cur_chunks)
| 11,302 | 35.111821 | 119 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/usi_data.py | # -*- coding: utf-8 -*-
"""
:class:`~pyUSID.io.usi_data.USIDataset` class that simplifies slicing, visualization, reshaping, etc. of USID datasets
Created on Thu Sep 7 21:14:25 2017
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import os
import sys
from warnings import warn
import h5py
import numpy as np
import dask.array as da
import matplotlib.pyplot as plt
from sidpy.base.string_utils import validate_single_string_arg, \
validate_list_of_strings
from sidpy.base.num_utils import contains_integers, get_exponent
from sidpy.hdf.hdf_utils import get_attr, lazy_load_array, copy_attributes
from sidpy.hdf.dtype_utils import flatten_to_real, is_complex_dtype
from sidpy.viz.jupyter_utils import simple_ndim_visualizer
from sidpy.viz.plot_utils import plot_map, get_plot_grid_size
from .hdf_utils import check_if_main, create_results_group, write_reduced_anc_dsets, link_as_main, \
get_dimensionality, get_sort_order, get_unit_values, reshape_to_n_dims, write_main_dataset, reshape_from_n_dims
from .dimension import Dimension
if sys.version_info.major == 3:
unicode = str
class USIDataset(h5py.Dataset):
"""
A class that simplifies slicing, visualization, reshaping, reduction etc. of USID datasets in HDF5 files.
This class extends the :class:`h5py.Dataset`.
"""
    def __init__(self, h5_ref, sort_dims=False):
        """
        Wraps an existing USID Main dataset as a USIDataset, caching the four
        ancillary datasets, dimension labels / sizes, sort orders, and (when
        possible) a lazy N-dimensional view of the data.

        Parameters
        ----------
        h5_ref : :class:`h5py.Dataset`
            The dataset which is actually a USID Main dataset
        sort_dims : bool, Optional. Default=False
            If set to True - Dimensions will be sorted from slowest to fastest
            Else - Dimensions will be arranged as they appear in ancillary datasets

        Methods
        -------
        self.get_current_sorting
        self.toggle_sorting
        self.get_pos_values
        self.get_spec_values
        self.get_n_dim_form
        self.slice

        Attributes
        ----------
        self.h5_spec_vals : :class:`h5py.Dataset`
            Associated Spectroscopic Values dataset
        self.h5_spec_inds : :class:`h5py.Dataset`
            Associated Spectroscopic Indices dataset
        self.h5_pos_vals : :class:`h5py.Dataset`
            Associated Position Values dataset
        self.h5_pos_inds : :class:`h5py.Dataset`
            Associated Position Indices dataset
        self.pos_dim_labels : list of str
            The labels for the position dimensions.
        self.spec_dim_labels : list of str
            The labels for the spectroscopic dimensions.
        self.n_dim_labels : list of str
            The labels for the n-dimensional dataset.
        self.pos_dim_sizes : list of int
            A list of the sizes of each position dimension.
        self.spec_dim_sizes : list of int
            A list of the sizes of each spectroscopic dimension.
        self.n_dim_sizes : list of int
            A list of the sizes of each dimension.

        Raises
        ------
        TypeError
            If `h5_ref` is not a USID Main dataset per `check_if_main`

        Notes
        -----
        The order of all labels and sizes attributes is determined by the current value of `sort_dims`.
        """
        if not check_if_main(h5_ref):
            raise TypeError('Supply a h5py.Dataset object that is a USID main dataset')
        # Bind this object to the same low-level HDF5 dataset as h5_ref
        super(USIDataset, self).__init__(h5_ref.id)
        # User accessible properties
        # The required Position and Spectroscopic datasets, resolved through
        # the HDF5 references stored as attributes on the main dataset
        self.h5_spec_vals = self.file[self.attrs['Spectroscopic_Values']]
        self.h5_spec_inds = self.file[self.attrs['Spectroscopic_Indices']]
        self.h5_pos_vals = self.file[self.attrs['Position_Values']]
        self.h5_pos_inds = self.file[self.attrs['Position_Indices']]
        # The dimension labels as they appear in the ancillary datasets
        self.__orig_pos_dim_labels = get_attr(self.h5_pos_inds, 'labels')
        self.__orig_spec_dim_labels = get_attr(self.h5_spec_inds, 'labels')
        # Data descriptors - human readable 'quantity (units)' strings
        self.data_descriptor = '{} ({})'.format(get_attr(self, 'quantity'), get_attr(self, 'units'))
        self.pos_dim_descriptors = self.__get_anc_labels(self.h5_pos_inds)
        self.spec_dim_descriptors = self.__get_anc_labels(self.h5_spec_inds)
        # The size of each dimension; position indices are transposed so that
        # both matrices are arranged as [dimension, steps]
        self.__orig_pos_dim_sizes = np.array(get_dimensionality(np.transpose(self.h5_pos_inds)))
        self.__orig_spec_dim_sizes = np.array(get_dimensionality(np.atleast_2d(self.h5_spec_inds)))
        # Sorted dimension order (fastest to slowest varying)
        self.__pos_sort_order = get_sort_order(np.transpose(self.h5_pos_inds))
        self.__spec_sort_order = get_sort_order(np.atleast_2d(self.h5_spec_inds))
        # internal book-keeping / we don't want users to mess with these?
        self.__orig_n_dim_sizes = np.append(self.__orig_pos_dim_sizes, self.__orig_spec_dim_sizes)
        self.__orig_n_dim_labs = np.append(self.__orig_pos_dim_labels, self.__orig_spec_dim_labels)
        # s2f = slowest-to-fastest, f2s = fastest-to-slowest orderings over the
        # combined (position + spectroscopic) dimension axes
        self.__n_dim_sort_order_orig_s2f = np.append(self.__pos_sort_order[::-1],
                                                     self.__spec_sort_order[::-1] + len(self.__pos_sort_order))
        self.__n_dim_sort_order_orig_f2s = np.append(self.__pos_sort_order,
                                                     self.__spec_sort_order + len(self.__pos_sort_order))
        self.__n_dim_data_orig = None
        self.__n_dim_data_s2f = None
        self.__curr_ndim_form = None
        self.__n_dim_form_avail = False
        # Should the dimensions be sorted from slowest to fastest
        self.__sort_dims = sort_dims
        # Declaring var names within init
        self.pos_dim_labels = None
        self.spec_dim_labels = None
        self.pos_dim_sizes = None
        self.spec_dim_sizes = None
        self.n_dim_labels = None
        self.n_dim_sizes = None
        # Lazy (dask) view of the flattened 2D data for memory-safe slicing
        self.__lazy_2d = lazy_load_array(self)
        self.__set_labels_and_sizes()
        try:
            # Cache a lazy N-dimensional view plus its slow-to-fast transpose
            self.__n_dim_data_orig = self.get_n_dim_form(lazy=True)
            self.__n_dim_form_avail = True
            self.__n_dim_data_s2f = self.__n_dim_data_orig.transpose(tuple(self.__n_dim_sort_order_orig_s2f))
        except ValueError:
            # Not all USID datasets can be expressed as a dense N-dim array
            warn('This dataset does not have an N-dimensional form')
        self.__set_n_dim_view()
def __eq__(self, other):
if isinstance(other, h5py.Dataset):
return super(USIDataset, self).__eq__(other)
return False
    def __repr__(self):
        """
        Returns the h5py representation followed by a human-readable summary:
        location, quantity/units, and every position and spectroscopic
        dimension with its size, plus the dtype (or compound field names).
        """
        h5_str = super(USIDataset, self).__repr__()
        # One '\tname - size: N' line per dimension, in file (unsorted) order
        pos_str = ' \n'.join(['\t{} - size: {}'.format(dim_name, str(dim_size)) for dim_name, dim_size in
                              zip(self.__orig_pos_dim_labels, self.__orig_pos_dim_sizes)])
        spec_str = ' \n'.join(['\t{} - size: {}'.format(dim_name, str(dim_size)) for dim_name, dim_size in
                               zip(self.__orig_spec_dim_labels, self.__orig_spec_dim_sizes)])
        usid_str = ' \n'.join(['located at:',
                               '\t' + self.name,
                               'Data contains:', '\t' + self.data_descriptor,
                               'Data dimensions and original shape:',
                               'Position Dimensions:',
                               pos_str,
                               'Spectroscopic Dimensions:',
                               spec_str])
        # Compound dtypes list their field names; simple dtypes just the name
        if self.dtype.fields is not None:
            usid_str = '\n'.join([usid_str,
                                  'Data Fields:', '\t' + ', '.join([field for field in self.dtype.fields])])
        else:
            usid_str = '\n'.join([usid_str,
                                  'Data Type:', '\t' + self.dtype.name])
        # Python 2 requires byte strings from __repr__
        if sys.version_info.major == 2:
            usid_str = usid_str.encode('utf8')
        return '\n'.join([h5_str, usid_str])
def __set_labels_and_sizes(self):
"""
Sets the labels and sizes attributes to the correct values based on
the value of `self.__sort_dims`
"""
if self.__sort_dims:
self.pos_dim_labels = self.__orig_pos_dim_labels[self.__pos_sort_order].tolist()
self.spec_dim_labels = self.__orig_spec_dim_labels[self.__spec_sort_order].tolist()
self.pos_dim_sizes = self.__orig_pos_dim_sizes[self.__pos_sort_order].tolist()
self.spec_dim_sizes = self.__orig_spec_dim_sizes[self.__spec_sort_order].tolist()
self.n_dim_labels = self.__orig_n_dim_labs[self.__n_dim_sort_order_orig_s2f].tolist()
self.n_dim_sizes = self.__orig_n_dim_sizes[self.__n_dim_sort_order_orig_s2f].tolist()
else:
self.pos_dim_labels = self.__orig_pos_dim_labels.tolist()
self.spec_dim_labels = self.__orig_spec_dim_labels.tolist()
self.pos_dim_sizes = self.__orig_pos_dim_sizes.tolist()
self.spec_dim_sizes = self.__orig_spec_dim_sizes.tolist()
self.n_dim_labels = self.__orig_n_dim_labs.tolist()
self.n_dim_sizes = self.__orig_n_dim_sizes.tolist()
def __set_n_dim_view(self):
"""
Sets the current view of the N-dimensional form of the dataset
"""
self.__curr_ndim_form = self.__n_dim_data_s2f if self.__sort_dims else self.__n_dim_data_orig
@staticmethod
def __get_anc_labels(h5_dset):
"""
Takes any dataset which has the labels and units attributes and returns a list of strings
formatted as 'label k (unit k)'
Parameters
----------
h5_dset : h5py.Dataset object
dataset which has labels and units attributes
Returns
-------
labels : list
list of strings formatted as 'label k (unit k)'
"""
labels = []
for lab, unit in zip(get_attr(h5_dset, 'labels'), get_attr(h5_dset, 'units')):
labels.append('{} ({})'.format(lab, unit))
return labels
def get_pos_values(self, dim_name):
"""
Extract the reference values for the specified position dimension
Parameters
----------
dim_name : str
Name of one of the dimensions in `self.pos_dim_labels`
Returns
-------
dim_values : :class:`numpy.ndarray`
Array containing the unit values of the dimension `dim_name`
"""
dim_name = validate_single_string_arg(dim_name, 'dim_name')
return get_unit_values(self.h5_pos_inds, self.h5_pos_vals)[dim_name]
def get_spec_values(self, dim_name):
"""
Extract the values for the specified spectroscopic dimension
Parameters
----------
dim_name : str
Name of one of the dimensions in `self.spec_dim_labels`
Returns
-------
dim_values : :class:`numpy.ndarray`
Array containing the unit values of the dimension `dim_name`
"""
dim_name = validate_single_string_arg(dim_name, 'dim_name')
return get_unit_values(self.h5_spec_inds, self.h5_spec_vals)[dim_name]
def get_current_sorting(self):
"""
Prints the current sorting method.
"""
if self.__sort_dims:
print('Data dimensions are sorted in order from fastest changing dimension to slowest.')
else:
print('Data dimensions are in the order they occur in the file.')
    def toggle_sorting(self):
        """
        Toggles between sorting from the fastest changing dimension to the slowest and sorting based on the
        order of the labels
        """
        # Flip the flag first - the two refresh calls below read it to decide
        # which ordering (sorted vs. file order) the public attributes expose
        self.__sort_dims = not self.__sort_dims
        self.__set_labels_and_sizes()
        self.__set_n_dim_view()
def get_n_dim_form(self, as_scalar=False, lazy=False):
"""
Reshapes the dataset to an N-dimensional array
Parameters
----------
as_scalar : bool, optional. Default = False
If False, the data is returned in its original (complex, compound) dtype
Else, the data is flattened to a real-valued dataset
lazy : bool, optional. Default = False
If set to false, n_dim_data will be a :class:`numpy.ndarray`
Else returned object is :class:`dask.array.core.Array`
Returns
-------
n_dim_data : :class:`numpy.ndarray` or :class:`dask.core.Array`
N-dimensional form of the dataset
"""
if self.__curr_ndim_form is None:
# To be on the safe side, always read as dask Array
n_dim_data, success = reshape_to_n_dims(self, sort_dims=self.__sort_dims,
lazy=True)
if success is not True:
raise ValueError('Unable to reshape data to N-dimensional form.')
else:
n_dim_data = self.__curr_ndim_form
if as_scalar:
n_dim_data = flatten_to_real(n_dim_data)
if not lazy:
assert(isinstance(n_dim_data, da.core.Array))
n_dim_data = n_dim_data.compute()
return n_dim_data
def __validate_slice_dict(self, slice_dict):
"""
Validates the slice dictionary
Parameters
----------
slice_dict : dict
Dictionary of array-likes.
Returns
-------
None
"""
if not isinstance(slice_dict, dict):
raise TypeError('slice_dict should be a dictionary of slice '
'objects')
for key, val in slice_dict.items():
# Make sure the dimension is valid
if key not in self.n_dim_labels:
raise KeyError('Cannot slice on dimension {}. Valid '
'dimensions are {}.'.format(key,
self.n_dim_labels))
if not isinstance(val, (slice, list, np.ndarray, tuple, int,
np.int, np.int64, np.int32, np.int16)):
raise TypeError('The values for a slice must be a slice, list,'
' numpy array, a tuple, or an int. Provided '
'value: {} for dimension: {} was of type: {}'
''.format(val, key, type(val)))
return True
def __slice_n_dim_form(self, slice_dict, verbose=False, lazy=False):
"""
Slices the N-dimensional form of the dataset based on the slice dictionary.
Assumes that an N-dimensional form exists and is what was requested
Parameters
----------
slice_dict : dict
Dictionary of array-likes. for any dimension one needs to slice
verbose : bool, optional
Whether or not to print debugging statements
lazy : bool, optional. Default = False
If set to false, data_slice will be a :class:`numpy.ndarray`
Else returned object is :class:`dask.array.core.Array`
Returns
-------
data_slice : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
Slice of the dataset.
success : bool
Always True
"""
nd_slice = []
for dim_name in self.n_dim_labels:
nd_slice.append(slice_dict.get(dim_name, slice(None)))
# Dask multidimensional slicing does not work if list is passed:
nd_slice = tuple(nd_slice)
if verbose:
print(self.n_dim_labels)
print(nd_slice)
sliced_dset = self.__curr_ndim_form[nd_slice]
if not lazy:
sliced_dset = sliced_dset.compute()
return sliced_dset, True
    def slice(self, slice_dict, ndim_form=True, as_scalar=False, verbose=False, lazy=False):
        """
        Slice the dataset based on an input dictionary of 'str': slice pairs.
        Each string should correspond to a dimension label. The slices can be
        array-likes or slice objects.

        Parameters
        ----------
        slice_dict : dict
            Dictionary of array-likes. for any dimension one needs to slice
        ndim_form : bool, optional
            Whether or not to return the slice in it's N-dimensional form. Default = True
        as_scalar : bool, optional
            Should the data be returned as scalar values only.
        verbose : bool, optional
            Whether or not to print debugging statements
        lazy : bool, optional. Default = False
            If set to false, data_slice will be a :class:`numpy.ndarray`
            Else returned object is :class:`dask.array.core.Array`

        Returns
        -------
        data_slice : :class:`numpy.ndarray`, or :class:`dask.array.core.Array`
            Slice of the dataset. Dataset has been reshaped to N-dimensions if `success` is True, only
            by Position dimensions if `success` is 'Positions', or not reshape at all if `success`
            is False.
        success : str or bool
            Informs the user as to how the data_slice has been shaped.
        """
        # TODO: Accept sequences of integers and build a list of slice objects for each dimension
        if slice_dict is None:
            slice_dict = dict()
        else:
            self.__validate_slice_dict(slice_dict)
        if not isinstance(as_scalar, bool):
            raise TypeError('as_scalar should be a bool')
        if not isinstance(verbose, bool):
            raise TypeError('verbose should be a bool')
        # Fast path: slice the cached N-dimensional view directly when possible
        if self.__n_dim_form_avail and ndim_form:
            return self.__slice_n_dim_form(slice_dict, verbose=verbose, lazy=lazy)
        # Convert the slice dictionary into lists of indices for each dimension
        pos_slice, spec_slice = self._get_pos_spec_slices(slice_dict)
        if verbose:
            print('Position slice: shape - {}'.format(pos_slice.shape))
            print(pos_slice)
            print('Spectroscopic slice: shape - {}'.format(spec_slice.shape))
            print(spec_slice)
        # Now that the slices are built, we just need to apply them to the data
        # This method is slow and memory intensive but shouldn't fail if multiple lists are given.
        if lazy:
            raw_2d = self.__lazy_2d
        else:
            raw_2d = self
        if verbose:
            print('Slicing to 2D based on dataset of shape: {} and type: {}'
                  ''.format(raw_2d.shape, type(raw_2d)))
        # h5py fancy indexing cannot take two index lists at once, so slice one
        # axis at a time; start with the axis that shrinks the data the most
        if lazy:
            data_slice = raw_2d[pos_slice[:, 0], :][:, spec_slice[:, 0]]
        else:
            if len(pos_slice) <= len(spec_slice):
                # Fewer final positions than spectra
                data_slice = np.atleast_2d(raw_2d[pos_slice[:, 0], :])[:, spec_slice[:, 0]]
            else:
                # Fewer final spectral points compared to positions
                data_slice = np.atleast_2d(raw_2d[:, spec_slice[:, 0]])[pos_slice[:, 0], :]
        if verbose:
            print('data_slice of shape: {} and type: {} after slicing'
                  ''.format(data_slice.shape, type(data_slice)))
        if not lazy:
            # atleast_2d after squeeze can flip a column into a row; undo that
            orig_shape = data_slice.shape
            data_slice = np.atleast_2d(np.squeeze(data_slice))
            if data_slice.shape[0] == orig_shape[1] and data_slice.shape[1] == orig_shape[0]:
                data_slice = data_slice.T
        if verbose:
            print('data_slice of shape: {} after squeezing'.format(data_slice.shape))
        # Slice the ancillary matrices the same way so they describe the result
        pos_inds = self.h5_pos_inds[pos_slice.ravel(), :]
        spec_inds = self.h5_spec_inds[:, spec_slice.ravel()].reshape([self.h5_spec_inds.shape[0], -1])
        if verbose:
            print('Sliced position indices:')
            print(pos_inds)
            print('Spectroscopic Indices (transposed)')
            print(spec_inds.T)
        # At this point, the empty dimensions MUST be removed in order to avoid problems with dimension sort etc.
        def remove_singular_dims(anc_inds):
            # Keeps only dimensions with more than one unique index value
            new_inds = []
            for dim_values in anc_inds:
                if len(np.unique(dim_values)) > 1:
                    new_inds.append(dim_values)
            # if all dimensions are removed?
            if len(new_inds) == 0:
                new_inds = np.arange(1)
            else:
                new_inds = np.array(new_inds)
            return new_inds
        pos_inds = np.atleast_2d(remove_singular_dims(pos_inds.T).T)
        spec_inds = np.atleast_2d(remove_singular_dims(spec_inds))
        if verbose:
            print('After removing any singular dimensions')
            print('Sliced position indices:')
            print(pos_inds)
            print('Spectroscopic Indices (transposed)')
            print(spec_inds.T)
            print('data slice of shape: {}. Position indices of shape: {}, Spectroscopic indices of shape: {}'
                  '.'.format(data_slice.shape, pos_inds.shape, spec_inds.shape))
        success = True
        if ndim_form:
            # TODO: if data is already loaded into memory, try to avoid I/O and slice in memory!!!!
            data_slice, success = reshape_to_n_dims(data_slice, h5_pos=pos_inds, h5_spec=spec_inds, verbose=verbose, lazy=lazy)
            data_slice = data_slice.squeeze()
        if as_scalar:
            return flatten_to_real(data_slice), success
        else:
            return data_slice, success
    def _get_pos_spec_slices(self, slice_dict):
        """
        Convert the slice dictionary into two lists of indices, one each for the position and spectroscopic
        dimensions.

        Parameters
        ----------
        slice_dict : dict
            Dictionary of array-likes keyed by dimension name.

        Returns
        -------
        pos_slice : list of unsigned int
            Position indices included in the slice
        spec_slice : list of unsigned int
            Spectroscopic indices included in the slice
        """
        if slice_dict is None:
            slice_dict = dict()
        self.__validate_slice_dict(slice_dict)
        # Trivial case: no constraints -> select every row and column
        if len(slice_dict) == 0:
            pos_slice = np.expand_dims(np.arange(self.shape[0]), axis=1)
            spec_slice = np.expand_dims(np.arange(self.shape[1]), axis=1)
            return pos_slice, spec_slice
        # Create default slices that include the entire dimension
        n_dim_slices = dict()
        n_dim_slices_sizes = dict()
        for dim_lab, dim_size in zip(self.n_dim_labels, self.n_dim_sizes):
            n_dim_slices[dim_lab] = list(range(dim_size))
            n_dim_slices_sizes[dim_lab] = len(n_dim_slices[dim_lab])
        # Loop over all the keyword arguments and create slices for each.
        for key, val in slice_dict.items():
            # Check the value and convert to a slice object if possible.
            # Use a list if not.
            if isinstance(val, slice):
                # Applying the slice to the full index list resolves negative
                # indices / step values into explicit integer lists
                val = n_dim_slices[key][val]
            elif isinstance(val, list):
                pass
            elif isinstance(val, np.ndarray):
                val = val.flatten().tolist()
            elif isinstance(val, tuple):
                val = list(val)
            elif isinstance(val, int):
                val = [val]
            else:
                raise TypeError('The slices must be array-likes or slice objects.')
            if not contains_integers(val, min_val=0):
                # TODO: Is there a more elegant way of handling this?
                raise ValueError('Slicing indices should be >= 0')
            # check to make sure that the values are not out of bounds:
            dim_ind = np.squeeze(np.argwhere(self.__orig_n_dim_labs == key))
            cur_dim_size = self.__orig_n_dim_sizes[dim_ind]
            if np.max(val) >= cur_dim_size:
                raise IndexError('slicing argument for dimension: {} was beyond {}'.format(key, cur_dim_size))
            n_dim_slices[key] = val
            n_dim_slices_sizes[key] = len(val)
        # Build the list of position slice indices
        for pos_ind, pos_lab in enumerate(self.__orig_pos_dim_labels):
            # n_dim_slices[pos_lab] = np.isin(self.h5_pos_inds[:, pos_ind], n_dim_slices[pos_lab])
            # Boolean mask: rows whose index for this dimension was requested
            temp = [self.h5_pos_inds[:, pos_ind] == item for item in n_dim_slices[pos_lab]]
            n_dim_slices[pos_lab] = np.any(np.vstack(temp), axis=0)
            # A row survives only if it satisfies the mask for EVERY dimension
            if pos_ind == 0:
                pos_slice = n_dim_slices[pos_lab]
            else:
                pos_slice = np.logical_and(pos_slice, n_dim_slices[pos_lab])
        pos_slice = np.argwhere(pos_slice)
        # Do the same for the spectroscopic slice
        for spec_ind, spec_lab in enumerate(self.__orig_spec_dim_labels):
            # n_dim_slices[spec_lab] = np.isin(self.h5_spec_inds[spec_ind], n_dim_slices[spec_lab])
            temp = [self.h5_spec_inds[spec_ind] == item for item in n_dim_slices[spec_lab]]
            n_dim_slices[spec_lab] = np.any(np.vstack(temp), axis=0)
            if spec_ind == 0:
                spec_slice = n_dim_slices[spec_lab]
            else:
                spec_slice = np.logical_and(spec_slice, n_dim_slices[spec_lab])
        spec_slice = np.argwhere(spec_slice)
        # TODO: Shouldn't we simply squeeze before returning?
        return pos_slice, spec_slice
    def _get_dims_for_slice(self, slice_dict=None, verbose=False):
        """
        Provides Dimension objects that express the reference position and spectroscopic dimensions for this dataset
        once it is sliced via the provided slicing dictionary.

        Parameters
        ----------
        slice_dict : dict (optional)
            Dictionary to slice one or more dimensions of the dataset by indices
        verbose : bool (optional)
            Whether or not to print debugging statements to stdout. Default = False

        Returns
        -------
        pos_dims : list
            List of :class:`~pyUSID.io.write_utils.Dimension` objects for each of the remaining position dimensions
        spec_dims : list
            List of :class:`~pyUSID.io.write_utils.Dimension` objects for each of the remaining spectroscopic dimensions
        """
        if slice_dict is None:
            slice_dict = dict()
        pos_labels = self.pos_dim_labels
        pos_units = get_attr(self.h5_pos_inds, 'units')
        spec_labels = self.spec_dim_labels
        spec_units = get_attr(self.h5_spec_inds, 'units')
        self.__validate_slice_dict(slice_dict)
        # First work on slicing the ancillary matrices. Determine dimensionality before slicing n dims:
        pos_slices, spec_slices = self._get_pos_spec_slices(slice_dict)
        # Things are too big to print here.
        pos_inds = self.h5_pos_inds[np.squeeze(pos_slices), :]
        pos_vals = self.h5_pos_vals[np.squeeze(pos_slices), :]
        if verbose:
            print('Checking for and correcting the dimensionality of the indices and values datasets:')
            print('Pos Inds: {}, Pos Vals: {}'.format(pos_inds.shape, pos_vals.shape))
        # squeeze can drop to 1D when a single row survives; restore 2D shape
        if pos_inds.ndim == 1:
            pos_inds = np.expand_dims(pos_inds, axis=0)
            pos_vals = np.expand_dims(pos_vals, axis=0)
        spec_inds = self.h5_spec_inds[:, np.squeeze(spec_slices)]
        spec_vals = self.h5_spec_vals[:, np.squeeze(spec_slices)]
        if verbose:
            print('Checking for and correcting the dimensionality of the indices and values datasets:')
            print('Spec Inds: {}, Spec Vals: {}'.format(spec_inds.shape, spec_vals.shape))
        if spec_inds.ndim == 1:
            spec_inds = np.expand_dims(spec_inds, axis=1)
            spec_vals = np.expand_dims(spec_vals, axis=1)
        if verbose:
            print('After correction of shape:')
            print('Pos Inds: {}, Pos Vals: {}, Spec Inds: {}, Spec Vals: {}'.format(pos_inds.shape, pos_vals.shape,
                                                                                    spec_inds.shape,
                                                                                    spec_vals.shape))
        # TODO: This assumes an N-dimensional form!
        pos_unit_values = get_unit_values(pos_inds, pos_vals, all_dim_names=self.pos_dim_labels, is_spec=False,
                                          verbose=verbose)
        spec_unit_values = get_unit_values(spec_inds, spec_vals, all_dim_names=self.spec_dim_labels, is_spec=True,
                                           verbose=verbose)
        if verbose:
            print('Position unit values:')
            print(pos_unit_values)
            print('Spectroscopic unit values:')
            print(spec_unit_values)
        # Now unit values will be correct for this slicing
        # additional benefit - remove those dimensions which have at most 1 value:
        def assemble_dimensions(full_labels, full_units, full_values):
            # Drops any dimension that collapsed to < 2 values after slicing,
            # keeping labels / units / values mutually consistent
            new_labels = []
            new_units = []
            for dim_ind, dim_name in enumerate(full_labels):
                if len(full_values[dim_name]) < 2:
                    del (full_values[dim_name])
                else:
                    new_labels.append(dim_name)
                    new_units.append(full_units[dim_ind])
            return np.array(new_labels), np.array(new_units), full_values
        pos_labels, pos_units, pos_unit_values = assemble_dimensions(pos_labels, pos_units, pos_unit_values)
        spec_labels, spec_units, spec_unit_values = assemble_dimensions(spec_labels, spec_units, spec_unit_values)
        # Ensuring that there are always at least 1 position and spectroscopic dimensions:
        if len(pos_labels) == 0:
            pos_labels = ['arb.']
            pos_units = ['a. u.']
            pos_unit_values = {pos_labels[-1]: np.array([1])}
        if len(spec_labels) == 0:
            spec_labels = ['arb.']
            spec_units = ['a. u.']
            spec_unit_values = {spec_labels[-1]: np.array([1])}
        if verbose:
            print('\n\nAfter removing singular dimensions:')
            print('Position: Labels: {}, Units: {}, Values:'.format(pos_labels, pos_units))
            print(pos_unit_values)
            print('Spectroscopic: Labels: {}, Units: {}, Values:'.format(spec_labels, spec_units))
            print(spec_unit_values)
        # Package the surviving dimensions as Dimension objects
        pos_dims = []
        for name, units in zip(pos_labels, pos_units):
            pos_dims.append(Dimension(name, units, pos_unit_values[name]))
        spec_dims = []
        for name, units in zip(spec_labels, spec_units):
            spec_dims.append(Dimension(name, units, spec_unit_values[name]))
        return pos_dims, spec_dims
    def slice_to_dataset(self, slice_dict, dset_name=None, verbose=False, **kwargs):
        """
        Slices the dataset, writes its output back to the HDF5 file, and returns a USIDataset object
        Parameters
        ----------
        slice_dict : dict
            Dictionary to slice one or more dimensions of the dataset by indices
        dset_name : str (optional)
            Name of the new USID Main dataset in the HDF5 file that will contain the sliced data.
            Default - the sliced dataset takes the same name as this source dataset
        verbose : bool (optional)
            Whether or not to print debugging statements to stdout. Default = False
        kwargs : keyword arguments
            keyword arguments that will be passed on to write_main_data()
        Returns
        -------
        h5_trunc : USIDataset
            USIDataset containing the sliced data
        """
        if slice_dict is None:
            raise ValueError('slice_dict should not be None or be empty')
        # Default to this source dataset's own (base) name unless told otherwise
        if dset_name is None:
            dset_name = self.name.split('/')[-1]
        else:
            dset_name = validate_single_string_arg(dset_name, 'dset_name')
        if verbose:
            print('Decided / provided name of new sliced HDF5 dataset to be: {}'.format(dset_name))
        # Build Dimension objects describing the ancillary datasets after slicing
        pos_dims, spec_dims = self._get_dims_for_slice(slice_dict=slice_dict, verbose=verbose)
        if verbose:
            print('Sliced ancillary datasets returned:\n------------------------------------------')
            print('Position:')
            for dim in pos_dims:
                print(dim)
            print('\nSpectroscopic:')
            for dim in spec_dims:
                print(dim)
        # Slice the main dataset itself, kept in flattened 2D (USID) form for writing
        data_slice_2d, success = self.slice(slice_dict, ndim_form=False, as_scalar=False, verbose=verbose)
        if not success:
            raise ValueError('Unable to slice the dataset. success returned: {}'.format(success))
        if verbose:
            print('Slicing the main dataset returned:\n------------------------------------------')
            print('Reshape success: {}'.format(success))
            print('2D data shape: {}'.format(data_slice_2d.shape))
        # check if a pos dimension was sliced:
        pos_sliced = False
        for dim_name in slice_dict.keys():
            if dim_name in self.pos_dim_labels:
                pos_sliced = True
                if verbose:
                    print('Position dimension: {} was sliced'.format(dim_name))
                break
        if not pos_sliced:
            # No position dimension was touched - reuse the existing ancillary
            # position datasets instead of writing duplicates
            pos_dims = None
            kwargs['h5_pos_inds'] = self.h5_pos_inds
            kwargs['h5_pos_vals'] = self.h5_pos_vals
            if verbose:
                print('Reusing this main datasets position datasets')
        else:
            if verbose:
                print('Using new Position dimensions:\n------------------------------------------')
        # Same logic, applied to the spectroscopic dimensions:
        spec_sliced = False
        for dim_name in slice_dict.keys():
            if dim_name in self.spec_dim_labels:
                spec_sliced = True
                if verbose:
                    print('Spectroscopic dimension: {} was sliced'.format(dim_name))
                break
        if not spec_sliced:
            spec_dims = None
            kwargs['h5_spec_inds'] = self.h5_spec_inds
            kwargs['h5_spec_vals'] = self.h5_spec_vals
            if verbose:
                print('Reusing this main datasets spectroscopic datasets')
        else:
            if verbose:
                print('Using new spectroscopic dimensions:\n------------------------------------------')
        # Results are written into a new (indexed) group next to this dataset
        h5_group = create_results_group(self, 'slice')
        # TODO: Make this memory safe.
        h5_trunc = write_main_dataset(h5_group, data_slice_2d, dset_name, get_attr(self, 'quantity'),
                                      get_attr(self, 'units'), pos_dims, spec_dims, verbose=verbose, **kwargs)
        return h5_trunc
    def visualize(self, slice_dict=None, verbose=False, **kwargs):
        """
        Interactive visualization of this dataset. **Only available on jupyter notebooks**
        Parameters
        ----------
        slice_dict : dictionary, optional
            Slicing instructions
        verbose : bool, optional
            Whether or not to print debugging statements. Default = Off
        Returns
        -------
        fig : :class:`matplotlib.figure` handle
            Handle for the figure object
        axis : :class:`matplotlib.Axes.axis` object
            Axis within which the data was plotted. Note - the interactive visualizer does not return this object
        """
        if slice_dict is None:
            # No slicing requested - visualize the dataset as-is (max 2 + 2 dims)
            if len(self.pos_dim_labels) > 2 or len(self.spec_dim_labels) > 2:
                raise NotImplementedError('Unable to support visualization of more than 2 position / spectroscopic '
                                          'dimensions. Try slicing the dataset')
            data_slice = self.get_n_dim_form()
            spec_unit_values = get_unit_values(self.h5_spec_inds, self.h5_spec_vals)
            pos_unit_values = get_unit_values(self.h5_pos_inds, self.h5_pos_vals)
            pos_dims = []
            for name, units in zip(self.pos_dim_labels, get_attr(self.h5_pos_inds, 'units')):
                pos_dims.append(Dimension(name, units, pos_unit_values[name]))
            spec_dims = []
            for name, units in zip(self.spec_dim_labels, get_attr(self.h5_spec_inds, 'units')):
                spec_dims.append(Dimension(name, units, spec_unit_values[name]))
        else:
            pos_dims, spec_dims = self._get_dims_for_slice(slice_dict=slice_dict, verbose=verbose)
            # see if the total number of pos and spec keys are either 1 or 2
            if not (0 < len(pos_dims) < 3) or not (0 < len(spec_dims) < 3):
                raise ValueError('Number of position ({}) / spectroscopic dimensions ({}) more than 2'
                                 '. Try slicing again'.format(len(pos_dims), len(spec_dims)))
            # now should be safe to slice:
            data_slice, success = self.slice(slice_dict, ndim_form=True, lazy=False)
            if not success:
                raise ValueError('Something went wrong when slicing the dataset. slice message: {}'.format(success))
            # don't forget to remove singular dimensions via a squeeze
            data_slice = np.squeeze(data_slice)
            # Unlikely event that all dimensions were removed and we are left with a scalar:
            if data_slice.ndim == 0:
                # Nothing to visualize - just return a value
                return data_slice
            # There is a chance that the data dimensionality may have reduced to 1:
            elif data_slice.ndim == 1:
                if len(pos_dims) == 0:
                    data_slice = np.expand_dims(data_slice, axis=0)
                else:
                    data_slice = np.expand_dims(data_slice, axis=-1)
        if verbose:
            print('Position Dimensions:')
            for item in pos_dims:
                print('{}\n{}'.format(len(item.values), item))
            print('Spectroscopic Dimensions:')
            for item in spec_dims:
                print('{}\n{}'.format(len(item.values), item))
            print('N dimensional data sent to visualizer of shape: {}'.format(data_slice.shape))
        # Handle the simple cases first:
        fig_args = dict()
        temp = kwargs.pop('figsize', None)
        if temp is not None:
            fig_args['figsize'] = temp
        def plot_curve(ref_dims, curve):
            # Helper: 1D line plot of `curve` against the single reference dimension.
            # Rescale the x axis into a readable range; record the factor in the label
            x_suffix = ''
            x_exp = get_exponent(ref_dims[0].values)
            if x_exp < -2 or x_exp > 3:
                ref_dims[0] /= 10 ** x_exp
                x_suffix = ' x $10^{' + str(x_exp) + '}$'
            if is_complex_dtype(curve.dtype):
                # Complex data: plot magnitude and phase in stacked subplots
                fig, axes = plt.subplots(nrows=2, **fig_args)
                for axis, ufunc, comp_name in zip(axes.flat, [np.abs, np.angle], ['Magnitude', 'Phase']):
                    axis.plot(ref_dims[0].values, ufunc(np.squeeze(curve)), **kwargs)
                    if comp_name == 'Magnitude':
                        axis.set_title(self.name + '\n(' + comp_name + ')', pad=15)
                        axis.set_ylabel(self.data_descriptor)
                    else:
                        axis.set_title(comp_name, pad=15)
                        axis.set_ylabel('Phase (rad)')
                    axis.set_xlabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + x_suffix)
                fig.tight_layout()
                return fig, axes
            elif len(curve.dtype) > 0:
                # Compound dtype: one subplot per named field
                plot_grid = get_plot_grid_size(len(curve.dtype))
                fig, axes = plt.subplots(nrows=plot_grid[0], ncols=plot_grid[1], **fig_args)
                for axis, comp_name in zip(axes.flat, curve.dtype.fields):
                    axis.plot(ref_dims[0].values, np.squeeze(curve[comp_name]), **kwargs)
                    axis.set_title(comp_name, pad=15)
                    axis.set_xlabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + x_suffix)
                    axis.set_ylabel(comp_name)
                # fig.suptitle(self.name)
                fig.tight_layout()
                return fig, axes
            else:
                # Simple scalar dtype: single curve; rescale y just like x above
                y_exp = get_exponent(np.squeeze(curve))
                y_suffix = ''
                if y_exp < -2 or y_exp > 3:
                    curve = np.squeeze(curve) / 10 ** y_exp
                    y_suffix = ' x $10^{' + str(y_exp) + '}$'
                fig, axis = plt.subplots(**fig_args)
                axis.plot(ref_dims[0].values, np.squeeze(curve), **kwargs)
                axis.set_xlabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + x_suffix)
                axis.set_ylabel(self.data_descriptor + y_suffix)
                axis.set_title(self.name)
                return fig, axis
        def plot_image(ref_dims, img):
            # Helper: 2D heat map of `img` against the two reference dimensions
            # (ref_dims[0] -> y axis, ref_dims[1] -> x axis).
            exponents = [get_exponent(item.values) for item in ref_dims]
            suffix = []
            for item, scale in zip(ref_dims, exponents):
                curr_suff = ''
                if scale < -1 or scale > 3:
                    item /= 10 ** scale
                    curr_suff = ' x $10^{' + str(scale) + '}$'
                suffix.append(curr_suff)
            if is_complex_dtype(img.dtype):
                # Complex data: plot magnitude and phase maps in stacked subplots
                fig, axes = plt.subplots(nrows=2, **fig_args)
                for axis, ufunc, comp_name in zip(axes.flat, [np.abs, np.angle], ['Magnitude', 'Phase']):
                    cbar_label = self.data_descriptor
                    if comp_name == 'Phase':
                        cbar_label = 'Phase (rad)'
                    plot_map(axis, ufunc(np.squeeze(img)), show_xy_ticks=True, show_cbar=True,
                             cbar_label=cbar_label, x_vec=ref_dims[1].values, y_vec=ref_dims[0].values,
                             **kwargs)
                    axis.set_title(self.name + '\n(' + comp_name + ')', pad=15)
                    axis.set_xlabel(ref_dims[1].name + ' (' + ref_dims[1].units + ')' + suffix[1])
                    axis.set_ylabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + suffix[0])
                fig.tight_layout()
                return fig, axes
            elif len(img.dtype) > 0:
                # Compound
                # I would like to have used plot_map_stack by providing it the flattened (real) image cube
                # However, the order of the components in the cube and that provided by img.dtype.fields is not matching
                plot_grid = get_plot_grid_size(len(img.dtype))
                fig, axes = plt.subplots(nrows=plot_grid[0], ncols=plot_grid[1], **fig_args)
                for axis, comp_name in zip(axes.flat, img.dtype.fields):
                    plot_map(axis, np.squeeze(img[comp_name]), show_xy_ticks=True, show_cbar=True,
                             x_vec=ref_dims[1].values, y_vec=ref_dims[0].values, **kwargs)
                    axis.set_title(comp_name, pad=15)
                    axis.set_xlabel(ref_dims[1].name + ' (' + ref_dims[1].units + ')' + suffix[1])
                    axis.set_ylabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + suffix[0])
                # delete empty axes
                for ax_ind in range(len(img.dtype), np.prod(plot_grid)):
                    fig.delaxes(axes.flatten()[ax_ind])
                # fig.suptitle(self.name)
                fig.tight_layout()
                return fig, axes
            else:
                fig, axis = plt.subplots(**fig_args)
                # Need to convert to float since image could be unsigned integers or low precision floats
                plot_map(axis, np.float32(np.squeeze(img)), show_xy_ticks=True, show_cbar=True,
                         cbar_label=self.data_descriptor, x_vec=ref_dims[1].values, y_vec=ref_dims[0].values, **kwargs)
                try:
                    axis.set_title(self.name, pad=15)
                except AttributeError:
                    # older matplotlib versions do not accept the pad kwarg
                    axis.set_title(self.name)
                axis.set_xlabel(ref_dims[1].name + ' (' + ref_dims[1].units + ')' + suffix[1])
                axis.set_ylabel(ref_dims[0].name + ' (' + ref_dims[0].units + ')' + suffix[0])
                fig.tight_layout()
                return fig, axis
        if np.prod([len(item.values) for item in spec_dims]) == 1:
            # No spectroscopic dimensions at all
            if len(pos_dims) == 2:
                # 2D spatial map
                # Check if we need to adjust the aspect ratio of the image (only if units are same):
                if pos_dims[0].units == pos_dims[1].units:
                    kwargs['infer_aspect'] = True
                return plot_image(pos_dims, data_slice)
            elif np.prod([len(item.values) for item in pos_dims]) > 1:
                # 1D position curve:
                return plot_curve(pos_dims, data_slice)
        elif np.prod([len(item.values) for item in pos_dims]) == 1:
            if len(spec_dims) == 2:
                # 2D spectrogram
                return plot_image(spec_dims, data_slice)
        # NOTE(review): the elif below repeats the previous elif's condition, so it is
        # unreachable as written - 1D spectral curves currently fall through to the
        # interactive visualizer. It was possibly meant to be nested under the branch
        # above; confirm against upstream pyUSID before changing behavior.
        elif np.prod([len(item.values) for item in pos_dims]) == 1 and \
                np.prod([len(item.values) for item in spec_dims]) > 1:
            # 1D spectral curve:
            return plot_curve(spec_dims, data_slice)
        elif len(pos_dims) == 1 and len(spec_dims) == 1 and \
                np.prod([len(item.values) for item in pos_dims]) > 1 and \
                np.prod([len(item.values) for item in spec_dims]) > 1:
            # One spectroscopic and one position dimension
            return plot_image(pos_dims + spec_dims, data_slice)
        # If data has at least one dimension with 2 values in pos. AND spec., it can be visualized interactively:
        return simple_ndim_visualizer(data_slice, pos_dims, spec_dims, verbose=verbose, **kwargs)
def reduce(self, dims, ufunc=da.mean, to_hdf5=False, dset_name=None, verbose=False):
"""
Parameters
----------
dims : str or list of str
Names of the position and/or spectroscopic dimensions that need to be reduced
ufunc : callable, optional. Default = dask.array.mean
Reduction function such as dask.array.mean available in dask.array
to_hdf5 : bool, optional. Default = False
Whether or not to write the reduced data back to a new dataset
dset_name : str (optional)
Name of the new USID Main datset in the HDF5 file that will contain the sliced data.
Default - the sliced dataset takes the same name as this source dataset
verbose : bool, optional. Default = False
Whether or not to print any debugging statements to stdout
Returns
-------
reduced_nd : dask.array object
Dask array object containing the reduced data.
Call compute() on this object to get the equivalent numpy array
h5_main_red : USIDataset
USIDataset reference if to_hdf5 was set to True. Otherwise - None.
"""
dims = validate_list_of_strings(dims, 'dims')
for curr_dim in self.n_dim_labels:
if curr_dim not in self.n_dim_labels:
raise KeyError('{} not a dimension in this dataset'.format(curr_dim))
if ufunc not in [da.all, da.any, da.max, da.mean, da.min, da.moment, da.prod, da.std, da.sum, da.var,
da.nanmax, da.nanmean, da.nanmin, da.nanprod, da.nanstd, da.nansum, da.nanvar]:
raise NotImplementedError('ufunc must be a valid reduction function such as dask.array.mean')
# At this point, dims are valid
da_nd, status, labels = reshape_to_n_dims(self, get_labels=True, verbose=verbose, sort_dims=False,
lazy=True)
# Translate the names of the dimensions to the indices:
dim_inds = [np.where(labels == curr_dim)[0][0] for curr_dim in dims]
# Now apply the reduction:
reduced_nd = ufunc(da_nd, axis=dim_inds)
if not to_hdf5:
return reduced_nd, None
if dset_name is None:
dset_name = self.name.split('/')[-1]
else:
dset_name = validate_single_string_arg(dset_name, 'dset_name')
# Create the group to hold the results:
h5_group = create_results_group(self, 'Reduce')
# check if a pos dimension was sliced:
pos_sliced = False
for dim_name in dims:
if dim_name in self.pos_dim_labels:
pos_sliced = True
if verbose:
print('Position dimension: {} was reduced. Breaking...'.format(dim_name))
break
if not pos_sliced:
h5_pos_inds = self.h5_pos_inds
h5_pos_vals = self.h5_pos_vals
if verbose:
print('Reusing this main datasets position datasets')
else:
if verbose:
print('Creating new Position dimensions:\n------------------------------------------')
# First figure out the names of the position dimensions
pos_dim_names = []
for cur_dim in dims:
if cur_dim in self.pos_dim_labels:
pos_dim_names.append(cur_dim)
if verbose:
print('Position dimensions reduced: {}'.format(pos_dim_names))
# Now create the reduced position datasets
h5_pos_inds, h5_pos_vals = write_reduced_anc_dsets(h5_group, self.h5_pos_inds, self.h5_pos_vals,
pos_dim_names, is_spec=False, verbose=verbose)
if verbose:
print('Position dataset created: {}. Labels: {}'.format(h5_pos_inds, get_attr(h5_pos_inds, 'labels')))
spec_sliced = False
for dim_name in dims:
if dim_name in self.spec_dim_labels:
spec_sliced = True
if verbose:
print('Spectroscopic dimension: {} was reduced. Breaking...'.format(dim_name))
break
if not spec_sliced:
h5_spec_inds = self.h5_spec_inds
h5_spec_vals = self.h5_spec_vals
if verbose:
print('Reusing this main datasets spectroscopic datasets')
else:
if verbose:
print('Creating new spectroscopic dimensions:\n------------------------------------------')
# First figure out the names of the position dimensions
spec_dim_names = []
for cur_dim in dims:
if cur_dim in self.spec_dim_labels:
spec_dim_names.append(cur_dim)
if verbose:
print('Spectroscopic dimensions reduced: {}'.format(spec_dim_names))
# Now create the reduced position datasets
h5_spec_inds, h5_spec_vals = write_reduced_anc_dsets(h5_group, self.h5_spec_inds, self.h5_spec_vals,
spec_dim_names, is_spec=True, verbose=verbose)
if verbose:
print('Spectroscopic dataset created: {}. Labels: {}'.format(h5_spec_inds,
get_attr(h5_spec_inds, 'labels')))
# Now put the reduced N dimensional Dask array back to 2D form:
reduced_2d, status = reshape_from_n_dims(reduced_nd, h5_pos=h5_pos_inds, h5_spec=h5_spec_inds, verbose=verbose)
if status != True and verbose:
print('Status from reshape_from_n_dims: {}'.format(status))
if verbose:
print('2D reduced dataset: {}'.format(reduced_2d))
# Create a HDF5 dataset to hold this flattened 2D data:
h5_red_main = h5_group.create_dataset(dset_name, shape=reduced_2d.shape,
dtype=reduced_2d.dtype) # , compression=self.compression)
if verbose:
print('Created an empty dataset to hold flattened dataset: {}. Chunks: {}'.format(h5_red_main,
h5_red_main.chunks))
# Copy the mandatory attributes:
copy_attributes(self, h5_red_main)
# Now make this dataset a main dataset:
link_as_main(h5_red_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
if verbose:
print('{} is a main dataset?: {}'.format(h5_red_main, check_if_main(h5_red_main, verbose=verbose)))
# Now write this data to the HDF5 dataset:
if verbose:
print('About to write dask array to this dataset at path: {}, in file: {}'.format(h5_red_main.name,
self.file.filename))
reduced_2d.to_hdf5(self.file.filename, h5_red_main.name)
return reduced_nd, USIDataset(h5_red_main)
def to_csv(self, output_path=None, force=False):
"""
Output this USIDataset and position + spectroscopic values to a csv file.
This should ideally be limited to small datasets only
Parameters
----------
output_path : str, optional
path that the output file should be written to.
By default, the file will be written to the same directory as the HDF5 file
force : bool, optional
Whether or not to force large dataset to be written to CSV. Default = False
Returns
-------
output_file: str
Author - Daniel Streater, Suhas Somnath
"""
if not isinstance(force, bool):
raise TypeError('force should be a bool')
if self.dtype.itemsize * self.size / (1024 ** 2) > 15:
if force:
print('Note - the CSV file can be (much) larger than 100 MB')
else:
print('CSV file will not be written since the CSV file could be several 100s of MB large.\n'
'If you still want the file to be written, add the keyword argument "force=True"\n'
'We recommend that you save the data as a .npy or .npz file using numpy.dump')
return
if output_path is not None:
if not isinstance(output_path, str):
raise TypeError('output_path should be a string with a valid path for the output file')
else:
parent_folder, file_name = os.path.split(self.file.filename)
csv_name = file_name[:file_name.rfind('.')] + self.name.replace('/', '-') + '.csv'
output_path = os.path.join(parent_folder, csv_name)
if os.path.exists(output_path):
if force:
os.remove(output_path)
else:
raise FileExistsError('A file of the following name already exists. Set "force=True" to overwrite.\n'
'File path: ' + output_path)
header = ''
for spec_vals_for_dim in self.h5_spec_vals:
# create one line of the header for each of the spectroscopic dimensions
header += ','.join(str(item) for item in spec_vals_for_dim) + '\n'
# Add a dashed-line separating the spec vals from the data
header += ','.join(
'--------------------------------------------------------------' for _ in self.h5_spec_vals[0])
# Write the contents to a temporary file
np.savetxt('temp.csv', self, delimiter=',', header=header, comments='')
"""
Create the spectral and position labels for the dataset in string form then
create the position value array in string form, right-strip the last comma from the
string to deliver the correct number of values, append all of the labels and values together,
save the data and header to a temporary csv output
"""
# First few lines will have the spectroscopic dimension names + units
spec_dim_labels = ''
for dim_desc in self.spec_dim_descriptors:
spec_dim_labels += ','.join('' for _ in self.pos_dim_labels) + str(dim_desc) + ',\n'
# Next line will have the position dimension names
pos_labels = ','.join(pos_dim for pos_dim in self.pos_dim_descriptors) + ',\n'
# Finally, the remaining rows will have the position values themselves
pos_values = ''
for pos_vals_in_row in self.h5_pos_vals:
pos_values += ','.join(str(item) for item in pos_vals_in_row) + ',\n'
pos_values = pos_values.rstrip('\n')
# Now put together all the rows for the first few columns:
output = spec_dim_labels + pos_labels + pos_values
left_dset = output.splitlines()
with open('temp.csv', 'r+') as in_file, open(output_path, 'w') as out_file:
for left_line, right_line in zip(left_dset, in_file):
out_file.write(left_line + right_line)
os.remove('temp.csv')
print('Successfully wrote this dataset to: ' + output_path)
return output_path
| 56,744 | 43.056677 | 127 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/dimension.py | import sys
from enum import Enum
from warnings import warn
import numpy as np
from sidpy import Dimension as SIDimension
# Py2/3 compatibility: make ``unicode`` an alias of ``str`` on Python 3 so that
# isinstance checks against (str, unicode) work under both major versions
if sys.version_info.major == 3:
    unicode = str
class DimType(Enum):
    """
    Describes how the parameter associated with a Dimension was varied:
    DEFAULT - recorded for all combinations of values against all other dimensions.
    INCOMPLETE - data not present for all combinations (e.g. sparse / aborted scans).
    DEPENDENT - varied as a function of another (independent) dimension.
    Members are ordered by their integer value (DEFAULT < INCOMPLETE < DEPENDENT).
    Comparing against anything that is not a DimType raises a TypeError.
    """
    DEFAULT = 0
    INCOMPLETE = 1
    DEPENDENT = 2

    @staticmethod
    def __check_other_type(other):
        # Ordering / equality are only defined between DimType members
        if not isinstance(other, DimType):
            raise TypeError('Provided object not of type DimType')

    def __lt__(self, other):
        self.__check_other_type(other)
        return self.value < other.value

    def __gt__(self, other):
        self.__check_other_type(other)
        return self.value > other.value

    def __eq__(self, other):
        self.__check_other_type(other)
        return self.value == other.value

    # Bug fix: defining __eq__ implicitly sets __hash__ to None, which made
    # DimType members unhashable (unusable as dict keys / in sets). Restore
    # the default Enum hash, which is consistent with member identity.
    __hash__ = Enum.__hash__
class Dimension(SIDimension):
    """
    Describes a dimension of a USID dataset via its name, units, and the
    values over which it was varied. Extends :class:`sidpy.Dimension` with a
    ``mode`` (:class:`DimType`) attribute and pyUSID-specific equality rules.
    """
    def __new__(cls, name, units, values, quantity='generic',
                dimension_type='unknown', mode=DimType.DEFAULT):
        """
        Simple object that describes a dimension in a dataset by its name, units, and values
        Parameters
        ----------
        name : str or unicode
            Name of the dimension. For example 'Bias'
        units : str or unicode
            Units for this dimension. For example: 'V'
        values : array-like or int
            Values over which this dimension was varied. A linearly increasing set of values will be generated if an
            integer is provided instead of an array.
        quantity : str or unicode
            Physical quantity such as Length
        dimension_type : str or sidpy.DimensionTypes
            Type of dimension. such as spectral, spatial, etc.
        mode : Enum, Optional. Default = DimType.DEFAULT
            How the parameter associated with the dimension was varied.
            DimType.DEFAULT - data was recorded for all combinations of values in this dimension against **all** other
            dimensions. This is typically the case.
            DimType.INCOMPLETE - Data not present for all combinations of values in this dimension and all other
            dimensions. Examples include spiral scans, sparse sampling, aborted measurements
            DimType.DEPENDENT - Values in this dimension were varied as a function of another (independent) dimension.
        """
        if isinstance(values, int):
            # NOTE(review): only validates positivity here; the expansion of an int
            # into a linear set of values presumably happens in SIDimension.__new__
            if values < 1:
                raise ValueError('values must be a whole number. {} provided'
                                 ''.format(values))
        self = SIDimension.__new__(cls, values, name=name, quantity=quantity,
                                   units=units, dimension_type=dimension_type)
        # Assign through the property so the DimType type check runs
        self.mode = mode
        return self
    @property
    def mode(self):
        # How this dimension was varied (a DimType member)
        return self._mode
    @mode.setter
    def mode(self, value):
        if not isinstance(value, DimType):
            raise TypeError('mode must be of type pyUSID.DimType. Provided '
                            'object was of type: {}'.format(type(value)))
        self._mode = value
    @property
    def units(self):
        return self._units
    # pyUSID allows empty values for units unlike sid.Dimension
    @units.setter
    def units(self, value):
        if not isinstance(value, (str, unicode)):
            raise TypeError('units should be a string')
        self._units = value.strip()
    def __repr__(self):
        return '{}: {} ({}) mode:{} : {}' \
               ''.format(self.name, self.quantity, self.units, self.mode,
                         self.values)
    def __str__(self):
        return '{}: {} ({}) mode:{} : {}' \
               ''.format(self.name, self.quantity, self.units, self.mode,
                         self.values)
    def __eq__(self, other):
        # Since __eq__ has not been implemented in sidpy.Dimension:
        # compares name, quantity, mode, units, length, and numeric closeness
        if not isinstance(other, Dimension):
            raise TypeError('Cannot compare against object type: {}'
                            ''.format(type(other)))
        if self._name != other._name:
            return False
        if self._quantity != other._quantity:
            return False
        # NOTE(review): mixes property access (self.mode) with private attribute
        # access (other._mode); equivalent since the mode property returns _mode
        if self.mode != other._mode:
            return False
        if self._units != other._units:
            return False
        if len(self) != len(other):
            return False
        return np.allclose(self, other)
def validate_dimensions(dimensions, dim_type='Position'):
    """
    Ensures that the input is a flat sequence of pyUSID.Dimension objects.
    A single Dimension is wrapped into a list and a multi-dimensional numpy
    array of Dimensions is raveled (with a warning). Any other input that is
    not an array-like of Dimension objects raises a TypeError.
    Parameters
    ----------
    dimensions : iterable or pyUSID.Dimension
        Iterable containing pyUSID.Dimension objects
    dim_type : str, Optional. Default = "Position"
        Label ("Position" / "Spectroscopic") used only to make the warning and
        exception messages more descriptive
    Returns
    -------
    list
        List containing pyUSID.Dimension objects
    """
    if isinstance(dimensions, Dimension):
        dimensions = [dimensions]
    elif isinstance(dimensions, np.ndarray) and dimensions.ndim > 1:
        dimensions = dimensions.ravel()
        warn(dim_type + ' dimensions should be specified by a 1D array-like. Raveled this numpy array for now')
    if not isinstance(dimensions, (list, np.ndarray, tuple)):
        raise TypeError(dim_type + ' dimensions should be array-like of Dimension objects')
    for curr_dim in dimensions:
        if not isinstance(curr_dim, Dimension):
            raise TypeError(dim_type + ' dimensions should be a sequence of Dimension objects')
    return dimensions
| 5,617 | 35.480519 | 118 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/__init__.py | """
Tools to read, write data in h5USID files
Submodules
----------
.. autosummary::
:toctree: _autosummary
hdf_utils
image
array_translator
usi_data
dimension
translator
anc_build_utils
"""
from sidpy.sid.translator import Translator
from . import usi_data
from . import array_translator
from . import hdf_utils
from . import anc_build_utils
from . import dimension
from .usi_data import USIDataset
from .array_translator import ArrayTranslator
from .image import ImageTranslator
from .dimension import DimType, Dimension
# Backwards-compatible aliases: older pyUSID releases exposed these modules /
# classes under different names; keep them importable from this package
write_utils = anc_build_utils
numpy_translator = array_translator
NumpyTranslator = ArrayTranslator
# Public API of this package. Bug fix: 'write_utils' was listed twice.
__all__ = ['USIDataset', 'hdf_utils', 'write_utils', 'Dimension', 'DimType',
           'ImageTranslator', 'ArrayTranslator', 'Translator',
           'anc_build_utils', 'numpy_translator', 'NumpyTranslator']
| 918 | 21.975 | 76 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/array_translator.py | # -*- coding: utf-8 -*-
"""
:class:`~pyUSID.io.numpy_translator.ArrayTranslator` capable of translating
numeric arrays to USID HDF5 files
Created on Fri Jan 27 17:58:35 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, absolute_import, \
unicode_literals
from os import path, remove
import sys
import h5py
import numpy as np
import dask.array as da
from sidpy.hdf.hdf_utils import write_simple_attrs
from sidpy.sid.translator import Translator
from sidpy.base.string_utils import validate_string_args
from .hdf_utils.base import write_book_keeping_attrs
from .hdf_utils.simple import create_indexed_group, validate_main_dset
from .hdf_utils.model import write_main_dataset, validate_dims_against_main
from .dimension import validate_dimensions
# Py2/3 compatibility: make ``unicode`` an alias of ``str`` on Python 3 so that
# isinstance checks against (str, unicode) work under both major versions
if sys.version_info.major == 3:
    unicode = str
__all__ = ['ArrayTranslator']
class ArrayTranslator(Translator):
    """
    Translator that writes numeric arrays (already in memory) that describe a USID dataset to a HDF5 file
    """
    def translate(self, h5_path, data_name, raw_data, quantity, units, pos_dims, spec_dims,
                  translator_name='ArrayTranslator', parm_dict=None, extra_dsets=None, **kwargs):
        """
        Writes the provided datasets and parameters to an h5 file
        Parameters
        ----------
        h5_path : str
            Absolute path of the h5 file to be written
        data_name : str
            Name of the scientific data type. Example - 'SEM'
        raw_data : :class:`np.ndarray` or :class:`dask.array.core.Array`
            2D matrix formatted as [position, spectral]
        quantity : str
            Name of the physical quantity stored in the dataset. Example - 'Current'
        units : str
            Name of units for the quantity stored in the dataset. Example - 'A' for amperes
        pos_dims : :class:`~pyUSID.io.write_utils.Dimension` or array-like of :class:`~pyUSID.io.write_utils.Dimension`
            objects
            Sequence of :class:`~pyUSID.io.write_utils.Dimension` objects that provides all necessary instructions for
            constructing the indices and values datasets
            Object specifying the instructions necessary for building the Position indices and values datasets
        spec_dims : :class:`~pyUSID.io.write_utils.Dimension` or array-like of :class:`~pyUSID.io.write_utils.Dimension`
            objects
            Sequence of :class:`~pyUSID.io.write_utils.Dimension` objects that provides all necessary instructions for
            constructing the indices and values datasets
            Object specifying the instructions necessary for building the Spectroscopic indices and values datasets
        translator_name : str, Optional
            Name of the translator. Example - 'HitachiSEMTranslator'
        parm_dict : dict, Optional
            Dictionary of parameters that will be written under the group 'Measurement_000'
        extra_dsets : dict, Optional
            Dictionary whose values will be written into individual HDF5 datasets and whose corresponding keys provide
            the names of the datasets. You are recommended to limit these to simple and small datasets.
        kwargs: dict, Optional.
            Additional keyword arguments that will be passed onto :meth:`pyUSID.hdf_utils.write_main_dset()` which will
            in turn will be passed onto the creation of the dataset. Please pass chunking, compression, dtype, and other
            arguments this way
        Returns
        -------
        h5_path : str
            Absolute path of the written h5 file
        """
        h5_path, data_name, translator_name, quantity, units = validate_string_args([h5_path, data_name,
                                                                                     translator_name, quantity,
                                                                                     units],
                                                                                    ['h5_path', 'data_name',
                                                                                     'translator_name', 'quantity',
                                                                                     'units'])
        validate_main_dset(raw_data, False)
        # Validate each set of dimensions against the shape of the 2D main data.
        # NOTE(review): the normalized return of validate_dimensions is discarded -
        # the original pos_dims / spec_dims are passed to write_main_dataset below,
        # presumably because it re-validates internally; confirm before relying on it
        for dimensions, dim_name in zip([pos_dims, spec_dims], ['Position', 'Spectroscopic']):
            dimensions = validate_dimensions(dimensions, dim_type=dim_name)
            validate_dims_against_main(raw_data.shape, dimensions, dim_name == 'Spectroscopic')
        if extra_dsets is not None:
            if not isinstance(extra_dsets, dict):
                raise TypeError('extra_dsets should be specified as dictionaries')
            for key, val in extra_dsets.items():
                [key] = validate_string_args(key, 'keys for extra_dsets')
                # Reserved names would collide with the standard USID datasets
                if np.any([key in x for x in ['Spectroscopic_Indices', 'Spectroscopic_Values', 'Position_Indices',
                                              'Position_Values', 'Raw_Data']]):
                    raise KeyError('keys for extra_dsets cannot match reserved names for existing datasets')
                # Now check for data:
                if not isinstance(val, (list, tuple, np.ndarray, da.core.Array)):
                    raise TypeError('values for extra_dsets should be a tuple, list, or numpy / dask array')
        else:
            extra_dsets = dict()
        # Any existing file at the target path is silently overwritten
        if path.exists(h5_path):
            remove(h5_path)
        if parm_dict is None:
            parm_dict = {}
        global_parms = dict()
        global_parms['data_type'] = data_name
        global_parms['translator'] = translator_name
        # Begin writing to file:
        with h5py.File(h5_path, mode='w') as h5_f:
            # Root attributes first:
            write_simple_attrs(h5_f, global_parms)
            write_book_keeping_attrs(h5_f)
            # measurement group next
            meas_grp = create_indexed_group(h5_f, 'Measurement')
            write_simple_attrs(meas_grp, parm_dict)
            # channel group next
            chan_grp = create_indexed_group(meas_grp, 'Channel')
            _ = write_main_dataset(chan_grp, raw_data, 'Raw_Data', quantity, units, pos_dims, spec_dims, **kwargs)
            # Dask arrays are streamed to HDF5; everything else is written directly
            for key, val in extra_dsets.items():
                if isinstance(val, da.core.Array):
                    da.to_hdf5(chan_grp.file.filename, {chan_grp.name + '/' + key: val})
                else:
                    chan_grp.create_dataset(key.strip(), data=val)
        return h5_path
| 6,492 | 44.725352 | 120 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/hdf_utils/base.py | # -*- coding: utf-8 -*-
"""
Simple yet handy HDF5 utilities, independent of the USID model
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import sys
import h5py
from sidpy.hdf import hdf_utils as hut
from ...__version__ import version as py_usid_version
if sys.version_info.major == 3:
unicode = str
def print_tree(parent, rel_paths=False, main_dsets_only=False):
    """
    Recursively prints the contents of an HDF5 group.

    Parameters
    ----------
    parent : :class:`h5py.Group`
        HDF5 (sub-)tree to print
    rel_paths : bool, optional. Default = False
        True - prints the relative paths for all elements.
        False - prints a tree-like structure with only the element names
    main_dsets_only : bool, optional. default=False
        True - prints only groups and Main datasets
        False - prints all dataset and group objects
    """
    # TODO: Leverage copy in sidpy.hdf.hdf_utils
    if not isinstance(parent, (h5py.File, h5py.Group)):
        raise TypeError('Provided object is not a h5py.File or h5py.Group '
                        'object')

    def __visitor(name, obj):
        # Optionally restrict output to groups and USID Main datasets
        if main_dsets_only:
            from .simple import check_if_main
            if not (isinstance(obj, h5py.Group) or check_if_main(obj)):
                return
        if rel_paths:
            print(name)
            return
        # Tree-style output: indent by depth, print only the leaf name
        depth = name.count('/')
        leaf = name[name.rfind('/') + 1:]
        print(depth * '  ' + '├ ' + leaf)
        if isinstance(obj, h5py.Group):
            # underline group names to make them stand out
            print((depth + 1) * '  ' + len(leaf) * '-')

    print(parent.name)
    parent.visititems(__visitor)
def get_h5_obj_refs(obj_names, h5_refs):
    """
    Given a list of H5 references and a list of names,
    this method returns H5 objects corresponding to the names

    Parameters
    ----------
    obj_names : string or List of strings
        names of target h5py objects
    h5_refs : H5 object reference or List of H5 object references
        list containing the target reference

    Returns
    -------
    found_objects : List of HDF5 dataset references
        Corresponding references
    """
    from ..usi_data import USIDataset

    def _promote(obj):
        # Upgrade to USIDataset wherever the object qualifies as Main data;
        # anything that does not qualify is passed through unchanged
        try:
            return USIDataset(obj)
        except TypeError:
            return obj

    return [_promote(ref) for ref in hut.get_h5_obj_refs(obj_names, h5_refs)]
def write_book_keeping_attrs(h5_obj):
    """
    Writes basic book-keeping and posterity related attributes to groups
    created in pyUSID such as machine id, pyUSID version, timestamp.

    Parameters
    ----------
    h5_obj : :class:`h5py.Dataset`, :class:`h5py.Group`, or :class:`h5py.File`
        Object to which basic book-keeping attributes need to be written
    """
    # Let sidpy stamp its generic book-keeping attributes first ...
    hut.write_book_keeping_attrs(h5_obj)
    # ... then record which pyUSID version produced this object
    version_info = {'pyUSID_version': py_usid_version}
    hut.write_simple_attrs(h5_obj, version_info)
| 3,164 | 28.579439 | 82 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/hdf_utils/simple.py | # -*- coding: utf-8 -*-
"""
Lower-level and simpler USID-specific HDF5 utilities that facilitate higher-level data operations
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import collections
from warnings import warn
import sys
import h5py
import numpy as np
import dask.array as da
from sidpy.hdf.hdf_utils import get_auxiliary_datasets, link_h5_obj_as_alias, \
write_simple_attrs, is_editable_h5, validate_h5_objs_in_same_h5_file, \
get_attr
from sidpy.hdf.dtype_utils import validate_dtype
from sidpy.hdf import hdf_utils as hut
from sidpy.base.string_utils import validate_single_string_arg, validate_list_of_strings
from sidpy.base.num_utils import contains_integers
from sidpy.base.string_utils import clean_string_att
from ..anc_build_utils import build_ind_val_matrices, INDICES_DTYPE, VALUES_DTYPE
from ..dimension import DimType, Dimension
from .base import write_book_keeping_attrs
if sys.version_info.major == 3:
unicode = str
"""
__all__ = ['assign_group_index', 'check_and_link_ancillary', 'check_for_matching_attrs', 'check_for_old',
'check_if_main', 'copy_attributes', 'copy_main_attributes']
"""
def get_all_main(parent, verbose=False):
    """
    Recursively collects all USID Main datasets within an HDF5 group.

    Parameters
    ----------
    parent : :class:`h5py.Group`
        HDF5 Group to search within
    verbose : bool, optional. Default = False
        If true, extra print statements (usually for debugging) are enabled

    Returns
    -------
    main_list : list of h5py.Dataset
        The datasets found in the file that meet the 'Main Data' criteria.
    """
    if not isinstance(parent, (h5py.Group, h5py.File)):
        raise TypeError('parent should be a h5py.File or h5py.Group object')
    from ..usi_data import USIDataset
    main_list = []

    def __visitor(name, obj):
        if verbose:
            print(name, obj)
        # Only datasets can possibly be Main datasets
        if not isinstance(obj, h5py.Dataset):
            return
        if verbose:
            print(name, 'is an HDF5 Dataset.')
        if check_if_main(obj):
            if verbose:
                print(name, 'is a `Main` dataset.')
            main_list.append(USIDataset(obj))

    if verbose:
        print('Checking the group {} for `Main` datasets.'.format(parent.name))
    parent.visititems(__visitor)
    return main_list
def find_dataset(h5_group, dset_name):
    """
    Uses visit() to find all datasets with the desired name

    Parameters
    ----------
    h5_group : :class:`h5py.Group`
        Group to search within for the Dataset
    dset_name : str
        Name of the dataset to search for

    Returns
    -------
    datasets : list
        List of [Name, object] pairs corresponding to datasets that match `ds_name`.
    """
    from ..usi_data import USIDataset

    def _promote(obj):
        # Wrap as USIDataset when the match is a Main dataset; otherwise
        # return the raw h5py object untouched
        try:
            return USIDataset(obj)
        except TypeError:
            return obj

    return [_promote(dset) for dset in hut.find_dataset(h5_group, dset_name)]
def find_results_groups(h5_main, tool_name, h5_parent_group=None):
    """
    Finds a list of all groups containing results of the process of name
    `tool_name` being applied to the dataset

    Parameters
    ----------
    h5_main : h5 dataset reference
        Reference to the target dataset to which the tool was applied
    tool_name : String / unicode
        Name of the tool applied to the target dataset
    h5_parent_group : h5py.Group, optional. Default = None
        Parent group under which the results group will be searched for. Use
        this option when the results groups are contained in different HDF5
        file compared to `h5_main`. By default, this function will search
        within the same group that contains `h5_main`

    Returns
    -------
    groups : list of references to :class:`h5py.Group` objects
        groups whose name contains the tool name and the dataset name
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    if h5_parent_group is None:
        # default: look alongside the source dataset
        h5_parent_group = h5_main.parent
    elif not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError("'h5_parent_group' should either be a h5py.File "
                        "or h5py.Group object")
    dset_name = h5_main.name.split('/')[-1]
    # Results groups are named as "<DatasetName>-<ToolName>_<index>"
    return [h5_parent_group[key] for key in h5_parent_group.keys()
            if dset_name in key and tool_name in key
            and isinstance(h5_parent_group[key], h5py.Group)]
def check_and_link_ancillary(h5_dset, anc_names, h5_main=None, anc_refs=None):
    """
    This function will add references to auxilliary datasets as attributes
    of an input dataset.
    If the entries in anc_refs are valid references, they will be added
    as attributes with the name taken from the corresponding entry in
    anc_names.
    If an entry in anc_refs is not a valid reference, the function will
    attempt to get the attribute with the same name from the h5_main
    dataset
    Parameters
    ----------
    h5_dset : HDF5 Dataset
        dataset to which the attributes will be written
    anc_names : list of str
        the attribute names to be used
    h5_main : HDF5 Dataset, optional
        dataset from which attributes will be copied if `anc_refs` is None
    anc_refs : list of HDF5 Object References, optional
        references that correspond to the strings in `anc_names`
    Returns
    -------
    None
    Notes
    -----
    Either `h5_main` or `anc_refs` MUST be provided and `anc_refs` has the
    higher priority if both are present.
    """
    if not isinstance(h5_dset, h5py.Dataset):
        raise TypeError('h5_dset should be a h5py.Dataset object')
    # Normalize a single name / single object into one-element lists so the
    # zip-based dispatch below works uniformly
    if isinstance(anc_names, (str, unicode)):
        anc_names = [anc_names]
    if isinstance(anc_refs, (h5py.Dataset, h5py.Group, h5py.File,
                             h5py.Reference)):
        anc_refs = [anc_refs]
    if not isinstance(anc_names, (list, tuple)):
        raise TypeError('anc_names should be a list / tuple')
    if h5_main is not None:
        if not isinstance(h5_main, h5py.Dataset):
            raise TypeError('h5_main should be a h5py.Dataset object')
        # Cross-file object references are invalid in HDF5
        validate_h5_objs_in_same_h5_file(h5_dset, h5_main)
    if anc_refs is not None:
        if not isinstance(anc_refs, (list, tuple)):
            raise TypeError('anc_refs should be a list / tuple')
    if anc_refs is None and h5_main is None:
        raise ValueError('No objected provided to link as ancillary')
    def __check_and_link_single(h5_obj_ref, target_ref_name):
        # Links one ancillary object under attribute name `target_ref_name`,
        # accepting either a raw h5py.Reference, a live h5py object, or
        # (fallback) an attribute of the same name copied from h5_main
        if isinstance(h5_obj_ref, h5py.Reference):
            # TODO: Same HDF5 file?
            h5_dset.attrs[target_ref_name] = h5_obj_ref
        elif isinstance(h5_obj_ref, (h5py.Dataset, h5py.Group, h5py.File)):
            validate_h5_objs_in_same_h5_file(h5_obj_ref, h5_dset)
            h5_dset.attrs[target_ref_name] = h5_obj_ref.ref
        elif h5_main is not None:
            # Fall back to h5_main's own ancillary dataset of the same name.
            # NOTE(review): silently does nothing when zero or multiple
            # matches are found - confirm this is intentional
            h5_anc = get_auxiliary_datasets(h5_main, aux_dset_name=[target_ref_name])
            if len(h5_anc) == 1:
                link_h5_obj_as_alias(h5_dset, h5_anc[0], target_ref_name)
        else:
            warnstring = '{} is not a valid h5py Reference and will be skipped.'.format(repr(h5_obj_ref))
            warn(warnstring)
    if bool(np.iterable(anc_refs) and not isinstance(anc_refs, h5py.Dataset)):
        """
        anc_refs can be iterated over
        """
        for ref_name, h5_ref in zip(anc_names, anc_refs):
            __check_and_link_single(h5_ref, ref_name)
    elif anc_refs is not None:
        """
        anc_refs is just a single value
        """
        __check_and_link_single(anc_refs, anc_names)
    elif isinstance(anc_names, str) or isinstance(anc_names, unicode):
        """
        Single name provided
        """
        # NOTE(review): anc_names was normalized to a list above, so this
        # branch appears unreachable - verify before relying on it
        __check_and_link_single(None, anc_names)
    else:
        """
        Iterable of names provided
        """
        for name in anc_names:
            __check_and_link_single(None, name)
    # Persist the new attributes to disk immediately
    h5_dset.file.flush()
def validate_main_dset(h5_main, must_be_h5):
    """
    Checks to make sure that the provided object is a USID main dataset
    Errors in parameters will result in Exceptions

    Parameters
    ----------
    h5_main : h5py.Dataset or numpy.ndarray or Dask.array.core.array
        object that represents the USID main data
    must_be_h5 : bool
        Set to True if the expecting an h5py.Dataset object.
        Set to False if expecting a numpy.ndarray or Dask.array.core.array
    """
    # First validate the container type ...
    if must_be_h5:
        if not isinstance(h5_main, h5py.Dataset):
            raise TypeError('{} is not an HDF5 Dataset object.'.format(h5_main))
    elif not isinstance(h5_main, (np.ndarray, da.core.Array)):
        raise TypeError('raw_data should either be a np.ndarray or a da.core.Array')
    # ... then the dimensionality: USID Main data is strictly 2D
    if len(h5_main.shape) != 2:
        raise ValueError('Main data is not 2D. Provided object has shape: {}'.format(h5_main.shape))
def validate_anc_h5_dsets(h5_inds, h5_vals, main_shape, is_spectroscopic=True):
    """
    Checks ancillary HDF5 datasets against shape of a main dataset.
    Errors in parameters will result in Exceptions

    Parameters
    ----------
    h5_inds : h5py.Dataset
        HDF5 dataset corresponding to the ancillary Indices dataset
    h5_vals : h5py.Dataset
        HDF5 dataset corresponding to the ancillary Values dataset
    main_shape : array-like
        Shape of the main dataset expressed as a tuple or similar
    is_spectroscopic : bool, Optional. Default = True
        set to True if ``dims`` correspond to Spectroscopic Dimensions.
        False otherwise.
    """
    # Both ancillary datasets must be HDF5 datasets of identical shape
    for dset, arg_name in ((h5_inds, 'h5_inds'), (h5_vals, 'h5_vals')):
        if not isinstance(dset, h5py.Dataset):
            raise TypeError(arg_name + ' must be a h5py.Dataset object')
    if h5_inds.shape != h5_vals.shape:
        raise ValueError('h5_inds: {} and h5_vals: {} should be of the same '
                         'shape'.format(h5_inds.shape, h5_vals.shape))
    if not isinstance(main_shape, (list, tuple)):
        raise TypeError('main_shape should be of the following types:'
                        'h5py.Dataset, tuple, or list. {} provided'
                        ''.format(type(main_shape)))
    if not contains_integers(main_shape, min_val=1) or \
            len(main_shape) != 2:
        raise ValueError("'main_shape' must be a valid HDF5 dataset shape")
    # Axis 1 must agree for spectroscopic datasets, axis 0 for position
    axis = int(is_spectroscopic)
    if h5_inds.shape[axis] != main_shape[axis]:
        raise ValueError('index {} in shape of h5_inds: {} and main_data: {} '
                         'should be equal'.format(axis,
                                                  h5_inds.shape, main_shape))
def validate_dims_against_main(main_shape, dims, is_spectroscopic=True):
    """
    Checks Dimension objects against a given shape for main datasets.
    Errors in parameters will result in Exceptions

    Parameters
    ----------
    main_shape : array-like
        Tuple or list with the shape of the main data
    dims : iterable
        List of Dimension objects
    is_spectroscopic : bool, Optional. Default = True
        set to True if ``dims`` correspond to Spectroscopic Dimensions.
        False otherwise.

    Raises
    ------
    TypeError : if ``main_shape`` or ``dims`` are of incorrect types
    ValueError : if ``main_shape`` is invalid or does not match ``dims``
    """
    if not isinstance(main_shape, (list, tuple)):
        raise TypeError('main_shape should be a list or tuple. Provided object'
                        ' was of type: {}'.format(type(main_shape)))
    if len(main_shape) != 2:
        raise ValueError('"main_shape" should be of length 2')
    # Bug fix: the boolean returned by contains_integers() was previously
    # discarded, so non-positive / non-integer shapes slipped through.
    # Enforce it, consistent with validate_anc_h5_dsets()
    if not contains_integers(main_shape, min_val=1):
        raise ValueError("'main_shape' must be a valid HDF5 dataset shape")
    if isinstance(dims, Dimension):
        # allow a bare Dimension in place of a one-element list
        dims = [dims]
    elif not isinstance(dims, (list, tuple)):
        raise TypeError('"dims" must be a list or tuple of usid.Dimension '
                        'objects. Provided object was of type: {}'
                        ''.format(type(dims)))
    if not all([isinstance(obj, Dimension) for obj in dims]):
        raise TypeError('One or more objects in "dims" was not usid.Dimension')
    if is_spectroscopic:
        main_dim = 1
        dim_category = 'Spectroscopic'
    else:
        main_dim = 0
        dim_category = 'Position'
    # TODO: This is where the dimension type will need to be taken into account
    lhs = main_shape[main_dim]
    # np.prod instead of np.product: the latter alias was deprecated and
    # removed in NumPy 2.0
    rhs = np.prod([len(x.values) for x in dims])
    if lhs != rhs:
        raise ValueError(dim_category +
                         ' dimensions in main data of size: {} do not match '
                         'with product of values in provided Dimension objects'
                         ': {}'.format(lhs, rhs))
def check_if_main(h5_main, verbose=False):
    """
    Checks the input dataset to see if it has all the necessary
    features to be considered a Main dataset. This means it is
    2D and has the following attributes:
    * Position_Indices
    * Position_Values
    * Spectroscopic_Indices
    * Spectroscopic_Values
    * quantity
    * units
    In addition, the shapes of the ancillary matrices should match with that of
    h5_main

    Parameters
    ----------
    h5_main : HDF5 Dataset
        Dataset of interest
    verbose : Boolean (Optional. Default = False)
        Whether or not to print statements

    Returns
    -------
    success : Boolean
        True if all tests pass
    """
    # Basic requirements: h5py.Dataset that is 2D
    try:
        validate_main_dset(h5_main, True)
    except Exception as exep:
        if verbose:
            print(exep)
        return False
    h5_name = h5_main.name.split('/')[-1]
    success = True
    # All four ancillary datasets must be linked as attributes
    dset_names = ['Position_Indices', 'Position_Values',
                  'Spectroscopic_Indices', 'Spectroscopic_Values']
    for name in dset_names:
        try:
            h5_anc_dset = h5_main.file[h5_main.attrs[name]]
            success = np.all([success, isinstance(h5_anc_dset, h5py.Dataset)])
        # Bug fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt / SystemExit. Only trap real lookup failures.
        except Exception:
            if verbose:
                print('{} not found as an attribute of {}.'.format(name, h5_name))
            return False
    # Mandatory string attributes: quantity and units
    attr_success = np.all([att in h5_main.attrs for att in ['quantity', 'units']])
    if not attr_success:
        if verbose:
            print('{} does not have the mandatory "quantity" and "units" attributes'.format(h5_main.name))
        return False
    for attr_name in ['quantity', 'units']:
        val = get_attr(h5_main, attr_name)
        if not isinstance(val, (str, unicode)):
            if verbose:
                print('Attribute {} of {} found to be {}. Expected a string'.format(attr_name, h5_main.name, val))
            return False
    # Blindly linking four datasets is still not sufficient. The sizes need to match:
    # Position ancillary datasets must agree with axis 0 of the main dataset
    anc_shape_match = list()
    h5_pos_inds = h5_main.file[h5_main.attrs['Position_Indices']]
    h5_pos_vals = h5_main.file[h5_main.attrs['Position_Values']]
    anc_shape_match.append(np.all(h5_pos_vals.shape == h5_pos_inds.shape))
    for anc_dset in [h5_pos_vals, h5_pos_inds]:
        anc_shape_match.append(np.all(h5_main.shape[0] == anc_dset.shape[0]))
    if not np.all(anc_shape_match):
        if verbose:
            print('The shapes of the Position indices:{}, values:{} datasets did not match with that of the main '
                  'dataset: {}'.format(h5_pos_inds.shape, h5_pos_vals.shape, h5_main.shape))
        return False
    # Spectroscopic ancillary datasets must agree with axis 1
    anc_shape_match = list()
    h5_spec_inds = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]
    h5_spec_vals = h5_main.file[h5_main.attrs['Spectroscopic_Values']]
    anc_shape_match.append(np.all(h5_spec_inds.shape == h5_spec_vals.shape))
    for anc_dset in [h5_spec_inds, h5_spec_vals]:
        anc_shape_match.append(np.all(h5_main.shape[1] == anc_dset.shape[1]))
    if not np.all(anc_shape_match):
        if verbose:
            print('The shapes of the Spectroscopic indices:{}, values:{} datasets did not match with that of the main '
                  'dataset: {}'.format(h5_spec_inds.shape, h5_spec_vals.shape, h5_main.shape))
        return False
    # Finally, the labels / units attributes of each ancillary pair must agree
    try:
        validate_anc_dset_attrs(h5_pos_inds, h5_pos_vals, is_spec=False)
    except ValueError:
        if verbose:
            print('Attributes of Position datasets did not match')
        return False
    try:
        validate_anc_dset_attrs(h5_spec_inds, h5_spec_vals, is_spec=True)
    except ValueError:
        if verbose:
            print('Attributes of Spectroscopic datasets did not match')
        return False
    return success
def validate_anc_dset_attrs(h5_inds, h5_vals, is_spec=True):
    """
    Validates the attributes of a pair of indices and values datasets.
    Throws ValueErrors if any rule is not satisfied
    Parameters
    ----------
    h5_inds : h5py.Dataset
        Indices dataset
    h5_vals : h5py.Dataset
        Values Dataset
    is_spec : bool, optional. Default = True
        Set to True if spectroscopic. Else - Position datasets
    """
    def lists_match(left, right):
        # Element-wise equality of two sequences of the same length
        if len(left) != len(right):
            return False
        return all([l_it == r_it for l_it, r_it in zip(left, right)])
    # Mandatory attributes of both ancillary datasets
    v_names = get_attr(h5_vals, 'labels')
    v_units = get_attr(h5_vals, 'units')
    i_names = get_attr(h5_inds, 'labels')
    i_units = get_attr(h5_inds, 'units')
    # Rule 1: within each dataset, 'labels' and 'units' must be equally long
    for names, units, dset_type in zip([v_names, i_names], [v_units, i_units],
                                       ['Values', 'Indices']):
        if len(names) != len(units):
            raise ValueError('Length of labels: {} and units: {} for the {} '
                             'dataset do not match'
                             ''.format(len(names), len(units), dset_type))
    # Rule 2: Indices and Values must agree on both 'labels' and 'units'
    for i_item, v_item, prop in zip([i_names, i_units], [v_names, v_units],
                                    ['labels', 'units']):
        if not lists_match(i_item, v_item):
            raise ValueError('The "{}" values of the Indices: {} and Values: '
                             '{} datasets do not match'.format(prop, i_item,
                                                               v_item))
    # Now check the rows / cols nums against size of any attr:
    if h5_inds.shape != h5_vals.shape:
        raise ValueError('Shape of Indices: {} and Values: {} datasets do '
                         'not match'.format(h5_inds.shape, h5_vals.shape))
    # Spectroscopic datasets list one dimension per ROW (axis 0);
    # Position datasets list one dimension per COLUMN (axis 1)
    dim_ind = 1
    if is_spec:
        dim_ind = 0
    if h5_inds.shape[dim_ind] != len(v_names):
        raise ValueError('Length of mandatory attributes: {} did not match '
                         'dimension: {} of the ancillary dataset of shape: {}'
                         ''.format(len(v_names), dim_ind, h5_inds.shape))
def link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals):
    """
    Links the object references to the four position and spectroscopic datasets as
    attributes of `h5_main`

    Parameters
    ----------
    h5_main : h5py.Dataset
        2D Dataset which will have the references added as attributes
    h5_pos_inds : h5py.Dataset
        Dataset that will be linked with the name 'Position_Indices'
    h5_pos_vals : h5py.Dataset
        Dataset that will be linked with the name 'Position_Values'
    h5_spec_inds : h5py.Dataset
        Dataset that will be linked with the name 'Spectroscopic_Indices'
    h5_spec_vals : h5py.Dataset
        Dataset that will be linked with the name 'Spectroscopic_Values'

    Returns
    -------
    pyUSID.USIDataset
        USIDataset version of h5_main now that it is a USID Main dataset
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    # Shapes of the ancillary pairs must be consistent with h5_main first
    validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, h5_main.shape,
                          is_spectroscopic=False)
    validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, h5_main.shape,
                          is_spectroscopic=True)
    # Link each ancillary dataset under its standardized attribute name
    anc_dsets = {'Position_Indices': h5_pos_inds,
                 'Position_Values': h5_pos_vals,
                 'Spectroscopic_Indices': h5_spec_inds,
                 'Spectroscopic_Values': h5_spec_vals}
    for alias, anc_dset in anc_dsets.items():
        link_h5_obj_as_alias(h5_main, anc_dset, alias)
    from ..usi_data import USIDataset
    try:
        # If all other conditions are satisfied
        return USIDataset(h5_main)
    except TypeError:
        # If some other conditions are yet to be satisfied
        return h5_main
def check_for_old(h5_base, tool_name, new_parms=None, target_dset=None,
                  h5_parent_goup=None, verbose=False):
    """
    Check to see if the results of a tool already exist and if they
    were performed with the same parameters.

    Parameters
    ----------
    h5_base : h5py.Dataset object
        Dataset on which the tool is being applied to
    tool_name : str
        process or analysis name
    new_parms : dict, optional
        Parameters with which this tool will be performed.
    target_dset : str, optional, default = None
        Name of the dataset whose attributes will be compared against new_parms.
        Default - checking against the group
    h5_parent_goup : h5py.Group, optional. Default = None
        The group to search under. Use this option when `h5_base` and
        the potential results groups (within `h5_parent_goup` are located
        in different HDF5 files. Default - search within h5_base.parent
    verbose : bool, optional, default = False
        Whether or not to print debugging statements

    Returns
    -------
    group : list
        List of all :class:`h5py.Group` objects with parameters matching those in `new_parms`
    """
    if not isinstance(h5_base, h5py.Dataset):
        raise TypeError('h5_base should be a h5py.Dataset object')
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    if h5_parent_goup is None:
        h5_parent_goup = h5_base.parent
    elif not isinstance(h5_parent_goup, (h5py.File, h5py.Group)):
        raise TypeError("'h5_parent_group' should either be a h5py.File "
                        "or h5py.Group object")
    if new_parms is None:
        new_parms = dict()
    elif not isinstance(new_parms, dict):
        raise TypeError('new_parms should be a dict')
    if target_dset is not None:
        target_dset = validate_single_string_arg(target_dset, 'target_dset')
    matching_groups = []
    for group in find_results_groups(h5_base, tool_name,
                                     h5_parent_group=h5_parent_goup):
        if verbose:
            print('Looking at group - {}'.format(group.name.split('/')[-1]))
        # Compare attributes against the group itself, or against the
        # requested dataset within it
        h5_obj = group
        if target_dset is not None:
            if target_dset not in group.keys():
                if verbose:
                    print('{} did not contain the target dataset: {}'.format(group.name.split('/')[-1],
                                                                             target_dset))
                continue
            h5_obj = group[target_dset]
        if check_for_matching_attrs(h5_obj, new_parms=new_parms, verbose=verbose):
            matching_groups.append(group)
    return matching_groups
def get_source_dataset(h5_group):
    """
    Find the name of the source dataset used to create the input `h5_group`,
    so long as the source dataset is in the same HDF5 file

    Parameters
    ----------
    h5_group : :class:`h5py.Group`
        Child group whose source dataset will be returned

    Returns
    -------
    h5_source : USIDataset object
        Main dataset from which this group was generated
    """
    if not isinstance(h5_group, h5py.Group):
        raise TypeError('h5_group should be a h5py.Group object')
    # Results groups are expected to be named "SourceDataset-ProcessName_000"
    group_name = h5_group.name.split('/')[-1]
    name_split = group_name.split('-')
    if len(name_split) != 2:
        raise ValueError("The provided group's name could not be split by '-' as expected in "
                         "SourceDataset-ProcessName_000")
    # The source dataset lives alongside the results group
    h5_source = h5_group.parent[name_split[0]]
    if not isinstance(h5_source, h5py.Dataset):
        raise ValueError('Source object was not a dataset!')
    from ..usi_data import USIDataset
    return USIDataset(h5_source)
def assign_group_index(h5_parent_group, base_name, verbose=False):
    """
    Searches the parent h5 group to find the next available index for the group

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` object
        Parent group under which the new group object will be created
    base_name : str or unicode
        Base name of the new group without index
    verbose : bool, optional. Default=False
        Whether or not to print debugging statements

    Returns
    -------
    base_name : str or unicode
        Base name of the new group with the next available index as a suffix
    """
    if not isinstance(h5_parent_group, h5py.Group):
        raise TypeError('h5_parent_group should be a h5py.Group object')
    base_name = validate_single_string_arg(base_name, 'base_name')
    if len(base_name) == 0:
        raise ValueError('base_name should not be an empty string')
    # Names are always of the form "<base_name>_<index>"
    if not base_name.endswith('_'):
        base_name += '_'
    existing_names = [key for key in h5_parent_group.keys()]
    if verbose:
        print('Looking for group names starting with {} in parent containing items: '
              '{}'.format(base_name, existing_names))
    # Collect the indices already taken by sibling groups with this prefix
    taken = [int(name.replace(base_name, '')) for name in existing_names
             if isinstance(h5_parent_group[name], h5py.Group)
             and name.startswith(base_name)]
    taken = np.sort(taken)
    if verbose:
        print('indices of existing groups with the same prefix: {}'.format(taken))
    next_index = 0 if len(taken) == 0 else taken[-1] + 1
    return base_name + '{:03d}'.format(next_index)
def create_indexed_group(h5_parent_group, base_name):
    """
    Creates a group with an indexed name (eg - 'Measurement_012') under h5_parent_group using the provided base_name
    as a prefix for the group's name

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`
        File or group within which the new group will be created
    base_name : str or unicode
        Prefix for the group name. This need not end with a '_'. It will be added automatically

    Returns
    -------
    h5py.Group
        Newly created group with book-keeping attributes written to it
    """
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or Group object')
    base_name = validate_single_string_arg(base_name, 'base_name')
    # Pick the next free index for this prefix, then create and stamp the group
    indexed_name = assign_group_index(h5_parent_group, base_name)
    new_group = h5_parent_group.create_group(indexed_name)
    write_book_keeping_attrs(new_group)
    return new_group
def create_results_group(h5_main, tool_name, h5_parent_group=None):
    """
    Creates a h5py.Group object autoindexed and named as 'DatasetName-ToolName_00x'

    Parameters
    ----------
    h5_main : h5py.Dataset object
        Reference to the dataset based on which the process / analysis is being performed
    tool_name : string / unicode
        Name of the Process / Analysis applied to h5_main
    h5_parent_group : h5py.Group, optional. Default = None
        Parent group under which the results group will be created. Use this
        option to write results into a new HDF5 file. By default, results will
        be written into the same group containing `h5_main`

    Returns
    -------
    h5_group : :class:`h5py.Group`
        Results group which can now house the results datasets
    """
    if not isinstance(h5_main, h5py.Dataset):
        raise TypeError('h5_main should be a h5py.Dataset object')
    if h5_parent_group is None:
        h5_parent_group = h5_main.parent
    elif not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError("'h5_parent_group' should either be a h5py.File "
                        "or h5py.Group object")
    tool_name = validate_single_string_arg(tool_name, 'tool_name')
    # The '-' character is reserved as the separator between source name and
    # tool name, so it cannot appear within the tool name itself
    if '-' in tool_name:
        warn('tool_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(tool_name, tool_name.replace('-', '_')))
        tool_name = tool_name.replace('-', '_')
    prefix = h5_main.name.split('/')[-1] + '-' + tool_name + '_'
    group_name = assign_group_index(h5_parent_group, prefix)
    h5_group = h5_parent_group.create_group(group_name)
    write_book_keeping_attrs(h5_group)
    # Also add some basic attributes like source and tool name. This will allow relaxation of nomenclature restrictions:
    # this are NOT being used right now but will be in the subsequent versions of pyUSID
    write_simple_attrs(h5_group, {'tool': tool_name, 'num_source_dsets': 1})
    # in this case, there is only one source; references cannot cross files
    if h5_parent_group.file == h5_main.file:
        h5_group.attrs['source_' + '{:03d}'.format(0)] = h5_main.ref
    return h5_group
def copy_main_attributes(h5_main, h5_new):
    """
    Copies the units and quantity name from one dataset to another

    Parameters
    ----------
    h5_main : h5py.Dataset
        Dataset containing the target attributes
    h5_new : h5py.Dataset
        Dataset to which the target attributes are to be copied
    """
    for dset, arg_name in ((h5_main, 'h5_main'), (h5_new, 'h5_new')):
        if not isinstance(dset, h5py.Dataset):
            raise TypeError(arg_name + ' should be a h5py.Dataset object')
    # Copy only the two mandatory USID attributes, sanitized for HDF5
    for att_name in ['quantity', 'units']:
        if att_name not in h5_main.attrs:
            raise KeyError('Attribute: {} does not exist in {}'.format(att_name, h5_main))
        h5_new.attrs[att_name] = clean_string_att(get_attr(h5_main, att_name))
def create_empty_dataset(source_dset, dtype, dset_name, h5_group=None,
                         new_attrs=None, skip_refs=False):
    """
    Creates an empty dataset in the h5 file based on the provided dataset in
    the same or specified group
    Parameters
    ----------
    source_dset : h5py.Dataset object
        Source object that provides information on the group and shape of the dataset
    dtype : dtype
        Data type of the fit / guess datasets
    dset_name : String / Unicode
        Name of the dataset
    h5_group : :class:`h5py.Group`, optional. Default = None
        Group within which this dataset will be created
    new_attrs : dictionary (Optional)
        Any new attributes that need to be written to the dataset
    skip_refs : boolean, optional
        Should ObjectReferences be skipped when copying attributes from the
        `source_dset`
    Returns
    -------
    h5_new_dset : h5py.Dataset object
        Newly created dataset
    """
    if not isinstance(source_dset, h5py.Dataset):
        raise TypeError('source_deset should be a h5py.Dataset object')
    # Only validating dtype here; the validated result is not needed
    _ = validate_dtype(dtype)
    if new_attrs is not None:
        if not isinstance(new_attrs, dict):
            raise TypeError('new_attrs should be a dictionary')
    else:
        new_attrs = dict()
    if h5_group is None:
        # default: create next to the source dataset
        h5_group = source_dset.parent
    else:
        if not isinstance(h5_group, (h5py.Group, h5py.File)):
            raise TypeError('h5_group should be a h5py.Group or h5py.File object')
        if source_dset.file != h5_group.file and not skip_refs:
            # Cannot carry over references
            warn('H5 object references will not be copied over since {} is in '
                 'a different HDF5 file as {}'.format(h5_group, source_dset))
            skip_refs = True
    dset_name = validate_single_string_arg(dset_name, 'dset_name')
    # '-' is reserved as the separator in results-group names; replace it
    if '-' in dset_name:
        warn('dset_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(dset_name, dset_name.replace('-', '_')))
        dset_name = dset_name.replace('-', '_')
    # Mirror the source dataset's shape, compression, and chunking
    kwargs = {'shape': source_dset.shape, 'dtype': dtype, 'compression': source_dset.compression,
              'chunks': source_dset.chunks}
    if source_dset.file.driver == 'mpio':
        # Parallel HDF5 (mpio) cannot create compressed datasets
        if kwargs.pop('compression', None) is not None:
            warn('This HDF5 file has been opened wth the "mpio" communicator. '
                 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')
    if dset_name in h5_group.keys():
        if isinstance(h5_group[dset_name], h5py.Dataset):
            warn('A dataset named: {} already exists in group: {}'.format(dset_name, h5_group.name))
            # Reuse the existing dataset if possible ...
            h5_new_dset = h5_group[dset_name]
            # Make sure it has the correct shape and dtype
            if any((source_dset.shape != h5_new_dset.shape, dtype != h5_new_dset.dtype)):
                warn('Either the shape (existing: {} desired: {}) or dtype (existing: {} desired: {}) of the dataset '
                     'did not match with expectations. Deleting and creating a new one.'.format(h5_new_dset.shape,
                                                                                                source_dset.shape,
                                                                                                h5_new_dset.dtype,
                                                                                                dtype))
                # ... otherwise replace it outright
                del h5_new_dset, h5_group[dset_name]
                h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)
        else:
            # The name is taken by a non-dataset object (e.g. a group)
            raise KeyError('{} is already a {} in group: {}'.format(dset_name, type(h5_group[dset_name]),
                                                                    h5_group.name))
    else:
        h5_new_dset = h5_group.create_dataset(dset_name, **kwargs)
    # This should link the ancillary datasets correctly
    h5_new_dset = hut.copy_attributes(source_dset, h5_new_dset,
                                      skip_refs=skip_refs)
    if source_dset.file != h5_group.file:
        # Cross-file case: re-create links to objects referenced by the source
        hut.copy_linked_objects(source_dset, h5_new_dset)
    h5_new_dset.attrs.update(new_attrs)
    if check_if_main(h5_new_dset):
        # Promote to a USID Main dataset and stamp book-keeping attributes
        from ..usi_data import USIDataset
        h5_new_dset = USIDataset(h5_new_dset)
        # update book keeping attributes
        write_book_keeping_attrs(h5_new_dset)
    return h5_new_dset
def check_for_matching_attrs(h5_obj, new_parms=None, verbose=False):
    """
    Compares attributes in the given H5 object against those in the provided dictionary and returns True if
    the parameters match, and False otherwise

    Parameters
    ----------
    h5_obj : h5py object (Dataset or :class:`h5py.Group`)
        Object whose attributes will be compared against new_parms
    new_parms : dict, optional. default = empty dictionary
        Parameters to compare against the attributes present in h5_obj
    verbose : bool, optional, default = False
        Whether or not to print debugging statements

    Returns
    -------
    tests: bool
        Whether or not all parameters in new_parms matched with those in h5_obj's attributes
    """
    # BUG FIX: ``collections.Iterable`` was deprecated in Python 3.3 and
    # removed in Python 3.10; the ABC lives in ``collections.abc``. Imported
    # locally so this fix does not depend on the module-level import block.
    from collections.abc import Iterable

    if not isinstance(h5_obj, (h5py.Dataset, h5py.Group, h5py.File)):
        raise TypeError('h5_obj should be a h5py.Dataset, h5py.Group, or h5py.File object')
    if new_parms is None:
        new_parms = dict()
    elif not isinstance(new_parms, dict):
        raise TypeError('new_parms should be a dictionary')
    tests = []
    for key in new_parms.keys():
        if verbose:
            print('Looking for new attribute named: {}'.format(key))
        # HDF5 cannot store None as an attribute anyway. ignore
        if new_parms[key] is None:
            continue
        try:
            old_value = get_attr(h5_obj, key)
        except KeyError:
            # if parameter was not found assume that something has changed
            if verbose:
                print('New parm: {} \t- new parm not in group *****'.format(key))
            tests.append(False)
            break
        if isinstance(old_value, np.ndarray):
            # Attribute stored as an array: the candidate must be iterable and
            # of the same size before an element-wise comparison makes sense
            if not isinstance(new_parms[key], Iterable):
                if verbose:
                    print('New parm: {} \t- new parm not iterable unlike old parm *****'.format(key))
                tests.append(False)
                break
            new_array = np.array(new_parms[key])
            if old_value.size != new_array.size:
                if verbose:
                    print('New parm: {} \t- are of different sizes ****'.format(key))
                tests.append(False)
            else:
                try:
                    answer = np.allclose(old_value, new_array)
                except TypeError:
                    # comes here when comparing string arrays
                    # Not sure of a better way
                    answer = []
                    for old_val, new_val in zip(old_value, new_array):
                        answer.append(old_val == new_val)
                    answer = np.all(answer)
                if verbose:
                    print('New parm: {} \t- match: {}'.format(key, answer))
                tests.append(answer)
        else:
            # Scalar (or scalar-like) attribute: rely on numpy broadcasting
            answer = np.all(new_parms[key] == old_value)
            if verbose:
                print('New parm: {} \t- match: {}'.format(key, answer))
            tests.append(answer)
    if verbose:
        print('')
    # all([]) is True, so an empty / all-None new_parms reports a match
    return all(tests)
def write_ind_val_dsets(h5_parent_group, dimensions, is_spectral=True, verbose=False, base_name=None,
                        slow_to_fast=False):
    """
    Creates h5py.Datasets for the position OR spectroscopic indices and values of the data.
    Remember that the contents of the dataset can be changed if need be after the creation of the datasets.
    For example if one of the spectroscopic dimensions (e.g. - Bias) was sinusoidal and not linear, The specific
    dimension in the Spectroscopic_Values dataset can be manually overwritten.

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` or :class:`h5py.File`
        Group under which the indices and values datasets will be created
    dimensions : Dimension or array-like of Dimension objects
        Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
        datasets
    is_spectral : bool, optional. default = True
        Spectroscopic (True) or Position (False)
    verbose : Boolean, optional
        Whether or not to print statements for debugging purposes
    base_name : str or unicode, optional
        Prefix for the datasets. Default: 'Position' when is_spectral is False, 'Spectroscopic' otherwise
    slow_to_fast : bool, Optional. Default=False
        Set to True if the dimensions are arranged from slowest varying to fastest varying.
        Set to False otherwise.

    Returns
    -------
    h5_spec_inds : h5py.Dataset
        Dataset containing the position indices
    h5_spec_vals : h5py.Dataset
        Dataset containing the value at each position

    Notes
    -----
    `steps`, `initial_values`, `labels`, and 'units' must be the same length as
    `dimensions` when they are specified.
    Dimensions should be in the order from fastest varying to slowest.
    """
    # ------------------------ input validation ------------------------
    if isinstance(dimensions, Dimension):
        # Allow a bare Dimension to be passed instead of a one-element sequence
        dimensions = [dimensions]
    if not isinstance(dimensions, (list, np.ndarray, tuple)):
        raise TypeError('dimensions should be array-like ')
    if not np.all([isinstance(x, Dimension) for x in dimensions]):
        raise TypeError('dimensions should be a sequence of Dimension objects')
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or Group object')
    if not is_editable_h5(h5_parent_group):
        raise ValueError('The provided h5 object is not valid / open')
    if base_name is not None:
        base_name = validate_single_string_arg(base_name, 'base_name')
        if not base_name.endswith('_'):
            base_name += '_'
    else:
        # Default dataset prefix depends on the flavor of ancillary dataset
        base_name = 'Position_'
        if is_spectral:
            base_name = 'Spectroscopic_'
    if not slow_to_fast:
        warn('In the future write_ind_val_dsets will default to requiring dimensions to be arranged from slowest to fastest varying')
    # check if the datasets already exist. If they do, there's no point in going any further
    for sub_name in ['Indices', 'Values']:
        if base_name + sub_name in h5_parent_group.keys():
            raise KeyError('Dataset: {} already exists in provided group: {}'.format(base_name + sub_name,
                                                                                    h5_parent_group.name))
    # All dimensions must share one mode (DEFAULT / INCOMPLETE / DEPENDENT)
    modes = [dim.mode for dim in dimensions]
    sing_mode = np.unique(modes)
    if sing_mode.size > 1:
        raise NotImplementedError('Cannot yet work on combinations of modes for Dimensions. Consider doing manually')
    sing_mode = sing_mode[0]
    if sing_mode == DimType.DEFAULT:
        if slow_to_fast:
            # Ensure that the dimensions are arranged from fast to slow instead
            dimensions = dimensions[::-1]
        indices, values = build_ind_val_matrices([dim.values for dim in dimensions],
                                                 is_spectral=is_spectral)
        # At this point, dimensions and unit values are arranged from fastest to slowest
        # We want dimensions to be arranged from slowest to fastest:
        rev_func = np.flipud if is_spectral else np.fliplr
        dimensions = dimensions[::-1]
        indices = rev_func(indices)
        values = rev_func(values)
    elif sing_mode == DimType.INCOMPLETE:
        # Incomplete dimensions were sampled jointly, so every dimension must
        # provide the same number of values; indices collapse to one ramp
        lengths = np.unique([len(dim.values) for dim in dimensions])
        if len(lengths) > 1:
            raise ValueError('Values for dimensions not of same length')
        single_dim = np.arange(lengths[0], dtype=INDICES_DTYPE)
        indices = np.tile(single_dim, (2, 1)).T
        values = np.dstack(tuple([dim.values for dim in dimensions])).squeeze()
        if is_spectral:
            # Spectroscopic datasets are laid out as (dimensions x steps)
            indices = indices.T
            values = values.T
    else:
        raise NotImplementedError('Cannot yet work on Dependent dimensions')
    if verbose:
        print('Indices:')
        print(indices)
        print('Values:')
        print(values)
    # Create the Datasets for both Indices and Values
    h5_indices = h5_parent_group.create_dataset(base_name + 'Indices', data=INDICES_DTYPE(indices), dtype=INDICES_DTYPE)
    h5_values = h5_parent_group.create_dataset(base_name + 'Values', data=VALUES_DTYPE(values), dtype=VALUES_DTYPE)
    for h5_dset in [h5_indices, h5_values]:
        write_simple_attrs(h5_dset, {'units': [x.units for x in dimensions], 'labels': [x.name for x in dimensions],
                                     'type': [dim.mode.value for dim in dimensions]})
    # BUG FIX: the original warning was missing a space between the adjacent
    # string literals 'creates' and 'region', emitting "createsregion".
    warn('pyUSID.io.hdf_utils.simple.write_ind_val_dsets no longer creates '
         'region references for each dimension. Please use '
         'pyUSID.io.reg_ref.write_region_references to manually create region '
         'references')
    return h5_indices, h5_values
def write_reduced_anc_dsets(h5_parent_group, h5_inds, h5_vals, dim_name, basename=None, is_spec=None,
                            verbose=False):
    """
    Creates new Ancillary Indices and Values datasets from the input datasets by dropping the specified dimensions

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group` or h5py.File
        Group under which the indices and values datasets will be created
    h5_inds : HDF5 Dataset
        Spectroscopic or Positions indices dataset
    h5_vals : HDF5 Dataset
        Spectroscopic or Positions values dataset
    dim_name : str or unicode or list of strings
        Names of the dimension(s) to remove
    basename : str or unicode, Optional
        String to which '_Indices' and '_Values' will be appended to get the names of the new datasets.
        Default = 'Position' or 'Spectroscopic'
    is_spec : bool, optional
        Whether or not the provided ancillary datasets are position or spectroscopic
        The user is recommended to supply this parameter whenever it is known or possible.
        By default, this function will attempt to recognize the answer based on the shape of the datasets.
    verbose : bool, optional. Default = False
        Whether or not to print debugging print statements

    Returns
    -------
    h5_inds_new : h5py.Dataset
        Reduced indices dataset
    h5_vals_new : h5py.Dataset
        Reduced values dataset
    """
    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should either be a h5py. Group or File object')
    for param, param_name in zip([h5_inds, h5_vals], ['h5_inds', 'h5_vals']):
        if not isinstance(param, h5py.Dataset):
            raise TypeError(param_name + ' should be a h5py.Dataset object')
    if dim_name is not None:
        dim_name = validate_list_of_strings(dim_name, 'dim_name')
    # NOTE(review): if dim_name is None, the loop below iterates over None and
    # raises TypeError - looks like dim_name is effectively mandatory; confirm.
    all_dim_names = list(get_attr(h5_inds, 'labels'))
    for item in dim_name:
        if item not in all_dim_names:
            raise KeyError('Requested dimension: {} not in the list of labels: {}'.format(item, all_dim_names))
    # Pull both matrices fully into memory before slicing
    ind_mat = h5_inds[()]
    val_mat = h5_vals[()]
    if is_spec is None:
        # Attempt to recognize the type automatically
        # Heuristic: spectroscopic matrices are wider than tall; ambiguous
        # (square) shapes cannot be guessed and must be specified by the caller
        is_spec = False
        if ind_mat.shape[0] == ind_mat.shape[1]:
            raise ValueError('Unable automatically guess whether the provided datasets are position or '
                             'spectroscopic. Please explicitely specify via the "is_spec" boolean kwarg')
        if ind_mat.shape[0] < ind_mat.shape[1]:
            is_spec = True
    else:
        if not isinstance(is_spec, bool):
            raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))
    if basename is not None:
        basename = validate_single_string_arg(basename, 'basename')
        # Strip a trailing underscore; '_Indices' / '_Values' are appended below
        if basename.endswith('_'):
            basename = basename[:-1]
    else:
        if is_spec:
            basename = 'Spectroscopic'
        else:
            basename = 'Position'
    # Refuse to overwrite datasets that already exist under the parent group
    for sub_name in ['_Indices', '_Values']:
        if basename + sub_name in h5_parent_group.keys():
            raise KeyError('Dataset: {} already exists in provided group: {}'.format(basename + sub_name,
                                                                                    h5_parent_group.name))
    if set(dim_name) != set(all_dim_names):
        # At least one dimension will remain
        if verbose:
            print('All Dimensions: {}. Dimensions to be removed: {}'.format(all_dim_names, dim_name))
        if not is_spec:
            # Convert to spectral shape
            ind_mat = np.transpose(ind_mat)
            val_mat = np.transpose(val_mat)
        # For all dimensions, find where the index = 0
        # basically, we are indexing all dimensions to 0
        first_indices = []
        keep_dim = np.ones(len(all_dim_names), dtype=bool)
        for cur_dim in dim_name:
            dim_ind = all_dim_names.index(cur_dim)
            keep_dim[dim_ind] = False
            # check equality against the minimum value instead of 0 to account for cases when a dimension does not start
            # from 0 (already been sliced) - think of multi-dimensional slicing!
            first_indices.append(ind_mat[dim_ind] == np.min(ind_mat[dim_ind]))
        first_indices = np.vstack(first_indices)
        if verbose:
            print('Raw first_indices:')
            print(first_indices)
            print('Dimensions to keep: {}'.format(keep_dim))
        # Keep only the columns where ALL removed dimensions sit at their minimum
        step_starts = np.all(first_indices, axis=0)
        if verbose:
            print('Columns in dataset to keep:')
            print(step_starts)
        '''
        Extract all rows that we want to keep from input indices and values
        '''
        # TODO: handle TypeError: Indexing elements must be in increasing order
        ind_mat = ind_mat[keep_dim, :][:, step_starts]
        val_mat = val_mat[keep_dim, :][:, step_starts]
        if not is_spec:
            # Convert back to position shape
            ind_mat = np.transpose(ind_mat)
            val_mat = np.transpose(val_mat)
        '''
        Create new Datasets to hold the data
        Name them based on basename
        '''
        h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=ind_mat, dtype=h5_inds.dtype)
        h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=val_mat, dtype=h5_vals.dtype)
        # Extracting the labels from the original spectroscopic data sets
        labels = h5_inds.attrs['labels'][keep_dim]
        # Creating the dimension slices for the new spectroscopic data sets
        # Adding the labels and units to the new spectroscopic data sets
        for dset in [h5_inds_new, h5_vals_new]:
            write_simple_attrs(dset, {'labels': labels, 'units': h5_inds.attrs['units'][keep_dim]})
    else:
        # Remove all dimensions:
        # Degenerate case - replace everything with a single dummy step
        h5_inds_new = h5_parent_group.create_dataset(basename + '_Indices', data=np.array([[0]]), dtype=INDICES_DTYPE)
        h5_vals_new = h5_parent_group.create_dataset(basename + '_Values', data=np.array([[0]]), dtype=VALUES_DTYPE)
        for dset in [h5_inds_new, h5_vals_new]:
            write_simple_attrs(dset, {'labels': ['Single_Step'], 'units': ['a. u.']})
    return h5_inds_new, h5_vals_new
| 50,455 | 38.326578 | 133 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/hdf_utils/model.py | # -*- coding: utf-8 -*-
"""
Utilities for reading and writing USID datasets that are highly model-dependent (with or without N-dimensional form)
Created on Tue Nov 3 21:14:25 2015
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, print_function, absolute_import, unicode_literals
from warnings import warn
import sys
import h5py
import numpy as np
from dask import array as da
from sidpy.hdf.hdf_utils import get_attr, write_simple_attrs, is_editable_h5, \
copy_dataset, lazy_load_array
from sidpy.base.num_utils import contains_integers
from sidpy.base.dict_utils import flatten_dict
from sidpy.base.string_utils import validate_single_string_arg, \
validate_list_of_strings, validate_string_args
from sidpy.hdf.dtype_utils import validate_dtype
from sidpy import sid
from .base import write_book_keeping_attrs
from .simple import link_as_main, check_if_main, write_ind_val_dsets, validate_dims_against_main, validate_anc_h5_dsets
from ..dimension import Dimension, validate_dimensions
from ..anc_build_utils import INDICES_DTYPE, make_indices_matrix
# Py2/Py3 compatibility shim: on Python 3 expose ``unicode`` as an alias of
# ``str`` so the rest of the module can reference one text type name.
if sys.version_info[0] == 3:
    unicode = str
def reshape_to_n_dims(h5_main, h5_pos=None, h5_spec=None, get_labels=False, verbose=False, sort_dims=False,
                      lazy=False):
    """
    Reshape the input 2D matrix to be N-dimensions based on the
    position and spectroscopic datasets.

    Parameters
    ----------
    h5_main : HDF5 Dataset
        2D data to be reshaped
    h5_pos : HDF5 Dataset, optional
        Position indices corresponding to rows in `h5_main`
    h5_spec : HDF5 Dataset, optional
        Spectroscopic indices corresponding to columns in `h5_main`
    get_labels : bool, optional
        Whether or not to return the dimension labels. Default False
    verbose : bool, optional
        Whether or not to print debugging statements
    sort_dims : bool
        If True, the data is sorted so that the dimensions are in order from slowest to fastest
        If False, the data is kept in the original order
        If `get_labels` is also True, the labels are sorted as well.
    lazy : bool, optional. Default = False
        If False, ds_Nd will be a numpy.ndarray object - this is suitable if the HDF5 dataset fits into memory
        If True, ds_Nd will be a dask.array object - This is suitable if the HDF5 dataset is too large to fit into
        memory. Note that this will be a lazy computation meaning that the returned object just contains the
        instructions. In order to get the actual value or content in numpy arrays, call ds_Nd.compute()

    Returns
    -------
    ds_Nd : N-D numpy array or dask.array object
        N dimensional array arranged as [positions slowest to fastest, spectroscopic slowest to fastest]
    success : boolean or string
        True if full reshape was successful
        "Positions" if it was only possible to reshape by
        the position dimensions
        False if no reshape was possible
    ds_labels : list of str
        List of the labels of each dimension of `ds_Nd`

    Notes
    -----
    If either `h5_pos` or `h5_spec` are not provided, the function will first
    attempt to find them as attributes of `h5_main`. If that fails, it will
    generate dummy values for them.
    """
    # TODO: automatically switch on lazy if the data is larger than memory
    # TODO: sort_dims does not appear to do much. Functions as though it was always True
    # ----- validate inputs: without ancillary dsets, h5_main must be Main -----
    if h5_pos is None and h5_spec is None:
        if not check_if_main(h5_main):
            raise ValueError('if h5_main is a h5py.Dataset it should be a Main dataset')
    else:
        if not isinstance(h5_main, (h5py.Dataset, np.ndarray, da.core.Array)):
            raise TypeError('h5_main should either be a h5py.Dataset or numpy array')
        if h5_pos is not None:
            if not isinstance(h5_pos, (h5py.Dataset, np.ndarray, da.core.Array)):
                raise TypeError('h5_pos should either be a h5py.Dataset or numpy array')
            if h5_pos.shape[0] != h5_main.shape[0]:
                raise ValueError('The size of h5_pos: {} does not match with h5_main: {}'.format(h5_pos.shape,
                                                                                                 h5_main.shape))
        if h5_spec is not None:
            if not isinstance(h5_spec, (h5py.Dataset, np.ndarray, da.core.Array)):
                raise TypeError('h5_spec should either be a h5py.Dataset or numpy array')
            if h5_spec.shape[1] != h5_main.shape[1]:
                raise ValueError('The size of h5_spec: {} does not match with h5_main: {}'.format(h5_spec.shape,
                                                                                                 h5_main.shape))
    # Fallback labels used when no ancillary information can be located
    pos_labs = np.array(['Positions'])
    spec_labs = np.array(['Spectral_Step'])
    if h5_pos is None:
        """
        Get the Position datasets from the references if possible
        """
        if isinstance(h5_main, h5py.Dataset):
            try:
                h5_pos = h5_main.file[h5_main.attrs['Position_Indices']]
                ds_pos = h5_pos[()]
                pos_labs = get_attr(h5_pos, 'labels')
            except KeyError:
                # No attribute-linked ancillary dataset; fabricate a trivial
                # one position index per row of h5_main
                print('No position datasets found as attributes of {}'.format(h5_main.name))
                if len(h5_main.shape) > 1:
                    ds_pos = np.arange(h5_main.shape[0], dtype=INDICES_DTYPE).reshape(-1, 1)
                    pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
                else:
                    ds_pos = np.array(0, dtype=INDICES_DTYPE).reshape(-1, 1)
        else:
            ds_pos = np.arange(h5_main.shape[0], dtype=INDICES_DTYPE).reshape(-1, 1)
            pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
    elif isinstance(h5_pos, h5py.Dataset):
        """
        Position Indices dataset was provided
        """
        ds_pos = h5_pos[()]
        pos_labs = get_attr(h5_pos, 'labels')
    elif isinstance(h5_pos, (np.ndarray, da.core.Array)):
        ds_pos = np.atleast_2d(h5_pos)
        pos_labs = np.array(['Position Dimension {}'.format(ipos) for ipos in range(ds_pos.shape[1])])
    else:
        raise TypeError('Position Indices must be either h5py.Dataset or None')
    if h5_spec is None:
        """
        Get the Spectroscopic datasets from the references if possible
        """
        if isinstance(h5_main, h5py.Dataset):
            try:
                h5_spec = h5_main.file[h5_main.attrs['Spectroscopic_Indices']]
                ds_spec = h5_spec[()]
                spec_labs = get_attr(h5_spec, 'labels')
            except KeyError:
                # Same fallback as for positions: one spectral index per column
                print('No spectroscopic datasets found as attributes of {}'.format(h5_main.name))
                if len(h5_main.shape) > 1:
                    ds_spec = np.arange(h5_main.shape[1], dtype=INDICES_DTYPE).reshape([1, -1])
                    spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
                else:
                    ds_spec = np.array(0, dtype=INDICES_DTYPE).reshape([1, 1])
        else:
            ds_spec = np.arange(h5_main.shape[1], dtype=INDICES_DTYPE).reshape([1, -1])
            spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
    elif isinstance(h5_spec, h5py.Dataset):
        """
        Spectroscopic Indices dataset was provided
        """
        ds_spec = h5_spec[()]
        spec_labs = get_attr(h5_spec, 'labels')
    elif isinstance(h5_spec, (np.ndarray, da.core.Array)):
        ds_spec = h5_spec
        spec_labs = np.array(['Spectral Dimension {}'.format(ispec) for ispec in range(ds_spec.shape[0])])
    else:
        raise TypeError('Spectroscopic Indices must be either h5py.Dataset or None')
    '''
    Sort the indices from fastest to slowest
    '''
    pos_sort = get_sort_order(np.transpose(ds_pos))
    spec_sort = get_sort_order(ds_spec)
    if verbose:
        print('Position dimensions:', pos_labs)
        print('Position sort order:', pos_sort)
        print('Spectroscopic Dimensions:', spec_labs)
        print('Spectroscopic sort order:', spec_sort)
    '''
    Get the size of each dimension in the sorted order
    '''
    pos_dims = get_dimensionality(np.transpose(ds_pos), pos_sort)
    spec_dims = get_dimensionality(ds_spec, spec_sort)
    # Sanity check: the product of dimension sizes must tile the flat matrix
    # exactly, otherwise some dimensions are dependent (and unmarked)
    if np.prod(pos_dims) != h5_main.shape[0]:
        mesg = 'Product of position dimension sizes: {} = {} not matching ' \
               'with size of first axis of main dataset: {}. One or more ' \
               'dimensions are dependent dimensions and not marked as such' \
               '.'.format(pos_dims, np.prod(pos_dims), h5_main.shape[0])
        raise ValueError(mesg)
    if np.prod(spec_dims) != h5_main.shape[1]:
        mesg = 'Product of spectroscopic dimension sizes: {} = {} not matching ' \
               'with size of second axis of main dataset: {}. One or more ' \
               'dimensions are dependent dimensions and not marked as such' \
               '.'.format(spec_dims, np.prod(spec_dims), h5_main.shape[1])
        raise ValueError(mesg)
    if verbose:
        print('\nPosition dimensions (sort applied):', pos_labs[pos_sort])
        print('Position dimensionality (sort applied):', pos_dims)
        print('Spectroscopic dimensions (sort applied):', spec_labs[spec_sort])
        print('Spectroscopic dimensionality (sort applied):', spec_dims)
    if lazy:
        ds_main = lazy_load_array(h5_main)
    else:
        ds_main = h5_main[()]
    """
    Now we reshape the dataset based on those dimensions
    numpy reshapes correctly when the dimensions are arranged from slowest to fastest.
    Since the sort orders we have are from fastest to slowest, we need to reverse the orders
    for both the position and spectroscopic dimensions
    """
    if verbose:
        print('Will attempt to reshape main dataset from:\n{} to {}'.format(ds_main.shape, pos_dims[::-1] + spec_dims[::-1]))
    try:
        ds_Nd = ds_main.reshape(pos_dims[::-1] + spec_dims[::-1])
    except ValueError:
        warn('Could not reshape dataset to full N-dimensional form. Attempting reshape based on position only.')
        try:
            ds_Nd = ds_main.reshape(pos_dims[::-1] + [-1])
        except ValueError:
            warn('Reshape by position only also failed. Will keep dataset in 2d form.')
            if get_labels:
                return ds_main, False, ['Position', 'Spectral Step']
            else:
                return ds_main, False
        # No exception
        else:
            if get_labels:
                # NOTE(review): spec_labs is a numpy array here - list + ndarray
                # concatenation looks fragile in this fallback path; confirm
                return ds_Nd, 'Positions', ['Position'] + spec_labs
            else:
                return ds_Nd, 'Positions'
    # Labels corresponding to the (sorted, slowest-to-fastest) reshaped axes
    all_labels = np.hstack((pos_labs[pos_sort][::-1],
                            spec_labs[spec_sort][::-1]))
    if verbose:
        print('\nAfter reshaping, labels are', all_labels)
        print('Data shape is', ds_Nd.shape)
    """
    At this point, the data is arranged from slowest to fastest dimension in both pos and spec
    """
    if sort_dims:
        results = [ds_Nd, True]
        if get_labels:
            results.append(all_labels)
        return results
    if verbose:
        print('\nGoing to put dimensions back in the same order as in the file:')
    swap_axes = list()
    # Compare the original order of the pos / spec labels with where these dimensions occur in the sorted labels
    for lab in pos_labs:
        swap_axes.append(np.argwhere(all_labels == lab).squeeze())
    for lab in spec_labs:
        swap_axes.append(np.argwhere(all_labels == lab).squeeze())
    swap_axes = np.array(swap_axes)
    # If there are empty arrays, remove them.
    swap_axes = np.array([ax for ax in swap_axes if ax.size > 0])
    if verbose:
        print('Axes will permuted in this order:', swap_axes)
        print('New labels ordering:', all_labels[swap_axes])
    ds_Nd = ds_Nd.transpose(tuple(swap_axes))
    results = [ds_Nd, True]
    if verbose:
        print('Dataset now of shape:', ds_Nd.shape)
    if get_labels:
        '''
        Get the labels in the proper order
        '''
        results.append(all_labels[swap_axes])
    return results
def reshape_from_n_dims(data_n_dim, h5_pos=None, h5_spec=None, verbose=False):
    """
    Reshape the input 2D matrix to be N-dimensions based on the
    position and spectroscopic datasets.

    Parameters
    ----------
    data_n_dim : numpy.array or dask.array.core.Array
        N dimensional array arranged as [positions dimensions..., spectroscopic dimensions]
        If h5_pos and h5_spec are not provided, this function will have to assume that the dimensions
        are arranged as [positions slowest to fastest, spectroscopic slowest to fastest].
        This restriction is removed if h5_pos and h5_spec are provided
    h5_pos : HDF5 Dataset, numpy.array or dask.array.core.Array
        Position indices corresponding to rows in the final 2d array
        The dimensions should be arranged in terms of rate of change corresponding to data_n_dim.
        In other words if data_n_dim had two position dimensions arranged as [pos_fast, pos_slow, spec_dim_1....],
        h5_pos should be arranged as [pos_fast, pos_slow]
    h5_spec : HDF5 Dataset, numpy. array or dask.array.core.Array
        Spectroscopic indices corresponding to columns in the final 2d array
        The dimensions should be arranged in terms of rate of change corresponding to data_n_dim.
        In other words if data_n_dim had two spectral dimensions arranged as [pos_dim_1,..., spec_fast, spec_slow],
        h5_spec should be arranged as [pos_slow, pos_fast]
    verbose : bool, optional. Default = False
        Whether or not to print log statements

    Returns
    -------
    ds_2d : numpy.array
        2 dimensional numpy array arranged as [positions, spectroscopic]
    success : boolean or string
        True if full reshape was successful
        "Positions" if it was only possible to reshape by
        the position dimensions
        False if no reshape was possible

    Notes
    -----
    If either `h5_pos` or `h5_spec` are not provided, the function will
    assume the first dimension is position and the remaining are spectroscopic already
    in order from fastest to slowest.
    """
    if not isinstance(data_n_dim, (np.ndarray, da.core.Array)):
        raise TypeError('data_n_dim is not a numpy or dask array')
    if h5_spec is None and h5_pos is None:
        raise ValueError('at least one of h5_pos or h5_spec must be specified for an attempt to reshape to 2D')
    if data_n_dim.ndim < 2:
        # Already flat (or scalar) - nothing to collapse
        return data_n_dim, True
    # ----- normalize the position indices into an in-memory numpy array -----
    if h5_pos is None:
        pass
    elif isinstance(h5_pos, h5py.Dataset):
        '''
        Position Indices dataset was provided
        '''
        ds_pos = h5_pos[()]
    elif isinstance(h5_pos, da.core.Array):
        ds_pos = h5_pos.compute()
    elif isinstance(h5_pos, np.ndarray):
        ds_pos = h5_pos
    else:
        raise TypeError('Position Indices must be either h5py.Dataset or None')
    # ----- normalize the spectroscopic indices likewise -----
    if h5_spec is None:
        pass
    elif isinstance(h5_spec, h5py.Dataset):
        '''
        Spectroscopic Indices dataset was provided
        '''
        ds_spec = h5_spec[()]
    elif isinstance(h5_spec, da.core.Array):
        ds_spec = h5_spec.compute()
    elif isinstance(h5_spec, np.ndarray):
        ds_spec = h5_spec
    else:
        raise TypeError('Spectroscopic Indices must be either h5py.Dataset or None')
    if h5_spec is None and h5_pos is not None:
        # Only positions supplied: fabricate spectroscopic indices from the
        # trailing axes of data_n_dim (assumed slow -> fast)
        if verbose:
            print('Spectral indices not provided but position indices provided.\n'
                  'Building spectral indices assuming that dimensions are arranged as slow -> fast')
        pos_dims = get_dimensionality(ds_pos, index_sort=get_sort_order(ds_pos))
        if not np.all([x in data_n_dim.shape for x in pos_dims]):
            raise ValueError('Dimension sizes in pos_dims: {} do not exist in data_n_dim shape: '
                             '{}'.format(pos_dims, data_n_dim.shape))
        spec_dims = [col for col in list(data_n_dim.shape[len(pos_dims):])]
        if verbose:
            print('data has dimensions: {}. Provided position indices had dimensions of size: {}. Spectral dimensions '
                  'will built with dimensions: {}'.format(data_n_dim.shape, pos_dims, spec_dims))
        ds_spec = make_indices_matrix(spec_dims, is_position=False)
    elif h5_pos is None and h5_spec is not None:
        # Only spectroscopic supplied: fabricate position indices from the
        # leading axes of data_n_dim (assumed slow -> fast)
        if verbose:
            print('Position indices not provided but spectral indices provided.\n'
                  'Building position indices assuming that dimensions are arranged as slow -> fast')
        spec_dims = get_dimensionality(ds_spec, index_sort=get_sort_order(ds_spec))
        if not np.all([x in data_n_dim.shape for x in spec_dims]):
            raise ValueError('Dimension sizes in spec_dims: {} do not exist in data_n_dim shape: '
                             '{}'.format(spec_dims, data_n_dim.shape))
        pos_dims = [col for col in list(data_n_dim.shape[:data_n_dim.ndim-len(spec_dims)])]
        if verbose:
            print('data has dimensions: {}. Spectroscopic position indices had dimensions of size: {}. Position '
                  'dimensions will built with dimensions: {}'.format(data_n_dim.shape, spec_dims, pos_dims))
        ds_pos = make_indices_matrix(pos_dims, is_position=True)
    elif h5_spec is not None and h5_pos is not None:
        # BUG FIX: np.product was deprecated in NumPy 1.25 and removed in
        # NumPy 2.0 - np.prod is the supported equivalent.
        if ds_pos.shape[0] * ds_spec.shape[1] != np.prod(data_n_dim.shape):
            raise ValueError('The product ({}) of the number of positions ({}) and spectroscopic ({}) observations is '
                             'not equal to the product ({}) of the data shape ({})'
                             '.'.format(ds_pos.shape[0] * ds_spec.shape[1], ds_pos.shape[0], ds_spec.shape[1],
                                        np.prod(data_n_dim.shape), data_n_dim.shape))
        if ds_pos.shape[1] + ds_spec.shape[0] != data_n_dim.ndim:
            # This may mean that the dummy position or spectroscopic axes has been squeezed out!
            # Dask does NOT allow singular dimensions apparently. So cannot do expand_dims. Handle later
            if ds_pos.size == 1 or ds_spec.size == 1:
                if verbose:
                    print('ALL Position dimensions squeezed: {}. ALL Spectroscopic dimensions squeezed: {}'
                          '.'.format(ds_pos.size == 1, ds_spec.size == 1))
            else:
                raise ValueError('The number of position ({}) and spectroscopic ({}) dimensions do not match with the '
                                 'dimensionality of the N-dimensional dataset: {}'
                                 '.'.format(ds_pos.shape[1], ds_spec.shape[0], data_n_dim.ndim))
    '''
    Sort the indices from fastest to slowest
    '''
    if ds_pos.size == 1:
        # Position dimension squeezed out:
        pos_sort = []
    else:
        pos_sort = get_sort_order(np.transpose(ds_pos))
    if ds_spec.size == 1:
        # Spectroscopic axis squeezed out:
        spec_sort = []
    else:
        spec_sort = get_sort_order(ds_spec)
    # Fabricated index matrices are built slow -> fast; flip their sort order
    if h5_spec is None:
        spec_sort = spec_sort[::-1]
    if h5_pos is None:
        pos_sort = pos_sort[::-1]
    if verbose:
        print('Position sort order: {}'.format(pos_sort))
        print('Spectroscopic sort order: {}'.format(spec_sort))
    '''
    Now we transpose the axes associated with the spectroscopic dimensions
    so that they are in the same order as in the index array
    '''
    swap_axes = np.uint16(np.append(pos_sort[::-1], spec_sort[::-1] + len(pos_sort)))
    if verbose:
        print('swap axes: {} to be applied to N dimensional data of shape {}'.format(swap_axes, data_n_dim.shape))
    data_n_dim_2 = data_n_dim.transpose(tuple(swap_axes))
    if verbose:
        print('N dimensional data shape after axes swap: {}'.format(data_n_dim_2.shape))
    '''
    Now we reshape the dataset based on those dimensions
    We must use the spectroscopic dimensions in reverse order
    '''
    try:
        ds_2d = data_n_dim_2.reshape([ds_pos.shape[0], ds_spec.shape[1]])
    except ValueError:
        raise ValueError('Could not reshape dataset to full N-dimensional form')
    return ds_2d, True
def get_dimensionality(ds_index, index_sort=None):
    """
    Count the unique values in every index dimension (row), optionally
    reported in a caller-supplied row order.

    Parameters
    ----------
    ds_index : 2D HDF5 Dataset or numpy array
        Row matrix of indices
    index_sort : Iterable of unsigned integers (Optional)
        Sort that can be applied to dimensionality.
        For example - Order of rows sorted from fastest to slowest

    Returns
    -------
    sorted_dims : list of unsigned integers
        Dimensionality of each row in ds_index. If index_sort is supplied, it will be in the sorted order
    """
    # Materialize lazy dask arrays before inspection
    if isinstance(ds_index, da.core.Array):
        ds_index = ds_index.compute()
    if not isinstance(ds_index, (np.ndarray, h5py.Dataset)):
        raise TypeError('ds_index should either be a numpy array or h5py.Dataset')
    if ds_index.shape[0] > ds_index.shape[1]:
        # Taller than wide -> position-style layout; flip to the
        # spectroscopic-style layout (few rows, many columns) first
        ds_index = np.transpose(ds_index)
    num_dims = ds_index.shape[0]
    if index_sort is None:
        # Default: report rows in their natural order
        index_sort = np.arange(num_dims)
    else:
        # The sort order must be a 1D permutation of all row indices
        if not contains_integers(index_sort, min_val=0):
            raise ValueError('index_sort should contain integers > 0')
        index_sort = np.array(index_sort)
        if index_sort.ndim != 1:
            raise ValueError('index_sort should be a 1D array')
        if len(np.unique(index_sort)) > num_dims:
            raise ValueError('length of index_sort ({}) should be smaller than number of dimensions in provided dataset'
                             ' ({}'.format(len(np.unique(index_sort)), num_dims))
        if set(np.arange(num_dims)) != set(index_sort):
            raise ValueError('Sort order of dimensions ({}) not matching with number of dimensions ({})'
                             ''.format(index_sort, num_dims))
    as_matrix = np.array(ds_index, ndmin=2)
    sorted_dims = []
    for row_ind in index_sort:
        sorted_dims.append(len(np.unique(as_matrix[row_ind])))
    return sorted_dims
def get_sort_order(ds_spec):
    """
    Rank the rows of an indices matrix by how frequently their values change,
    returning row indices ordered from fastest changing to slowest.

    Parameters
    ----------
    ds_spec : 2D HDF5 dataset or numpy array
        Rows of indices to be sorted from fastest changing to slowest

    Returns
    -------
    change_sort : List of unsigned integers
        Order of rows sorted from fastest changing to slowest
    """
    # Materialize lazy dask arrays before inspection
    if isinstance(ds_spec, da.core.Array):
        ds_spec = ds_spec.compute()
    if not isinstance(ds_spec, (np.ndarray, h5py.Dataset)):
        raise TypeError('ds_spec should either be a numpy array or h5py.Dataset')
    if ds_spec.shape[0] > ds_spec.shape[1]:
        # Taller than wide -> position-style layout; flip to
        # spectroscopic-style (few rows, many columns)
        ds_spec = np.transpose(ds_spec)
    change_count = []
    for row in ds_spec:
        # i == 0 deliberately compares against row[-1] (wrap-around),
        # matching the original counting behavior
        flips = [row[i] != row[i - 1] for i in range(len(row))]
        change_count.append(len(np.where(flips)[0]))
    change_sort = np.argsort(change_count)[::-1]
    return change_sort
def get_unit_values(ds_inds, ds_vals, dim_names=None, all_dim_names=None, is_spec=None, verbose=False):
    """
    Gets the unit arrays of values that describe the spectroscopic dimensions

    Parameters
    ----------
    ds_inds : h5py.Dataset or numpy.ndarray
        Spectroscopic or Position Indices dataset
    ds_vals : h5py.Dataset or numpy.ndarray
        Spectroscopic or Position Values dataset
    dim_names : str, or list of str, Optional
        Names of the dimensions of interest. Default = all
    all_dim_names : list of str, Optional
        Names of all the dimensions in these datasets. Use this if supplying numpy arrays instead of h5py.Dataset
        objects for h5_inds, h5_vals since there is no other way of getting the dimension names.
    is_spec : bool, optional
        Whether or not the provided ancillary datasets are position or spectroscopic
        The user is recommended to supply this parameter whenever it is known
        By default, this function will attempt to recognize the answer based on the shape of the datasets.
    verbose : bool, optional
        Whether or not to print debugging statements. Default - off

    Note - this function can be extended / modified for ancillary position dimensions as well

    Returns
    -------
    unit_values : dict
        Dictionary containing the unit array for each dimension. The name of the dimensions are the keys.

    Raises
    ------
    TypeError
        If the datasets are not of the allowed types or ``is_spec`` is not a bool
    ValueError
        If any dimension is marked incomplete, if the shapes of the two
        datasets disagree, or if a dimension's indices are not regular
        (unequal tiles / non-constant step sizes)
    KeyError
        If a requested name in ``dim_names`` is absent from ``all_dim_names``
    """
    if all_dim_names is None:
        # Dimension names must come from HDF5 attributes - restrict to h5py
        allowed_types = h5py.Dataset
    else:
        all_dim_names = validate_list_of_strings(all_dim_names, 'all_dim_names')
        all_dim_names = np.array(all_dim_names)
        allowed_types = (h5py.Dataset, np.ndarray)
    for dset, dset_name in zip([ds_inds, ds_vals], ['ds_inds', 'ds_vals']):
        if not isinstance(dset, allowed_types):
            raise TypeError(dset_name + ' should be of type: {}'.format(allowed_types))
    # For now, we will throw an error if even a single dimension is listed as an incomplete dimension:
    if isinstance(ds_inds, h5py.Dataset):
        if np.any(['incomplete_dimensions' in dset.attrs.keys() for dset in [ds_inds, ds_vals]]):
            # Either (or both) dataset may carry the attribute; read whichever exists
            try:
                incomp_dims_inds = get_attr(ds_inds, 'incomplete_dimensions')
            except KeyError:
                incomp_dims_inds = None
            try:
                incomp_dims_vals = get_attr(ds_vals, 'incomplete_dimensions')
            except KeyError:
                incomp_dims_vals = None
            if incomp_dims_inds is None and incomp_dims_vals is not None:
                incomp_dims = incomp_dims_vals
            elif incomp_dims_inds is not None and incomp_dims_vals is None:
                incomp_dims = incomp_dims_inds
            else:
                # ensure that both attributes are the same
                if incomp_dims_vals != incomp_dims_inds:
                    raise ValueError('Provided indices ({}) and values ({}) datasets were marked with different values '
                                     'for incomplete_datasets.'.format(incomp_dims_inds, incomp_dims_vals))
                incomp_dims = incomp_dims_vals
            all_dim_names = get_attr(ds_inds, 'labels')
            # Unit values are ill-defined for incomplete dimensions - refuse outright
            raise ValueError('Among all dimensions: {}, These dimensions were marked as incomplete dimensions: {}'
                             '. You are recommended to find unit values manually'.format(all_dim_names, incomp_dims))
    # Do we need to check that the provided inds and vals correspond to the same main dataset?
    if ds_inds.shape != ds_vals.shape:
        raise ValueError('h5_inds: {} and h5_vals: {} should have the same shapes'.format(ds_inds.shape, ds_vals.shape))
    if all_dim_names is None:
        all_dim_names = get_attr(ds_inds, 'labels')
    if verbose:
        print('All dimensions: {}'.format(all_dim_names))
    # First load to memory
    inds_mat = ds_inds[()]
    vals_mat = ds_vals[()]
    if is_spec is None:
        # Attempt to recognize the type automatically:
        # spectroscopic matrices are short and wide (rows < cols)
        is_spec = False
        if inds_mat.shape[0] < inds_mat.shape[1]:
            is_spec = True
    else:
        if not isinstance(is_spec, bool):
            raise TypeError('is_spec should be a boolean. Provided object is of type: {}'.format(type(is_spec)))
    if verbose:
        print(
            'Ancillary matrices of shape: {}, hence determined to be Spectroscopic:{}'.format(inds_mat.shape, is_spec))
    if not is_spec:
        # Convert to spectral shape (dimensions as rows) for uniform processing below
        inds_mat = np.transpose(inds_mat)
        vals_mat = np.transpose(vals_mat)
    if len(all_dim_names) != inds_mat.shape[0]:
        raise ValueError('Length of dimension names list: {} not matching with shape of dataset: {}'
                         '.'.format(len(all_dim_names), inds_mat.shape[0]))
    if dim_names is None:
        dim_names = all_dim_names
        if verbose:
            print('Going to return unit values for all dimensions: {}'.format(all_dim_names))
    else:
        dim_names = validate_list_of_strings(dim_names, 'dim_names')
        if verbose:
            print('Checking to make sure that the target dimension names: {} exist in the datasets attributes: {}'
                  '.'.format(dim_names, all_dim_names))
        # check to make sure that the dimension names exist in the datasets:
        for dim_name in dim_names:
            if dim_name not in all_dim_names:
                raise KeyError('Dimension {} does not exist in the provided ancillary datasets'.format(dim_name))
    unit_values = dict()
    # Iterate over ALL dimensions (not just the requested ones) so that
    # irregular dimensions are always detected and reported
    for dim_name in all_dim_names:
        # Find the row in the spectroscopic indices that corresponds to the dimensions we want to slice:
        if verbose:
            print('Looking for dimension: {} in {}'.format(dim_name, dim_names))
        desired_row_ind = np.where(all_dim_names == dim_name)[0][0]
        inds_for_dim = inds_mat[desired_row_ind]
        # Wherever this dimension goes to 0 - start of a new tile
        starts = np.where(inds_for_dim == np.min(inds_for_dim))[0]
        if starts[0] != 0:
            raise ValueError('Spectroscopic Indices for dimension: "{}" not '
                             'starting with 0. Please fix this and try again'
                             '.'.format(dim_name))
        # There may be repetitions in addition to tiling. Find how the positions increase.
        # 1 = repetition, > 1 = new tile
        step_sizes = np.hstack(([1], np.diff(starts)))
        # This array is of the same length as the full indices array
        # We should expect only two values of step sizes for a regular dimension (tiles of the same size):
        # 1 for same value repeating and a big jump in indices when the next tile starts
        # If the repeats / tiles are of different lengths, then this is not a regular dimension.
        # What does a Unit Values vector even mean in this case? Just raise an error for now
        if np.where(np.unique(step_sizes) - 1)[0].size > 1:
            raise ValueError('Non constant step sizes')
        # Finding Start of a new tile
        tile_starts = np.where(step_sizes > 1)[0]
        # converting these indices to correct indices that can be mapped straight to
        if len(tile_starts) < 1:
            # Dimension(s) with no tiling at all
            # Make it look as though the next tile starts at the end of the whole indices vector
            tile_starts = np.array([0, len(inds_for_dim)])
        else:
            # Dimension with some form of repetition
            tile_starts = np.hstack(([0], starts[tile_starts]))
            # Verify that each tile is identical here
            # Last tile will not be checked unless we add the length of the indices vector as the start of next tile
            tile_starts = np.hstack((tile_starts, [len(inds_for_dim)]))
            subsections = [inds_for_dim[tile_starts[ind]: tile_starts[ind + 1]] for ind in range(len(tile_starts) - 1)]
            if np.max(np.diff(subsections, axis=0)) != 0:
                # Should get unit values for ALL dimensions regardless of expectations to catch such scenarios.
                raise ValueError('Values in each tile of dimension: {} are different'.format(dim_name))
        # Now looking within the first tile:
        subsection = inds_for_dim[tile_starts[0]:tile_starts[1]]
        # remove all repetitions. ie - take indices only where jump == 1
        step_inds = np.hstack(([0], np.where(np.hstack(([0], np.diff(subsection))))[0]))
        # Finally, use these indices to get the values
        if dim_name in dim_names:
            # Only add this dimension to dictionary if requested.
            unit_values[dim_name] = vals_mat[desired_row_ind, step_inds]
    return unit_values
def write_main_dataset(h5_parent_group, main_data, main_data_name, quantity, units, pos_dims, spec_dims,
                       main_dset_attrs=None, h5_pos_inds=None, h5_pos_vals=None, h5_spec_inds=None, h5_spec_vals=None,
                       aux_spec_prefix='Spectroscopic_', aux_pos_prefix='Position_', verbose=False,
                       slow_to_fast=False, **kwargs):
    """
    Writes the provided data as a 'Main' dataset with all appropriate linking.

    By default, the instructions for generating the ancillary datasets should be specified using the pos_dims and
    spec_dims arguments as dictionary objects. Alternatively, if both the indices and values datasets are already
    available for either/or the positions / spectroscopic, they can be specified using the keyword arguments. In this
    case, fresh datasets will not be generated.

    Parameters
    ----------
    h5_parent_group : :class:`h5py.Group`
        Parent group under which the datasets will be created
    main_data : numpy.ndarray, dask.array.core.Array, list or tuple
        2D matrix formatted as [position, spectral] or a list / tuple with the shape for an empty dataset.
        If creating an empty dataset - the dtype must be specified via a kwarg.
    main_data_name : String / Unicode
        Name to give to the main dataset. This cannot contain the '-' character.
    quantity : String / Unicode
        Name of the physical quantity stored in the dataset. Example - 'Current'
    units : String / Unicode
        Name of units for the quantity stored in the dataset. Example - 'A' for amperes
    pos_dims : Dimension or array-like of Dimension objects
        Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
        datasets
        Object specifying the instructions necessary for building the Position indices and values datasets
    spec_dims : Dimension or array-like of Dimension objects
        Sequence of Dimension objects that provides all necessary instructions for constructing the indices and values
        datasets
        Object specifying the instructions necessary for building the Spectroscopic indices and values datasets
    main_dset_attrs : dictionary, Optional
        Dictionary of parameters that will be written to the main dataset. Do NOT include region references here.
    h5_pos_inds : h5py.Dataset, Optional
        Dataset that will be linked with the name "Position_Indices"
    h5_pos_vals : h5py.Dataset, Optional
        Dataset that will be linked with the name "Position_Values"
    h5_spec_inds : h5py.Dataset, Optional
        Dataset that will be linked with the name "Spectroscopic_Indices"
    h5_spec_vals : h5py.Dataset, Optional
        Dataset that will be linked with the name "Spectroscopic_Values"
    aux_spec_prefix : str or unicode, Optional
        Default prefix for Spectroscopic datasets. Default = "Spectroscopic"
    aux_pos_prefix : str or unicode, Optional
        Default prefix for Position datasets. Default = "Position"
    verbose : bool, Optional, default=False
        If set to true - prints debugging logs
    slow_to_fast : bool, Optional. Default=False
        Set to True if the dimensions are arranged from slowest varying to fastest varying.
        Set to False otherwise.
    kwargs will be passed onto the creation of the dataset. Please pass chunking, compression, dtype, and other
    arguments this way

    Returns
    -------
    h5_main : USIDataset
        Reference to the main dataset
    """
    def __check_anc_before_creation(aux_prefix, dim_type='pos'):
        # Sanitize the ancillary-dataset prefix and fail fast if datasets
        # with that prefix already exist in the destination group
        aux_prefix = validate_single_string_arg(aux_prefix, 'aux_' + dim_type + '_prefix')
        if not aux_prefix.endswith('_'):
            aux_prefix += '_'
        if '-' in aux_prefix:
            warn('aux_' + dim_type + ' should not contain the "-" character. Reformatted name from:{} to '
                 '{}'.format(aux_prefix, aux_prefix.replace('-', '_')))
            aux_prefix = aux_prefix.replace('-', '_')
        for dset_name in [aux_prefix + 'Indices', aux_prefix + 'Values']:
            if dset_name in h5_parent_group.keys():
                # TODO: What if the contained data was correct?
                raise KeyError('Dataset named: ' + dset_name + ' already exists in group: '
                               '{}. Consider passing these datasets using kwargs (if they are correct) instead of providing the pos_dims and spec_dims arguments'.format(h5_parent_group.name))
        return aux_prefix

    def __ensure_anc_in_correct_file(h5_inds, h5_vals, prefix):
        # Copies the provided ancillary datasets into the destination file
        # if they currently live in a different HDF5 file
        if h5_inds.file != h5_vals.file:
            raise ValueError('Provided ' + prefix + ' datasets are present in different HDF5 files!')
        if h5_inds.file != h5_parent_group.file:
            # Need to copy over the anc datasets to the new group
            if verbose:
                print('Need to copy over ancillary datasets: {} and {} to '
                      'destination group: {} which is in a different HDF5 '
                      'file'.format(h5_inds, h5_vals, h5_parent_group))
            ret_vals = [copy_dataset(x, h5_parent_group, verbose=verbose) for x in [h5_inds, h5_vals]]
        else:
            ret_vals = [h5_inds, h5_vals]
        return tuple(ret_vals)

    if not isinstance(h5_parent_group, (h5py.Group, h5py.File)):
        raise TypeError('h5_parent_group should be a h5py.File or h5py.Group object')
    if not is_editable_h5(h5_parent_group):
        raise ValueError('The provided file is not editable')
    if verbose:
        print('h5 group and file OK')
    quantity, units, main_data_name = validate_string_args([quantity, units, main_data_name],
                                                           ['quantity', 'units', 'main_data_name'])
    if verbose:
        print('quantity, units, main_data_name all OK')
    quantity = quantity.strip()
    units = units.strip()
    main_data_name = main_data_name.strip()
    if '-' in main_data_name:
        # '-' breaks downstream naming conventions; silently substitute '_'
        warn('main_data_name should not contain the "-" character. Reformatted name from:{} to '
             '{}'.format(main_data_name, main_data_name.replace('-', '_')))
        main_data_name = main_data_name.replace('-', '_')
    if isinstance(main_data, (list, tuple)):
        # A list / tuple is interpreted as the SHAPE of an empty dataset
        if not contains_integers(main_data, min_val=1):
            raise ValueError('main_data if specified as a shape should be a list / tuple of integers >= 1')
        if len(main_data) != 2:
            raise ValueError('main_data if specified as a shape should contain 2 numbers')
        if 'dtype' not in kwargs:
            raise ValueError('dtype must be included as a kwarg when creating an empty dataset')
        _ = validate_dtype(kwargs.get('dtype'))
        main_shape = main_data
        if verbose:
            print('Selected empty dataset creation. OK so far')
    elif isinstance(main_data, (np.ndarray, da.core.Array)):
        if main_data.ndim != 2:
            raise ValueError('main_data should be a 2D array')
        main_shape = main_data.shape
        if verbose:
            print('Provided numpy or Dask array for main_data OK so far')
    else:
        raise TypeError('main_data should either be a numpy array or a tuple / list with the shape of the data')
    if h5_pos_inds is not None and h5_pos_vals is not None:
        # The provided datasets override fresh building instructions.
        validate_anc_h5_dsets(h5_pos_inds, h5_pos_vals, main_shape, is_spectroscopic=False)
        if verbose:
            print('The shapes of the provided h5 position indices and values are OK')
        h5_pos_inds, h5_pos_vals = __ensure_anc_in_correct_file(h5_pos_inds, h5_pos_vals, 'Position')
    else:
        # Build fresh Position ancillary datasets from the pos_dims instructions
        aux_pos_prefix = __check_anc_before_creation(aux_pos_prefix, dim_type='pos')
        pos_dims = validate_dimensions(pos_dims, dim_type='Position')
        validate_dims_against_main(main_shape, pos_dims, is_spectroscopic=False)
        if verbose:
            print('Passed all pre-tests for creating position datasets')
        h5_pos_inds, h5_pos_vals = write_ind_val_dsets(h5_parent_group, pos_dims, is_spectral=False, verbose=verbose,
                                                       slow_to_fast=slow_to_fast, base_name=aux_pos_prefix)
        if verbose:
            print('Created position datasets!')
    if h5_spec_inds is not None and h5_spec_vals is not None:
        # The provided datasets override fresh building instructions.
        validate_anc_h5_dsets(h5_spec_inds, h5_spec_vals, main_shape, is_spectroscopic=True)
        if verbose:
            print('The shapes of the provided h5 position indices and values '
                  'are OK')
        h5_spec_inds, h5_spec_vals = __ensure_anc_in_correct_file(h5_spec_inds, h5_spec_vals,
                                                                  'Spectroscopic')
    else:
        # Build fresh Spectroscopic ancillary datasets from the spec_dims instructions
        aux_spec_prefix = __check_anc_before_creation(aux_spec_prefix, dim_type='spec')
        spec_dims = validate_dimensions(spec_dims, dim_type='Spectroscopic')
        validate_dims_against_main(main_shape, spec_dims, is_spectroscopic=True)
        if verbose:
            print('Passed all pre-tests for creating spectroscopic datasets')
        h5_spec_inds, h5_spec_vals = write_ind_val_dsets(h5_parent_group, spec_dims, is_spectral=True, verbose=verbose,
                                                         slow_to_fast=slow_to_fast, base_name=aux_spec_prefix)
        if verbose:
            print('Created Spectroscopic datasets')
    if h5_parent_group.file.driver == 'mpio':
        # Parallel HDF5 cannot create compressed datasets - strip the kwarg
        if kwargs.pop('compression', None) is not None:
            warn('This HDF5 file has been opened wth the "mpio" communicator. '
                 'mpi4py does not allow creation of compressed datasets. Compression kwarg has been removed')
    if isinstance(main_data, np.ndarray):
        # Case 1 - simple small dataset
        h5_main = h5_parent_group.create_dataset(main_data_name, data=main_data, **kwargs)
        if verbose:
            print('Created main dataset with provided data')
    elif isinstance(main_data, da.core.Array):
        # Case 2 - Dask dataset
        # step 0 - get rid of any automated dtype specification:
        _ = kwargs.pop('dtype', None)
        # step 1 - create the empty dataset:
        h5_main = h5_parent_group.create_dataset(main_data_name, shape=main_data.shape, dtype=main_data.dtype,
                                                 **kwargs)
        if verbose:
            print('Created empty dataset: {} for writing Dask dataset: {}'.format(h5_main, main_data))
            print('Dask array will be written to HDF5 dataset: "{}" in file: "{}"'.format(h5_main.name,
                                                                                          h5_main.file.filename))
        # Step 2 - now ask Dask to dump data to disk
        da.to_hdf5(h5_main.file.filename, {h5_main.name: main_data})
        # main_data.to_hdf5(h5_main.file.filename, h5_main.name)  # Does not work with python 2 for some reason
    else:
        # Case 3 - large empty dataset
        h5_main = h5_parent_group.create_dataset(main_data_name, main_data, **kwargs)
        if verbose:
            print('Created empty dataset for Main')
    # Mandatory attributes that make a dataset a USID "Main" dataset
    write_simple_attrs(h5_main, {'quantity': quantity, 'units': units})
    if verbose:
        print('Wrote quantity and units attributes to main dataset')
    if isinstance(main_dset_attrs, dict):
        write_simple_attrs(h5_main, main_dset_attrs)
        if verbose:
            print('Wrote provided attributes to main dataset')
    write_book_keeping_attrs(h5_main)
    # make it main
    link_as_main(h5_main, h5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals)
    if verbose:
        print('Successfully linked datasets - dataset should be main now')
    from ..usi_data import USIDataset
    return USIDataset(h5_main)
def map_grid_to_cartesian(h5_main, grid_shape, mode='histogram', **kwargs):
    """
    Map an incomplete measurement, such as a spiral scan, to a cartesian grid.

    Parameters
    ----------
    h5_main : :class:`pyUSID.USIDataset`
        Dataset containing the sparse measurement
    grid_shape : int or [int, int]
        Shape of the output :class:`numpy.ndarray`.
    mode : str, optional. Default = 'histogram'
        Method used for building a cartesian grid.
        Available methods = 'histogram', 'linear', 'nearest', 'cubic'
        Use kwargs to pass onto each of the techniques

    Note
    ----
    UNDER DEVELOPMENT!
    Currently only valid for 2 position dimensions
    @author: Patrik Marschalik

    Returns
    -------
    :class:`numpy.ndarray` but could be a h5py.Dataset or dask.array.core.Array object

    Raises
    ------
    TypeError
        If ``h5_main`` is not a USIDataset or does not have exactly 2 position dimensions
    ValueError
        If ``mode`` or ``grid_shape`` is invalid
    """
    try:
        from scipy.interpolate import griddata
    except ImportError as expn:
        griddata = None
        warn('map_grid_to_cartesian() requires scipy')
        raise expn
    from ..usi_data import USIDataset

    if not isinstance(h5_main, USIDataset):
        raise TypeError('Provided object is not a pyUSID.USIDataset object')
    if mode not in ['histogram', 'linear', 'nearest', 'cubic']:
        # Bug fix: this message previously listed only ["histogram", "cubic"],
        # omitting the equally valid 'linear' and 'nearest' options
        raise ValueError('mode must be a string among '
                         '["histogram", "linear", "nearest", "cubic"]')

    ds_main = h5_main[()].squeeze()
    ds_pos_vals = h5_main.h5_pos_vals[()]
    if ds_pos_vals.shape[1] != 2:
        raise TypeError("Only working for 2 position dimensions.")

    # Transform to row, col image format (90 degree rotation of coordinates)
    rotation = np.array([[0, 1], [-1, 0]])
    ds_pos_vals = np.dot(ds_pos_vals, rotation)

    # Normalize grid_shape to a 2-element list
    try:
        grid_n = len(grid_shape)
    except TypeError:
        # a bare integer has no len() - treat it as a square grid
        grid_n = 1
    if grid_n != 1 and grid_n != 2:
        raise ValueError("grid_shape must be of type int or [int, int].")
    if grid_n == 1:
        grid_shape = 2 * [grid_shape]

    def interpolate(points, values, grid_shape, method):
        # Multiplying by 1j makes np.mgrid interpret the third slice member
        # as a number of points rather than a step size
        grid_shape = list(map((1j).__mul__, grid_shape))
        grid_x, grid_y = np.mgrid[
            np.amin(points[:, 0]):np.amax(points[:, 0]):grid_shape[0],
            np.amin(points[:, 1]):np.amax(points[:, 1]):grid_shape[1]
        ]
        ndim_data = griddata(points, values, (grid_x, grid_y), method=method)
        return ndim_data

    if mode == "histogram":
        # Average the measured values falling into each bin; bins with no
        # samples divide 0/0 and therefore appear as NaN in the output
        histogram_weighted, _, _ = np.histogram2d(*ds_pos_vals.T, bins=grid_shape, weights=ds_main)
        histogram, _, _ = np.histogram2d(*ds_pos_vals.T, bins=grid_shape)
        cart_data = np.divide(histogram_weighted, histogram)
    else:
        cart_data = interpolate(ds_pos_vals, ds_main, grid_shape, method=mode)
    return cart_data
def write_sidpy_dataset(si_dset, h5_parent_group, verbose=False,
                        **kwargs):
    """
    Stores a sidpy.Dataset in the given HDF5 group using the USID format.

    Parameters
    ----------
    si_dset : sidpy.Dataset
        Dataset to be written to HDF5 in USID format
    h5_parent_group : :class:`h5py.Group`
        Parent group under which the datasets will be created
    verbose : bool, Optional. Default = False
        Whether or not to write logs to standard out
    kwargs : dict
        Additional keyword arguments passed on to h5py when writing data

    Returns
    -------
    h5_main : USIDataset
        Reference to the written main dataset

    Notes
    -----
    USID recognizes only two dimension types - Position and Spectroscopic.
    Dimensions that are neither SPATIAL nor SPECTRAL will be treated as
    spectroscopic (a warning is raised). Consider changing such dimension
    types before calling this function.
    """
    if not isinstance(si_dset, sid.Dataset):
        raise TypeError('Data to write is not a sidpy dataset')
    if not isinstance(h5_parent_group, (h5py.File, h5py.Group)):
        raise TypeError('h5_parent_group is not a h5py.File or '
                        'h5py.Group object')

    # Partition the sidpy dimensions into USID's two dimension types while
    # accumulating the flattened size of each side
    pos_dims = []
    spec_dims = []
    pos_size = 1
    spec_size = 1
    for axis in si_dset._axes.values():
        if axis._dimension_type == sid.DimensionType.SPATIAL:
            pos_dims.append(Dimension(axis._name, axis._units, axis.values,
                                      axis._quantity, axis._dimension_type))
            pos_size *= np.size(axis.values)
        else:
            # Anything that is not SPATIAL ends up on the spectroscopic side
            if not axis._dimension_type == sid.DimensionType.SPECTRAL:
                warn('Will consider dimension: {} of type: {} as a '
                     'spectroscopic dimension'.format(axis._name,
                                                      axis._dimension_type))
            spec_dims.append(Dimension(axis._name, axis._units, axis.values,
                                       axis._quantity, axis._dimension_type))
            spec_size *= np.size(axis.values)

    # Flatten the N-dimensional data to USID's 2D (positions x spectra) form
    flat_2d = da.reshape(si_dset, [pos_size, spec_size])

    # Gather every dict-valued attribute of the sidpy dataset into one dict
    # TODO : Consider writing this out as a separate group
    gathered_attrs = dict()
    for prop_name in dir(si_dset):
        prop_val = getattr(si_dset, prop_name)
        if isinstance(prop_val, dict):
            gathered_attrs.update(prop_val)

    return write_main_dataset(h5_parent_group=h5_parent_group,
                              main_data=flat_2d,
                              main_data_name=si_dset.name,
                              quantity=si_dset.quantity,
                              units=si_dset.units,
                              pos_dims=pos_dims,
                              spec_dims=spec_dims,
                              main_dset_attrs=flatten_dict(gathered_attrs),
                              slow_to_fast=True,
                              verbose=verbose,
                              **kwargs)
| 50,035 | 44.281448 | 223 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/io/hdf_utils/__init__.py | """
Utilities for reading and writing USID data in HDF5 files
Submodules
----------
.. autosummary::
:toctree: _autosummary
base
simple
model
"""
from .base import *
from .simple import *
from .model import *
| 230 | 11.157895 | 57 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/processing/__init__.py | """
Formalizing data processing on USID datasets using parallel computing tools
Submodules
----------
.. autosummary::
:toctree: _autosummary
"""
from .process import Process
from sidpy.proc import comp_utils
from sidpy.proc.comp_utils import parallel_compute
__all__ = ['Process', 'parallel_compute', 'comp_utils']
| 325 | 18.176471 | 75 | py |
pyUSID-legacy | pyUSID-master-legacy/pyUSID/processing/process.py | """
:class:`~pyUSID.processing.process.Process` - An abstract class for formulating scientific problems as computational
problems
Created on 7/17/16 10:08 AM
@author: Suhas Somnath, Chris Smith
"""
from __future__ import division, unicode_literals, print_function, \
absolute_import
import numpy as np
import psutil
import time as tm
import h5py
from warnings import warn
from numbers import Number
from multiprocessing import cpu_count
from sidpy.proc.comp_utils import parallel_compute, get_MPI, \
group_ranks_by_socket, get_available_memory
from sidpy.base.num_utils import integers_to_slices
from sidpy.base.string_utils import validate_single_string_arg, format_time, \
format_size
from sidpy.hdf.hdf_utils import write_simple_attrs, lazy_load_array
from ..io.hdf_utils import check_if_main, check_for_old
from ..io.usi_data import USIDataset
# TODO: internalize as many attributes as possible. Expose only those that will be required by the user
class Process(object):
"""
An abstract class for formulating scientific problems as computational problems. This class handles the tedious,
science-agnostic, file-operations, parallel-computations, and book-keeping operations such that children classes
only need to specify application-relevant code for processing the data.
"""
def __init__(self, h5_main, process_name, parms_dict=None, cores=None,
             max_mem_mb=4*1024, mem_multiplier=1.0, lazy=False,
             h5_target_group=None, verbose=False):
    """
    Parameters
    ----------
    h5_main : :class:`~pyUSID.io.usi_data.USIDataset`
        The USID main HDF5 dataset over which the analysis will be performed.
    process_name : str
        Name of the process
    cores : uint, optional
        How many cores to use for the computation. Default: all available cores - 2 if operating outside MPI context
    max_mem_mb : uint, optional
        How much memory to use for the computation. Default 1024 Mb
    mem_multiplier : float, optional. Default = 1
        mem_multiplier is the number that will be multiplied with the
        (byte) size of a single position in the source dataset in order to
        better estimate the number of positions that can be processed at
        any given time (how many pixels of the source and results datasets
        can be retained in memory). The default value of 1.0 only accounts
        for the source dataset. A value greater than 1 would account for
        the size of results datasets as well. For example, if the result
        dataset is the same size and precision as the source dataset,
        the multiplier will be 2 (1 for source, 1 for result)
    lazy : bool, optional. Default = False
        If True, read_data_chunk and write_results_chunk will operate on
        dask arrays. If False - everything will be in numpy.
    h5_target_group : h5py.Group, optional. Default = None
        Location where to look for existing results and to place newly
        computed results. Use this kwarg if the results need to be written
        to a different HDF5 file. By default, this value is set to the
        parent group containing `h5_main`
    verbose : bool, Optional, default = False
        Whether or not to print debugging statements

    Attributes
    ----------
    self.h5_results_grp : :class:`h5py.Group`
        HDF5 group containing the HDF5 datasets that contain the results
        of the computation
    self.verbose : bool
        Whether or not to print debugging statements
    self.parms_dict : dict
        Dictionary of parameters for the computation
    self.duplicate_h5_groups : list
        List of :class:`h5py.Group` objects containing computational
        results that have been completely computed with the same
        set of parameters as those in self.parms_dict
    self.partial_h5_groups : list
        List of :class:`h5py.Group` objects containing computational
        results that have been partially computed with the same
        set of parameters as those in self.parms_dict
    self.process_name : str
        Name of the process. This is used for checking for existing
        completely and partially computed results as well as for naming
        the HDF5 group that will contain the results of the computation
    self._cores : uint
        Number of CPU cores to use for parallel computations.
        Ignored in the MPI context. Each rank gets 1 CPU core
    self._max_pos_per_read : uint
        Number of positions in the dataset to read per chunk
    self._status_dset_name : str
        Name of the HDF5 dataset that keeps track of the positions in the
        source dataset thave already been computed
    self._results : list
        List of objects returned as the result of computation performed by
        the self._map_function for each position in the current batch of
        positions that were processed
    self._h5_target_group : h5py.Group
        Location where existing / future results will be stored
    self.__resume_implemented : bool
        Whether or not this (child) class has implemented the
        self._get_existing_datasets() function
    self.__bytes_per_pos : uint
        Number of bytes used by one position of the source dataset
    self.mpi_comm : :class:`mpi4py.MPI.COMM_WORLD`
        MPI communicator. None if not running in an MPI context
    self.mpi_rank: uint
        MPI rank. Always 0 if not running in an MPI context
    self.mpi_size: uint
        Number of ranks in COMM_WORLD. 1 if not running in an MPI context
    self.__ranks_on_socket : uint
        Number of MPI ranks on a given CPU socket
    self.__socket_master_rank : uint
        Master MPI rank for a given CPU chip / socket
    self.__compute_jobs : array-like
        List of positions in the HDF5 dataset that need to be computed.
        This may not be a continuous list of numbers if multiple MPI
        workers had previously started computing and were interrupted.
    self.__start_pos : uint
        The index within self.__compute_jobs that a particular MPI rank /
        worker needs to start computing from.
    self.__rank_end_pos : uint
        The index within self.__compute_jobs that a particular MPI rank /
        worker needs to start computing till.
    self.__end_pos : uint
        The index within self.__compute_jobs that a particular MPI rank /
        worker needs to start computing till for the current batch of
        positions.
    self.__pixels_in_batch : array-like
        The positions being computed on by the current compute worker
    """
    # get_MPI() returns the mpi4py MPI module when available / applicable,
    # or None - the None path below implies single-node operation
    MPI = get_MPI()
    # Ensure that the file is opened in the correct comm or something
    if MPI is not None and h5_main.file.driver != 'mpio':
        warn('Code was called in MPI context but HDF5 file was not opened '
             'with the "mpio" driver. JobLib will be used instead of MPI '
             'for parallel computation')
        MPI = None
    if MPI is not None:
        # If we came here then, the user has intentionally asked for multi-node computation
        comm = MPI.COMM_WORLD
        self.mpi_comm = comm
        self.mpi_rank = comm.Get_rank()
        self.mpi_size = comm.Get_size()
        if verbose:
            print("Rank {} of {} on {} sees {} logical cores on the socket".format(comm.Get_rank(), comm.Get_size(),
                                                                                   MPI.Get_processor_name(),
                                                                                   cpu_count()))
        # First, ensure that cores=logical cores in node. No point being economical / considerate
        cores = psutil.cpu_count()
        # It is sufficient if just one rank checks all this.
        if self.mpi_rank == 0:
            print('Working on {} ranks via MPI'.format(self.mpi_size))
        if verbose and self.mpi_rank == 0:
            print('Finished getting all necessary MPI information')
        """
        # Not sure how to check for this correctly
        messg = None
        try:
            if h5_main.file.comm != comm:
                messg = 'The HDF5 file should have been opened with comm=MPI.COMM_WORLD. Currently comm={}'
                        ''.format(h5_main.file.comm)
        except AttributeError:
            messg = 'The HDF5 file should have been opened with comm=MPI.COMM_WORLD'
        if messg is not None:
            raise TypeError(messg)
        """
    else:
        if verbose:
            print('No mpi4py found or script was not called via mpixexec / mpirun. '
                  'Assuming single node computation')
        self.mpi_comm = None
        self.mpi_size = 1
        self.mpi_rank = 0
    # Checking if dataset is "Main"
    if not check_if_main(h5_main, verbose=verbose and self.mpi_rank == 0):
        raise ValueError('Provided dataset is not a "Main" dataset with necessary ancillary datasets')
    # Results go to the parent group of h5_main unless directed elsewhere
    if h5_target_group is not None:
        if not isinstance(h5_target_group, (h5py.Group, h5py.File)):
            raise TypeError("'h5_target_group' must be a h5py.Group object")
    else:
        h5_target_group = h5_main.parent
    self._h5_target_group = h5_target_group
    if h5_target_group.file.mode == 'r':
        raise IOError('the file meant to contain the results '
                      '(h5_target_group) must not be in read-only mode to '
                      'write results to the file')
    process_name = validate_single_string_arg(process_name, 'process_name')
    if parms_dict is None:
        parms_dict = {}
    else:
        if not isinstance(parms_dict, dict):
            raise TypeError("Expected 'parms_dict' of type: dict")
    if MPI is not None:
        MPI.COMM_WORLD.barrier()
    # Not sure if we need a barrier here.
    if verbose and self.mpi_rank == 0:
        print('Rank {}: Upgrading from a regular h5py.Dataset to a USIDataset'.format(self.mpi_rank))
    # Generation of N-dimensional form would break things for some reason.
    self.h5_main = USIDataset(h5_main)
    if verbose and self.mpi_rank == 0:
        print('Rank {}: The HDF5 dataset is now a USIDataset'.format(self.mpi_rank))
    # Saving these as properties of the object:
    self.verbose = verbose
    self.__lazy = lazy
    self._cores = None
    self.__ranks_on_socket = 1
    self.__socket_master_rank = 0
    self._max_pos_per_read = None
    self.__bytes_per_pos = None
    # Now have to be careful here since the below properties are a function of the MPI rank
    self.__start_pos = None
    self.__rank_end_pos = None
    self.__end_pos = None
    self.__pixels_in_batch = None
    self.__compute_jobs = None
    # Determining the max size of the data that can be put into memory
    # all ranks go through this and they need to have this value any
    self._set_memory_and_cores(cores=cores, man_mem_limit=max_mem_mb,
                               mem_multiplier=mem_multiplier)
    if verbose and self.mpi_rank == 0:
        print('Finished collecting info on memory and workers')
    self.duplicate_h5_groups = []
    self.partial_h5_groups = []
    self.process_name = process_name  # Reset this in the extended classes
    self.parms_dict = parms_dict
    """
    The name of the HDF5 dataset that should be present to signify which positions have already been computed
    This is NOT a fully private variable so that multiple processes can be run within a single group - Eg Fitter
    In the case of Fitter - this name can be changed from 'completed_guesses' to 'completed_fits'
    check_for_duplicates will be called by the Child class where they have the opportunity to change this
    variable before checking for duplicates
    """
    self._status_dset_name = 'completed_positions'
    self._results = None
    self.h5_results_grp = None
    # Check to see if the resuming feature has been implemented:
    # NOTE(review): if _get_existing_datasets() completes WITHOUT raising,
    # __resume_implemented stays False. This assumes an implemented override
    # always raises here (no prior results exist yet) - TODO confirm.
    self.__resume_implemented = False
    try:
        self._get_existing_datasets()
    except NotImplementedError:
        if verbose and self.mpi_rank == 0:
            print('It appears that this class may not be able to resume computations')
    except:
        # Bare except is deliberate - ANY other failure while probing
        # implies the child class did implement the method:
        # NameError for variables that don't exist
        # AttributeError for self.var_name that don't exist
        # TypeError (NoneType) etc.
        self.__resume_implemented = True
    if self.mpi_rank == 0:
        print('Consider calling test() to check results before calling compute() which computes on the entire'
              ' dataset and writes results to the HDF5 file')
    self.duplicate_h5_groups, self.partial_h5_groups = self._check_for_duplicates()
def __assign_job_indices(self):
    """
    Sets the start and end indices for each MPI rank.

    Positions whose status flag is 0 (not yet computed) are split as evenly
    as possible among the MPI ranks; the private read pointers consumed by
    ``_read_data_chunk()`` are initialized here.
    """
    # First figure out what positions need to be computed
    self.__compute_jobs = np.where(self._h5_status_dset[()] == 0)[0]
    if self.verbose and self.mpi_rank == 0:
        # Only print the full list of positions when it is short
        if len(self.__compute_jobs) > 100:
            print('Among the {} positions in this dataset, {} positions '
                  'need to be computed'
                  '.'.format(self.h5_main.shape[0],
                             len(self.__compute_jobs)))
        else:
            print('Among the {} positions in this dataset, the following '
                  'positions need to be computed: {}'
                  '.'.format(self.h5_main.shape[0], self.__compute_jobs))
    # integer division
    pos_per_rank = self.__compute_jobs.size // self.mpi_size
    if self.verbose and self.mpi_rank == 0:
        print('Each rank is required to work on {} of the {} (remaining) positions in this dataset'
              '.'.format(pos_per_rank, self.__compute_jobs.size))
    # The start and end indices now correspond to the indices in the incomplete jobs rather than the h5 dataset
    self.__start_pos = self.mpi_rank * pos_per_rank
    self.__rank_end_pos = (self.mpi_rank + 1) * pos_per_rank
    self.__end_pos = int(min(self.__rank_end_pos, self.__start_pos + self._max_pos_per_read))
    if self.mpi_rank == self.mpi_size - 1:
        # Force the last rank to go to the end of the dataset
        # NOTE(review): __end_pos above was computed from the pre-extension
        # __rank_end_pos; _read_data_chunk() recomputes __end_pos, so the
        # extension only needs to affect __rank_end_pos here - confirm.
        self.__rank_end_pos = self.__compute_jobs.size
    if self.verbose:
        print('Rank {} will read positions {} to {} of {}'.format(self.mpi_rank, self.__start_pos,
                                                                  self.__rank_end_pos, self.h5_main.shape[0]))
def _estimate_compute_time_per_pixel(self, *args, **kwargs):
    """
    Estimates the wall time needed to compute an average single position.

    Five randomly chosen positions of ``h5_main`` are pushed through
    ``_map_function`` on a single core and the elapsed time is averaged.
    Developers of child classes can use this figure to size batches so
    that check-pointing happens at a sensible cadence.

    Parameters
    ----------
    args : list
        arguments forwarded to the mapped function
    kwargs : dict
        keyword arguments forwarded to the mapped function

    Returns
    -------
    float
        Mean seconds of compute per position.
    """
    sample_rows = np.random.randint(0, high=self.h5_main.shape[0] - 1, size=5)
    start_time = tm.time()
    # Single-core run keeps the estimate free of parallelization overhead
    _ = parallel_compute(self.h5_main[sample_rows, :], self._map_function,
                         cores=1, lengthy_computation=False,
                         func_args=args, func_kwargs=kwargs, verbose=False)
    elapsed = tm.time() - start_time
    return elapsed / len(sample_rows)
def _get_pixels_in_current_batch(self):
    """
    Returns the indices of the pixels that will be processed in this batch.

    Returns
    -------
    pixels_in_batch : :class:`numpy.ndarray`
        1D array of unsigned integers denoting the pixels that will be read, processed, and written back to
    """
    # Simple accessor for the private batch assignment populated by
    # _read_data_chunk()
    return self.__pixels_in_batch
def test(self, **kwargs):
"""
Tests the process on a subset (for example a pixel) of the whole data. The class can be re-instantiated with
improved parameters and tested repeatedly until the user is content, at which point the user can call
:meth:`~pyUSID.processing.process.Process.compute` on the whole dataset.
Notes
-----
This is not a function that is expected to be called in MPI
Parameters
----------
kwargs - dict, optional
keyword arguments to test the process
Returns
-------
"""
# All children classes should call super() OR ensure that they only work for self.mpi_rank == 0
raise NotImplementedError('test_on_subset has not yet been implemented')
def _check_for_duplicates(self):
    """
    Checks for instances where the process was applied to the same dataset with the same parameters

    Looks up earlier result groups via ``check_for_old`` and sorts each one
    into "complete" or "partial" based on the status dataset (modern
    book-keeping) or the legacy ``last_pixel`` attribute.

    Returns
    -------
    duplicate_h5_groups : list of h5py.Group objects
        List of groups satisfying the above conditions with completely computed results
    partial_h5_groups : list of h5py.Group objects
        List of groups satisfying the above conditions with partially computed results
    """
    if self.verbose and self.mpi_rank == 0:
        print('Checking for duplicates:')
    # This list will contain completed runs only
    existing = check_for_old(self.h5_main, self.process_name,
                             new_parms=self.parms_dict,
                             h5_parent_goup=self._h5_target_group,
                             verbose=self.verbose and self.mpi_rank == 0)
    partial_h5_groups = []
    duplicate_h5_groups = []
    # First figure out which ones are partially completed:
    while len(existing) > 0:
        curr_group = existing.pop(0)
        """
        Earlier, we only checked the 'last_pixel' but to be rigorous we
        should check self._status_dset_name
        The last_pixel attribute check may be deprecated in the future.
        Note that legacy computations did not have this dataset. We can add
        to partially computed datasets
        """
        # Case 1: Modern book-keeping dataset available:
        if self._status_dset_name in curr_group.keys():
            status_dset = curr_group[self._status_dset_name]
            if not isinstance(status_dset, h5py.Dataset):
                # We should not come here if things were implemented correctly
                if self.mpi_rank == 0:
                    print('Results group: {} contained an object named: {} that should have been a dataset'
                          '.'.format(curr_group, self._status_dset_name))
                continue
            if self.h5_main.shape[0] != status_dset.shape[0] or len(status_dset.shape) > 1 or \
                    status_dset.dtype != np.uint8:
                if self.mpi_rank == 0:
                    print('Status dataset: {} was not of the expected shape or datatype'.format(status_dset))
                continue
            # ##### ACTUAL COMPLETENESS TEST HERE #########
            completed_positions = np.sum(status_dset[()])
            if self.verbose and self.mpi_rank == 0:
                print('{} has results that are {} % complete'
                      '.'.format(status_dset.name,
                                 int(100 * completed_positions / self.h5_main.shape[0])))
            # Case 1.A: Incomplete computation?
            if completed_positions < self.h5_main.shape[0]:
                # If there are pixels uncompleted
                # remove from duplicates and move to partial
                if self.verbose and self.mpi_rank == 0:
                    print('moving {} to partial'.format(curr_group.name))
                partial_h5_groups.append(curr_group)
                # Let's write the legacy attribute for safety
                curr_group.attrs['last_pixel'] = self.h5_main.shape[0]
                # No further checks necessary
                continue
            # Case 1.B: Complete computation:
            if self.verbose and self.mpi_rank == 0:
                print('Moving {} to duplicate groups'.format(curr_group.name))
            duplicate_h5_groups.append(curr_group)
            continue
        # Case 2: Even the legacy book-keeping is absent:
        elif 'last_pixel' not in curr_group.attrs.keys():
            if self.mpi_rank == 0:
                # Should not be coming here at all
                print('Group: {} had neither the status HDF5 dataset or the legacy attribute: "last_pixel"'
                      '.'.format(curr_group))
            # Not sure what to do with such groups. Don't consider them
            continue
        # Case 3: Only the legacy book-keeping is available:
        else:
            last_pixel = curr_group.attrs['last_pixel']
            # Creating status dataset for forward compatibility:
            self._h5_status_dset = curr_group.create_dataset(
                self._status_dset_name, dtype=np.uint8,
                shape=(self.h5_main.shape[0],))
            if last_pixel > 0:
                self._h5_status_dset[:last_pixel] = 1
            # Case 3.A: Partial
            if last_pixel < self.h5_main.shape[0]:
                # move to partial
                if self.verbose and self.mpi_rank == 0:
                    print('moving {} to partial since computation was {} % complete'
                          '.'.format(curr_group.name,
                                     int(100 * curr_group.attrs['last_pixel'] / self.h5_main.shape[0])))
                partial_h5_groups.append(curr_group)
                continue
            # Case 3.B: complete:
            else:
                if self.verbose and self.mpi_rank == 0:
                    print('Moving {} to duplicate groups'.format(curr_group.name))
                duplicate_h5_groups.append(curr_group)
                continue
    if len(duplicate_h5_groups) > 0 and self.mpi_rank == 0:
        print('\nNote: ' + self.process_name + ' has already been performed with the same parameters before. '
              'These results will be returned by compute() by default. '
              'Set override to True to force fresh computation\n')
        print(duplicate_h5_groups)
    if len(partial_h5_groups) > 0 and self.mpi_rank == 0:
        # Fixed user-facing message: "will resuming" -> "will resume",
        # "use_patial_computation" -> "use_partial_computation", and a
        # missing separator before "Set override"
        print('\nNote: ' + self.process_name + ' has already been performed PARTIALLY with the same parameters. '
              'compute() will resume computation in the last group below. '
              'To choose a different group call use_partial_computation(). '
              'Set override to True to force fresh computation or resume from a '
              'data group besides the last in the list.\n')
        print(partial_h5_groups)
    return duplicate_h5_groups, partial_h5_groups
def use_partial_computation(self, h5_partial_group=None):
    """
    Resume an interrupted computation from a group holding partial results.

    Parameters
    ----------
    h5_partial_group : :class:`h5py.Group`, optional
        Group containing partially computed results. When omitted, the
        most recently discovered partial group is used.

    Raises
    ------
    ValueError
        If no partial group exists, or the provided group is not among
        the discovered partial groups.
    """
    if h5_partial_group is not None:
        # A specific group was requested - it must be one of the
        # previously validated partial groups
        if h5_partial_group not in self.partial_h5_groups:
            raise ValueError('Provided group does not appear to be in the list of discovered groups')
    else:
        # Fall back to the most recent partial group, if any exist
        if len(self.partial_h5_groups) < 1:
            raise ValueError('No group was found with partial results and no such group was provided')
        h5_partial_group = self.partial_h5_groups[-1]

    # Parameters need not be re-read - they were captured at init
    self.h5_results_grp = h5_partial_group
def __set_cores(self, cores=None):
    """
    Checks number of CPU cores and sets the recommended number of cores to
    be used by analysis methods.
    This function can work with clusters with heterogeneous core counts
    (e.g. CADES SHPC Condo).

    Parameters
    ----------
    cores : uint, optional, Default = None (all or nearly all available)
        How many CPU cores to use for the computation.
    """
    if self.mpi_comm is None:
        # Serial / joblib mode: leave one or two cores free for the OS
        min_free_cores = 1 + int(psutil.cpu_count() > 4)
        if cores is None:
            self._cores = max(1, psutil.cpu_count() - min_free_cores)
        else:
            if not isinstance(cores, int):
                raise TypeError('cores should be an integer but got: {}'.format(cores))
            # Clamp the request into [1, total physical count]
            cores = int(abs(cores))
            self._cores = max(1, min(psutil.cpu_count(), cores))
        self.__socket_master_rank = 0
        self.__ranks_on_socket = 1
    else:
        # user-provided input cores will simply be ignored in an effort to use the entire CPU
        ranks_by_socket = group_ranks_by_socket(verbose=False)
        self.__socket_master_rank = ranks_by_socket[self.mpi_rank]
        # which ranks in this socket?
        ranks_on_this_socket = np.where(ranks_by_socket == self.__socket_master_rank)[0]
        # how many in this socket?
        self.__ranks_on_socket = ranks_on_this_socket.size
        # Force usage of all available memory
        # NOTE(review): this assigns a local name only; it cannot affect the
        # man_mem_limit used by __set_memory() - looks like dead code.
        man_mem_limit = None
        self._cores = 1
        # Disabling the following line since mpi4py and joblib didn't play well for Bayesian Inference
        # self._cores = self.__cores_per_rank = psutil.cpu_count() // self.__ranks_on_socket
def _set_memory_and_cores(self, cores=None, man_mem_limit=None,
                          mem_multiplier=1.0):
    """
    Checks hardware limitations such as memory, number of CPU cores and sets the recommended data chunk sizes and
    the number of cores to be used by analysis methods. This function can work with clusters with heterogeneous
    memory sizes (e.g. CADES SHPC Condo).

    Parameters
    ----------
    cores : uint, optional, Default = 1
        How many cores to use for the computation.
    man_mem_limit : uint, optional, Default = None (all available memory)
        The amount a memory in Mb to use in the computation
    mem_multiplier : float, optional. Default = 1
        mem_multiplier is the number that will be multiplied with the
        (byte) size of a single position in the source dataset in order to
        better estimate the number of positions that can be processed at
        any given time (how many pixels of the source and results datasets
        can be retained in memory). The default value of 1.0 only accounts
        for the source dataset. A value greater than 1 would account for
        the size of results datasets as well. For example, if the result
        dataset is the same size and precision as the source dataset,
        the multiplier will be 2 (1 for source, 1 for result)
    """
    # Cores must be decided first since the per-worker memory budget in
    # __set_memory() divides by the worker count
    self.__set_cores(cores=cores)
    self.__set_memory(man_mem_limit=man_mem_limit,
                      mem_multiplier=mem_multiplier)
def __set_memory(self, man_mem_limit=None, mem_multiplier=1.0):
    """
    Checks memory capabilities of each node and sets the recommended data
    chunk sizes to be used by analysis methods.
    This function can work with clusters with heterogeneous memory sizes
    (e.g. CADES SHPC Condo).

    Parameters
    ----------
    man_mem_limit : uint, optional, Default = None (all available memory)
        The amount a memory in Mb to use in the computation
    mem_multiplier : float, optional. Default = 1
        mem_multiplier is the number that will be multiplied with the
        (byte) size of a single position in the source dataset in order to
        better estimate the number of positions that can be processed at
        any given time (how many pixels of the source and results datasets
        can be retained in memory). The default value of 1.0 only accounts
        for the source dataset. A value greater than 1 would account for
        the size of results datasets as well. For example, if the result
        dataset is the same size and precision as the source dataset,
        the multiplier will be 2 (1 for source, 1 for result)

    Raises
    ------
    TypeError
        If mem_multiplier is not a real number or man_mem_limit is not an int
    ValueError
        If mem_multiplier is less than 1
    """
    # Bug fix: the original `isinstance(mem_multiplier, float)` check
    # rejected integer multipliers (e.g. mem_multiplier=2 raised TypeError)
    # even though any real number >= 1 is meaningful here.
    if not isinstance(mem_multiplier, (int, float)):
        raise TypeError('mem_multiplier must be a floating point number')
    mem_multiplier = float(abs(mem_multiplier))
    if mem_multiplier < 1:
        raise ValueError('mem_multiplier must be at least 1')
    avail_mem_bytes = get_available_memory()  # in bytes
    if self.verbose and self.mpi_rank == self.__socket_master_rank:
        # expected to be the same for all ranks so just use this.
        print('Rank {} - on socket with {} cores and {} avail. RAM shared '
              'by {} ranks each given {} cores'
              '.'.format(self.__socket_master_rank, psutil.cpu_count(),
                         format_size(avail_mem_bytes),
                         self.__ranks_on_socket, self._cores))
    if man_mem_limit is None:
        man_mem_limit = avail_mem_bytes
    else:
        if not isinstance(man_mem_limit, int):
            raise TypeError('man_mem_limit must be a whole number')
        # Note that man_mem_limit is specified in mega bytes
        man_mem_limit = abs(man_mem_limit) * 1024 ** 2  # in bytes
        if self.verbose and self.mpi_rank == 0:
            print('User has requested to use no more than {} of memory'
                  '.'.format(format_size(man_mem_limit)))
    max_mem_bytes = min(avail_mem_bytes, man_mem_limit)
    # Remember that multiple processes (either via MPI or joblib) will share this socket
    # This makes logical sense but there's always too much free memory and the
    # cores are starved.
    max_mem_per_worker = max_mem_bytes / (self._cores * self.__ranks_on_socket)
    if self.verbose and self.mpi_rank == self.__socket_master_rank:
        print('Rank {}: Each of the {} workers on this socket are allowed '
              'to use {} of RAM'
              '.'.format(self.mpi_rank,
                         self._cores * self.__ranks_on_socket,
                         format_size(max_mem_per_worker)))
    # Now calculate the number of positions OF RAW DATA ONLY that can be
    # stored in memory in one go PER worker
    self.__bytes_per_pos = self.h5_main.dtype.itemsize * self.h5_main.shape[1]
    if self.verbose and self.mpi_rank == 0:
        print('Each position in the SOURCE dataset is {} large'
              '.'.format(format_size(self.__bytes_per_pos)))
    # Now multiply this with a factor that takes into account the expected
    # sizes of the results (Final and intermediate) datasets.
    self.__bytes_per_pos *= mem_multiplier
    if self.verbose and self.mpi_rank == 0 and mem_multiplier > 1:
        print('Each position of the source and results dataset(s) is {} '
              'large.'.format(format_size(self.__bytes_per_pos)))
    self._max_pos_per_read = int(np.floor(max_mem_per_worker / self.__bytes_per_pos))
    if self.verbose and self.mpi_rank == self.__socket_master_rank:
        title = 'SOURCE dataset only'
        if mem_multiplier > 1:
            title = 'source and result(s) datasets'
        # expected to be the same for all ranks so just use this.
        print('Rank {}: Workers on this socket allowed to read {} '
              'positions of the {} per chunk'
              '.'.format(self.mpi_rank, self._max_pos_per_read, title))
@staticmethod
def _map_function(*args, **kwargs):
    """
    Operates on the data of a single position. Called (typically via
    :meth:`~pyUSID.processing.process.Process._unit_computation`) once
    per position when processing a chunk in parallel.

    Parameters
    ----------
    args : list
        positional arguments, in order
    kwargs : dict
        keyword arguments

    Returns
    -------
    object
        Per-position result; the concrete type is defined by the subclass.
    """
    # Abstract by design - subclasses provide the per-position computation
    raise NotImplementedError('Please override the _unit_function specific to your process')
def _read_data_chunk(self):
    """
    Reads a chunk of data for the intended computation into memory.

    Populates ``self.data`` with the rows of ``h5_main`` for the next
    batch of incomplete positions, or sets ``self.data`` to None once
    this rank has no positions left.
    """
    if self.__start_pos < self.__rank_end_pos:
        # Batch ends at whichever comes first: this rank's share or the
        # per-worker memory budget (_max_pos_per_read)
        self.__end_pos = int(min(self.__rank_end_pos, self.__start_pos + self._max_pos_per_read))
        # DON'T DIRECTLY apply the start and end indices anymore to the h5 dataset. Find out what it means first
        self.__pixels_in_batch = self.__compute_jobs[self.__start_pos: self.__end_pos]
        if self.verbose:
            print('Rank {} will read positions: {}'.format(self.mpi_rank, self.__pixels_in_batch))
            bytes_this_read = self.__bytes_per_pos * len(self.__pixels_in_batch)
            print('Rank {} will read {} of the SOURCE dataset'
                  '.'.format(self.mpi_rank, format_size(bytes_this_read)))
            if self.mpi_rank == self.__socket_master_rank:
                tot_workers = self.__ranks_on_socket * self._cores
                print('Rank: {} available memory: {}. '
                      '{} workers on this socket will in total read ~ {}'
                      '.'.format(self.mpi_rank,
                                 format_size(get_available_memory()),
                                 tot_workers,
                                 format_size(bytes_this_read * tot_workers)
                                 ))
        # Reading as Dask array to minimize memory copies when restructuring in child classes
        if self.__lazy:
            main_dset = lazy_load_array(self.h5_main)
        else:
            main_dset = self.h5_main
        self.data = main_dset[self.__pixels_in_batch, :]
        # DON'T update the start position
    else:
        if self.verbose:
            print('Rank {} - Finished reading all data!'.format(self.mpi_rank))
        # Sentinel consumed by the while-loop in compute()
        self.data = None
def _write_results_chunk(self):
    """
    Writes the computed results into appropriate datasets.
    This needs to be rewritten since the processed data is expected to be at least as large as the dataset

    Subclasses must override this method to persist ``self._results``.
    """
    # Now update the start position
    # NOTE(review): compute() also advances __start_pos after calling this
    # method - confirm whether overrides are expected to do it themselves.
    self.__start_pos = self.__end_pos
    # This line can remain as is
    raise NotImplementedError('Please override the _set_results specific to your process')
def _create_results_datasets(self):
    """
    Process-specific hook that creates the HDF5 results group, the result
    dataset(s), and the corresponding ancillary (spectroscopic/position)
    datasets, linking them as appropriate. Populating the ancillary
    datasets within this function is recommended.
    """
    # Abstract by design - subclasses know the shape of their results
    raise NotImplementedError('Please override the _create_results_datasets specific to your process')
def __create_compute_status_dataset(self):
    """
    Creates a dataset that keeps track of what pixels / rows have already been computed. Users are not expected to
    extend / modify this function.

    The dataset holds one uint8 flag per position of ``h5_main``; a value
    of 1 marks the position as computed.
    """
    # Check to make sure that such a group doesn't already exist
    if self._status_dset_name in self.h5_results_grp.keys():
        self._h5_status_dset = self.h5_results_grp[self._status_dset_name]
        if not isinstance(self._h5_status_dset, h5py.Dataset):
            raise ValueError('Provided results group: {} contains an expected object ({}) that is not a dataset'
                             '.'.format(self.h5_results_grp, self._h5_status_dset))
        if self.h5_main.shape[0] != self._h5_status_dset.shape[0] or len(self._h5_status_dset.shape) > 1 or \
                self._h5_status_dset.dtype != np.uint8:
            # NOTE(review): only rank 0 raises here - other ranks fall
            # through silently; confirm this is intentional under MPI.
            if self.mpi_rank == 0:
                raise ValueError('Status dataset: {} was not of the expected shape or datatype'
                                 '.'.format(self._h5_status_dset))
    else:
        # Fresh status dataset - one flag per position, zero-initialized
        self._h5_status_dset = self.h5_results_grp.create_dataset(self._status_dset_name, dtype=np.uint8,
                                                                  shape=(self.h5_main.shape[0],))
        # Could be fresh computation or resuming from a legacy computation
        if 'last_pixel' in self.h5_results_grp.attrs.keys():
            completed_pixels = self.h5_results_grp.attrs['last_pixel']
            if completed_pixels > 0:
                self._h5_status_dset[:completed_pixels] = 1
def _write_source_dset_provenance(self):
    """
    Records where the source dataset lives when results are written to a
    different HDF5 file.

    Writes the path of the source HDF5 file and the internal path of
    ``h5_main`` as attributes of the results group. Does nothing when
    source and results share the same file.
    """
    same_file = self.h5_main.file == self.h5_results_grp.file
    if same_file:
        # Provenance is implicit when results live in the source file
        return
    provenance = {'source_file_path': self.h5_main.file.filename,
                  'source_dataset_path': self.h5_main.name}
    write_simple_attrs(self.h5_results_grp, provenance)
def _get_existing_datasets(self):
    """
    Hook that lets a process resume from partially computed results by
    re-acquiring references to its result dataset(s).

    Implementations should start from ``self.h5_results_grp``.
    """
    # Abstract by design - only subclasses know their dataset layout
    raise NotImplementedError('Please override the _get_existing_datasets specific to your process')
def _unit_computation(self, *args, **kwargs):
    """
    The unit computation that is performed per data chunk. This allows room for any data pre / post-processing
    as well as multiple calls to parallel_compute if necessary

    Parameters
    ----------
    args : list
        arguments forwarded to the mapped function
    kwargs : dict
        keyword arguments forwarded to the mapped function
    """
    # TODO: Try to use the functools.partials to preconfigure the map function
    # cores = number of processes / rank here
    if self.verbose and self.mpi_rank == 0:
        print("Rank {} at Process class' default _unit_computation() that "
              "will call parallel_compute()".format(self.mpi_rank))
    # Default behavior: map _map_function over every position in the
    # current chunk; per-position results are stashed for
    # _write_results_chunk() to persist
    self._results = parallel_compute(self.data, self._map_function, cores=self._cores,
                                     lengthy_computation=False,
                                     func_args=args, func_kwargs=kwargs,
                                     verbose=self.verbose)
def compute(self, override=False, *args, **kwargs):
    """
    Creates placeholders for the results, applies the :meth:`~pyUSID.processing.process.Process._unit_computation`
    to chunks of the dataset

    Parameters
    ----------
    override : bool, optional. default = False
        By default, compute will simply return duplicate results to avoid recomputing or resume computation on a
        group with partial results. Set to True to force fresh computation.
    args : list
        arguments to the mapped function in the correct order
    kwargs : dict
        keyword arguments to the mapped function

    Returns
    -------
    h5_results_grp : :class:`h5py.Group`
        Group containing all the results
    """

    class SimpleFIFO(object):
        """
        Simple class that maintains a moving average of some numbers.
        """

        def __init__(self, length=5):
            """
            Create a SimpleFIFO object

            Parameters
            ----------
            length : unsigned integer
                Number of values that need to be maintained for the moving average
            """
            self.__queue = list()
            if not isinstance(length, int):
                raise TypeError('length must be a positive integer')
            if length <= 0:
                raise ValueError('length must be a positive integer')
            self.__max_length = length
            self.__count = 0

        def put(self, item):
            """
            Adds the item to the internal queue. If the size of the queue exceeds its capacity, the oldest
            item is removed.

            Parameters
            ----------
            item : float or int
                Any real valued number
            """
            if (not isinstance(item, Number)) or isinstance(item, complex):
                raise TypeError('Provided item: {} is not a Number'.format(item))
            self.__queue.append(item)
            self.__count += 1
            if len(self.__queue) > self.__max_length:
                # Drop the oldest sample to keep a fixed-size window
                _ = self.__queue.pop(0)

        def get_mean(self):
            """
            Returns the average of the elements within the queue

            Returns
            -------
            avg : number.Number
                Mean of all elements within the queue
            """
            return np.mean(self.__queue)

        def get_cycles(self):
            """
            Returns the number of items that have been added to the queue in total

            Returns
            -------
            count : int
                number of items that have been added to the queue in total
            """
            return self.__count

    # Short-circuit on prior results unless the user forces a re-run
    if not override:
        if len(self.duplicate_h5_groups) > 0:
            if self.mpi_rank == 0:
                print('Returned previously computed results at ' + self.duplicate_h5_groups[-1].name)
            self.h5_results_grp = self.duplicate_h5_groups[-1]
            return self.duplicate_h5_groups[-1]
        elif len(self.partial_h5_groups) > 0 and self.h5_results_grp is None:
            if self.mpi_rank == 0:
                print('Resuming computation in group: ' + self.partial_h5_groups[-1].name)
            self.use_partial_computation()

    resuming = False
    if self.h5_results_grp is None:
        # starting fresh
        if self.verbose and self.mpi_rank == 0:
            print('Creating HDF5 group and datasets to hold results')
        self._create_results_datasets()
        self._write_source_dset_provenance()
    else:
        # resuming from previous checkpoint
        resuming = True
        self._get_existing_datasets()

    self.__create_compute_status_dataset()

    if resuming and self.mpi_rank == 0:
        percent_complete = int(100 * len(np.where(self._h5_status_dset[()] == 1)[0]) /
                               self._h5_status_dset.shape[0])
        print('Resuming computation. {}% completed already'.format(percent_complete))

    # Divide the remaining (incomplete) positions among the MPI ranks
    self.__assign_job_indices()

    # Not sure if this is necessary but I don't think it would hurt either
    if self.mpi_comm is not None:
        self.mpi_comm.barrier()

    # Moving averages used for the time-remaining estimate below
    compute_times = SimpleFIFO(5)
    write_times = SimpleFIFO(5)
    orig_rank_start = self.__start_pos

    if self.mpi_rank == 0 and self.mpi_size == 1:
        if self.__resume_implemented:
            print('\tThis class (likely) supports interruption and resuming of computations!\n'
                  '\tIf you are operating in a python console, press Ctrl+C or Cmd+C to abort\n'
                  '\tIf you are in a Jupyter notebook, click on "Kernel">>"Interrupt"\n'
                  '\tIf you are operating on a cluster and your job gets killed, re-run the job to resume\n')
        else:
            print('\tThis class does NOT support interruption and resuming of computations.\n'
                  '\tIn order to enable this feature, simply implement the _get_existing_datasets() function')

    if self.verbose and self.mpi_rank == self.__socket_master_rank:
        print('Rank: {} - with nothing loaded has {} free memory'
              ''.format(self.mpi_rank, format_size(get_available_memory())))

    self._read_data_chunk()

    if self.mpi_comm is not None:
        self.mpi_comm.barrier()

    if self.verbose and self.mpi_rank == self.__socket_master_rank:
        print('Rank: {} - with only raw data loaded has {} free memory'
              ''.format(self.mpi_rank, format_size(get_available_memory())))

    # Main loop: _read_data_chunk() sets self.data to None when this rank
    # has exhausted its share of positions
    while self.data is not None:
        num_jobs_in_batch = self.__end_pos - self.__start_pos

        t_start_1 = tm.time()

        self._unit_computation(*args, **kwargs)

        comp_time = np.round(tm.time() - t_start_1, decimals=2)  # in seconds
        time_per_pix = comp_time / num_jobs_in_batch
        compute_times.put(time_per_pix)

        if self.verbose:
            print('Rank {} - computed chunk in {} or {} per pixel. Average: {} per pixel'
                  '.'.format(self.mpi_rank, format_time(comp_time), format_time(time_per_pix),
                             format_time(compute_times.get_mean())))

        # Ranks can become memory starved. Check memory usage - raw data + results in memory at this point
        if self.verbose and self.mpi_rank == self.__socket_master_rank:
            print('Rank: {} - now holding onto raw data + results has {} free memory'
                  ''.format(self.mpi_rank, format_size(get_available_memory())))

        t_start_2 = tm.time()
        self._write_results_chunk()

        # NOW, update the positions. Users are NOT allowed to touch start and end pos
        self.__start_pos = self.__end_pos
        # Leaving in this provision that will allow restarting of processes
        if self.mpi_size == 1:
            self.h5_results_grp.attrs['last_pixel'] = self.__end_pos
        # Child classes don't even have to worry about flushing. Process will do it.
        self.h5_main.file.flush()

        dump_time = np.round(tm.time() - t_start_2, decimals=2)
        write_times.put(dump_time / num_jobs_in_batch)

        if self.verbose:
            print('Rank {} - wrote its {} pixel chunk in {}'.format(self.mpi_rank,
                                                                    num_jobs_in_batch,
                                                                    format_time(dump_time)))

        time_remaining = (self.__rank_end_pos - self.__end_pos) * \
                         (compute_times.get_mean() + write_times.get_mean())

        if self.verbose or self.mpi_rank == 0:
            percent_complete = int(100 * (self.__end_pos - orig_rank_start) /
                                   (self.__rank_end_pos - orig_rank_start))
            print('Rank {} - {}% complete. Time remaining: {}'.format(self.mpi_rank, percent_complete,
                                                                      format_time(time_remaining)))

        # All ranks should mark the pixels for this batch as completed. 'last_pixel' attribute will be updated later
        # Setting each section to 1 independently
        for curr_slice in integers_to_slices(self.__pixels_in_batch):
            self._h5_status_dset[curr_slice] = 1

        self._read_data_chunk()

    if self.verbose:
        print('Rank {} - Finished computing all jobs!'.format(self.mpi_rank))

    if self.mpi_comm is not None:
        self.mpi_comm.barrier()

    if self.mpi_rank == 0:
        print('Finished processing the entire dataset!')

    # Update the legacy 'last_pixel' attribute here:
    if self.mpi_rank == 0:
        self.h5_results_grp.attrs['last_pixel'] = self.h5_main.shape[0]

    return self.h5_results_grp
| 49,297 | 46.908649 | 120 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/__init__.py | 0 | 0 | 0 | py | |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_partial_h5.py | """
This script creates a partial h5py file then tests the process class with it.
Created on: Jul 12, 2019
Author: Emily Costa
from tests.io.data_utils import make_sparse_sampling_file
import pyUSID as usid
from pyUSID.io import dtype_utils, hdf_utils
import h5py
import numpy as np
from tests.io.simple_process import SimpleProcess
import os
# Creates incomplete h5py dataset object in current path
h5_path = 'sparse_sampling.h5'
if not os.path.exists(h5_path):
make_sparse_sampling_file()
h5_f = h5py.File(h5_path, mode='r+')
hdf_utils.print_tree(h5_f)
h5_main0 = h5_f['Measurement_000/Channel_000/Raw_Data']
h5_main1 = h5_f['Measurement_000/Channel_001/Raw_Data']
print(hdf_utils.simple.check_if_main(h5_main0, verbose=True))
#dtype_utils.check_dtype(h5_maini)
if __name__ == '__main__':
simp = SimpleProcess(h5_main0)
#print(simp.test())
#simp.test()
#simp.plot_test()
simp.compute()
"""
| 921 | 26.117647 | 77 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_image_translator.py | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 4 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import sys
from enum import Enum
from PIL import Image
import h5py
import numpy as np
from .data_utils import validate_aux_dset_pair, delete_existing_file
sys.path.append("../../pyUSID/")
from pyUSID.io import ImageTranslator, hdf_utils, USIDataset
from pyUSID.io.image import read_image
# The following code will be used to support both the old and new versions of pillow.
# Pillow added a new enum class 'Resampling' in the newer version>=9.0.0
if not hasattr(Image, 'Resampling'):  # pillow<9.0.0
    # Alias so Image.Resampling.BICUBIC etc. resolve on old Pillow
    Image.Resampling = Image
# Maps pre-9.0 integer resampling constants to their names.
# NOTE(review): 'LANCOS' looks like a typo for Pillow's 'LANCZOS' - verify
# against how interp_func_name is consumed downstream.
resample_dict = {0: 'NEAREST',
                 1: 'LANCOS',
                 2: 'BILINEAR',
                 3: 'BICUBIC',
                 4: 'BOX',
                 5: 'HAMMING'}
# Python 2/3 compatibility shims
if sys.version_info.major == 3:
    unicode = str
else:
    FileExistsError = ValueError
    FileNotFoundError = ValueError
# Shared fixture: a random 128x256 grayscale image written by TestImage.setUp
image_path = 'random_image.png'
rand_image = np.uint16(np.random.randint(0, high=255, size=(128, 256)))
class TestImage(unittest.TestCase):
    """Base fixture: writes the shared random image to disk before each
    test and removes it afterwards."""

    def setUp(self):
        pil_img = Image.fromarray(rand_image.astype(np.uint8))
        # Clear stale artifacts (the image and any translated HDF5 file)
        # before regenerating the source image
        for stale_path in (image_path, image_path.replace('.png', '.h5')):
            delete_existing_file(stale_path)
        pil_img.save(image_path)

    def tearDown(self):
        delete_existing_file(image_path)
class TestReadImage(TestImage):
    """Tests for ``pyUSID.io.image.read_image`` covering grayscale
    conversion, text-file inputs, and numpy vs. PIL return types."""

    def test_color_to_bw_image(self):
        # Color PNG should be converted to grayscale ("L" mode) by default
        color_image_path = './tests/io/logo_v01.png'
        img_obj = Image.open(color_image_path).convert(mode="L")
        pillow_obj = read_image(color_image_path, as_numpy_array=False)
        self.assertEqual(img_obj, pillow_obj)

    def test_color(self):
        # With as_grayscale=False the image should be returned unchanged
        color_image_path = './tests/io/logo_v01.png'
        img_obj = Image.open(color_image_path)
        pillow_obj = read_image(color_image_path, as_numpy_array=False, as_grayscale=False)
        self.assertEqual(img_obj, pillow_obj)

    def test_text_to_numpy_simple(self):
        # Whitespace-delimited text file should round-trip to a numpy array
        img_data = rand_image.astype(np.uint8)
        img_path = 'image_text.txt'
        delete_existing_file(img_path)
        np.savetxt(img_path, img_data)
        # Bug fix: read the text file written above (img_path) instead of
        # the module-level PNG (image_path). The original read the PNG, so
        # the text-reading path was never exercised and the assertion only
        # passed because the PNG holds the same pixel values.
        np_data = read_image(img_path, as_numpy_array=True)
        self.assertIsInstance(np_data, np.ndarray)
        self.assertTrue(np.allclose(np_data, img_data))
        delete_existing_file(img_path)

    def test_text_to_numpy_complex(self):
        # Delimited text with a header row, forwarded kwargs exercise
        # delimiter / skiprows handling
        img_data = np.uint16(np.random.randint(0, high=255, size=(4, 3)))
        img_path = 'image_text.csv'
        delete_existing_file(img_path)
        txt_kwargs = {'delimiter': ',',
                      'newline': '\n',
                      'header': 'cat, dog, cow'}
        np.savetxt(img_path, img_data, **txt_kwargs)
        np_data = read_image(img_path, as_numpy_array=True, delimiter=',', skiprows=1)
        self.assertIsInstance(np_data, np.ndarray)
        self.assertTrue(np.allclose(np_data, img_data))
        delete_existing_file(img_path)

    def test_text_complex_to_pillow(self):
        # Same delimited text input, but requesting a PIL Image back
        img_data = np.uint16(np.random.randint(0, high=255, size=(4, 3)))
        img_path = 'image_text.csv'
        delete_existing_file(img_path)
        txt_kwargs = {'delimiter': ',',
                      'newline': '\n',
                      'header': 'cat, dog, cow'}
        np.savetxt(img_path, img_data, **txt_kwargs)
        pillow_obj = read_image(img_path, as_grayscale=True, as_numpy_array=False,
                                delimiter=',', skiprows=1)
        self.assertIsInstance(pillow_obj, Image.Image)
        self.assertTrue(np.allclose(np.asarray(pillow_obj), img_data))
        delete_existing_file(img_path)

    def test_to_numpy(self):
        # PNG fixture should come back as the original pixel array
        np_data = read_image(image_path, as_numpy_array=True)
        self.assertIsInstance(np_data, np.ndarray)
        self.assertTrue(np.allclose(np_data, rand_image))

    def test_to_pillow(self):
        # PNG fixture should come back as an equivalent PIL Image
        pillow_obj = read_image(image_path, as_numpy_array=False)
        self.assertIsInstance(pillow_obj, Image.Image)
        self.assertTrue(np.allclose(np.asarray(pillow_obj), rand_image))
class TestImageTranslator(TestImage):
    """Validates ImageTranslator output files against a locally recomputed reference."""

    def basic_file_validation(self, h5_f):
        """Assert the standard USID file skeleton written by ImageTranslator.

        Checks the 'translator' attribute, the Measurement_000/Channel_000
        group hierarchy, the five expected datasets, and the single-point
        spectroscopic ancillary pair.
        """
        self.assertEqual('ImageTranslator', hdf_utils.get_attr(h5_f, 'translator'))
        # First level should have absolutely nothing besides one group
        self.assertEqual(len(h5_f.items()), 1)
        self.assertTrue('Measurement_000' in h5_f.keys())
        h5_meas_grp = h5_f['Measurement_000']
        self.assertIsInstance(h5_meas_grp, h5py.Group)
        # Again, this group should only have one group - Channel_000
        self.assertEqual(len(h5_meas_grp.items()), 1)
        self.assertTrue('Channel_000' in h5_meas_grp.keys())
        h5_chan_grp = h5_meas_grp['Channel_000']
        self.assertIsInstance(h5_chan_grp, h5py.Group)
        # This channel group is not expected to have any (custom) attributes but it will contain the main dataset
        self.assertEqual(len(h5_chan_grp.items()), 5)
        for dset_name in ['Raw_Data', 'Position_Indices', 'Position_Values', 'Spectroscopic_Indices',
                          'Spectroscopic_Values']:
            self.assertTrue(dset_name in h5_chan_grp.keys())
            h5_dset = h5_chan_grp[dset_name]
            self.assertIsInstance(h5_dset, h5py.Dataset)
        usid_main = USIDataset(h5_chan_grp['Raw_Data'])
        self.assertIsInstance(usid_main, USIDataset)
        self.assertEqual(usid_main.name.split('/')[-1], 'Raw_Data')
        self.assertEqual(usid_main.parent, h5_chan_grp)
        # Images have a single (dummy) spectroscopic point
        validate_aux_dset_pair(self, h5_chan_grp, usid_main.h5_spec_inds, usid_main.h5_spec_vals, ['arb'],
                               ['a.u.'], np.atleast_2d([0]), h5_main=usid_main, is_spectral=True)

    def main_translate(self, **kwargs):
        """Translate the PNG fixture with the given kwargs and validate the result.

        Re-implements the translator's binning / normalization locally (using
        the same Pillow calls) to build the expected data and attributes.
        NOTE: kwargs are consumed via pop() in a specific order — do not
        reorder these blocks.
        """
        h5_path = kwargs.pop('h5_path', image_path.replace('.png', '.h5'))
        delete_existing_file(h5_path)
        input_image = rand_image.copy()
        usize, vsize = input_image.shape[:2]
        translator = ImageTranslator()
        h5_path = translator.translate(image_path, **kwargs)
        image_parms = dict()
        if 'bin_factor' in kwargs.keys():
            bin_factor = kwargs.pop('bin_factor')
            if bin_factor is None:
                _ = kwargs.pop('interp_func', None)
            else:
                # Scalar bin factor applies to both axes
                if isinstance(bin_factor, int):
                    bin_factor = (bin_factor, bin_factor)
                interp_func = kwargs.pop('interp_func', Image.Resampling.BICUBIC)
                if isinstance(interp_func, int): #pillow<9.0.0
                    interp_func_name = resample_dict[interp_func]
                elif isinstance(interp_func, Enum):
                    interp_func_name = interp_func.name
                image_parms.update({'image_binning_size': np.array(bin_factor),
                                    'image_PIL_resample_mode': interp_func_name})
                # Reproduce the translator's grayscale conversion + resize
                img_obj = Image.fromarray(input_image)
                img_obj = img_obj.convert(mode="L")
                img_obj = img_obj.resize((int(vsize / bin_factor[1]), int(usize / bin_factor[0])),
                                         resample=interp_func)
                input_image = np.asarray(img_obj)
        image_parms.update({'normalized': False})
        input_image = input_image.copy()
        if 'normalize' in kwargs.keys():
            normalize = kwargs.pop('normalize')
            if normalize:
                # Shift to zero minimum, then scale to [0, 1]
                input_image -= np.min(input_image)
                input_image = input_image / np.float32(np.max(input_image))
                image_parms.update({'normalized': True})
        image_parms.update({'image_min': np.min(input_image), 'image_max': np.max(input_image)})
        with h5py.File(h5_path, mode='r') as h5_f:
            self.basic_file_validation(h5_f)
            h5_meas_grp = h5_f['Measurement_000']
            h5_chan_grp = h5_meas_grp['Channel_000']
            usid_main = USIDataset(h5_chan_grp['Raw_Data'])
            # check the attributes under this group
            for key, expected_val in image_parms.items():
                print(hdf_utils.get_attr(h5_meas_grp, key), expected_val)
                self.assertTrue(np.all(hdf_utils.get_attr(h5_meas_grp, key) == expected_val))
            # Main dataset is the image flattened column-major into (pixels, 1)
            one_d_image = input_image.T.reshape(-1, 1)
            self.assertTrue(np.allclose(one_d_image, usid_main[()]))
            # self.assertTrue(np.allclose(rand_image, np.reshape(usid_main[()], rand_image.shape)))
            pos_data = np.vstack((np.tile(np.arange(input_image.shape[0]), input_image.shape[1]),
                                  np.repeat(np.arange(input_image.shape[1]), input_image.shape[0]))).T
            validate_aux_dset_pair(self, h5_chan_grp, usid_main.h5_pos_inds, usid_main.h5_pos_vals, ['Y', 'X'],
                                   ['a.u.', 'a.u.'], pos_data, h5_main=usid_main, is_spectral=False)
        delete_existing_file(h5_path)

    def test_basic_translate(self):
        # Translation with all defaults (no binning, no normalization)
        self.main_translate()
class TestBinning(TestImageTranslator):
    """Exercises the bin_factor / interp_func arguments of ImageTranslator."""

    def test_single_default_interp(self):
        self.main_translate(bin_factor=2)

    def test_tuple_default_interp(self):
        self.main_translate(bin_factor=(1, 2))

    def test_too_many_dims(self):
        with self.assertRaises(ValueError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path, bin_factor=(1, 2, 3))

    def test_neg_parms(self):
        with self.assertRaises(ValueError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path, bin_factor=-2)

    def test_float_parms(self):
        with self.assertRaises(TypeError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path, bin_factor=1.34)

    def test_invalid_dtype(self):
        with self.assertRaises(TypeError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path, bin_factor=['dfrdd', True])

    def test_custom_interp(self):
        self.main_translate(bin_factor=2, interp_func=Image.NEAREST)

    def test_invalid_interp(self):
        with self.assertRaises(TypeError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path, bin_factor=2, interp_func='dsdsdsd')
class TestNormalization(TestImageTranslator):
    """Exercises the normalize argument of ImageTranslator."""

    def test_normalize_only(self):
        self.main_translate(normalize=True)

    def test_normalize_and_default_interp(self):
        self.main_translate(normalize=True, bin_factor=2)
class TestFile(TestImageTranslator):
    """Exercises path validation and the h5_path argument of ImageTranslator."""

    def test_invalid_h5_path(self):
        with self.assertRaises(TypeError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path, h5_path=np.arange(4))

    def test_path_not_str(self):
        with self.assertRaises(TypeError):
            xlator = ImageTranslator()
            _ = xlator.translate(np.arange(4))

    def test_path_does_not_exist(self):
        with self.assertRaises(FileNotFoundError):
            xlator = ImageTranslator()
            _ = xlator.translate('no_such_file.png')

    def test_output_h5_file_already_exists(self):
        # Pre-create the default output file so translation must refuse to clobber it
        with h5py.File(image_path.replace('.png', '.h5'), mode='w') as _:
            pass
        with self.assertRaises(FileExistsError):
            xlator = ImageTranslator()
            _ = xlator.translate(image_path)

    def test_valid_h5_path(self):
        self.main_translate(h5_path='custom_path.h5')
        self.main_translate(h5_path='custom_path.txt')
# Allow this test module to be run directly as a script
if __name__ == '__main__':
    unittest.main()
| 11,680 | 37.807309 | 113 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_usi_dataset.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
import matplotlib as mpl
# Attempting to get things to work for all versions of python on Travis
mpl.use('Agg')
from sidpy.hdf.hdf_utils import get_attr
sys.path.append("../../pyUSID/")
from pyUSID.io import USIDataset, Dimension
from pyUSID.io.hdf_utils.model import reshape_to_n_dims, get_dimensionality
from . import data_utils
# Visualization tests are only run on Python >= 3.5; everything else skips them
skip_viz_tests = True
if sys.version_info.major == 3:
    # Py2/Py3 compatibility shim: 'unicode' does not exist on Python 3
    unicode = str
    if sys.version_info.minor > 4:
        skip_viz_tests = False
# Path of the standard BEPS HDF5 fixture built/torn down by each test class
test_h5_file_path = data_utils.std_beps_path
class TestBEPS(unittest.TestCase):
    """Fixture that opens the standard BEPS file and caches handles / reference arrays."""

    def setUp(self):
        """Build the BEPS fixture file and load source + results datasets.

        Caches both slow-to-fast (s2f) and fast-to-slow (f2s) N-dimensional
        reference arrays for the source and the two fitter result groups.
        """
        data_utils.make_beps_file()
        self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias']
        # File handle stays open for the duration of each test; closed in tearDown
        self.h5_file = h5py.File(data_utils.std_beps_path, mode='r')
        h5_grp = self.h5_file['/Raw_Measurement/']
        self.source_nd_s2f = h5_grp['n_dim_form'][()]
        # f2s form swaps the position axes and the spectroscopic axes
        self.source_nd_f2s = self.source_nd_s2f.transpose(1, 0, 3, 2)
        self.h5_source = USIDataset(h5_grp['source_main'])
        self.pos_dims=[]
        self.spec_dims=[]
        # Rebuild Dimension descriptors from the ancillary datasets' labels/units
        for dim_name, dim_units in zip(self.h5_source.pos_dim_labels,
                                       get_attr(self.h5_source.h5_pos_inds, 'units')):
            self.pos_dims.append(
                Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
        for dim_name, dim_units in zip(self.h5_source.spec_dim_labels,
                                       get_attr(self.h5_source.h5_spec_inds, 'units')):
            self.spec_dims.append(
                Dimension(dim_name, dim_units, h5_grp[dim_name][()]))
        # Fitter_000 holds compound-dtype results; Fitter_001 holds complex results
        res_grp_0 = h5_grp['source_main-Fitter_000']
        self.results_0_nd_s2f = res_grp_0['n_dim_form'][()]
        self.results_0_nd_f2s = self.results_0_nd_s2f.transpose(1, 0, 3, 2)
        self.h5_compound = USIDataset(res_grp_0['results_main'])
        res_grp_1 = h5_grp['source_main-Fitter_001']
        self.results_1_nd_s2f = res_grp_1['n_dim_form'][()]
        self.results_1_nd_f2s = self.results_1_nd_s2f.transpose(1, 0, 3, 2)
        self.h5_complex = USIDataset(res_grp_1['results_main'])

    def tearDown(self):
        """Close the file handle and delete the fixture file."""
        self.h5_file.close()
        os.remove(data_utils.std_beps_path)
class TestUSIDatasetReal(unittest.TestCase):
    """Base fixture that rebuilds the BEPS test file before every test."""

    def setUp(self):
        self.rev_spec = False
        data_utils.make_beps_file(rev_spec=self.rev_spec)
        if self.rev_spec:
            self.orig_labels_order = ['X', 'Y', 'Cycle', 'Bias']
        else:
            self.orig_labels_order = ['X', 'Y', 'Bias', 'Cycle']

    def tearDown(self):
        # Remove the HDF5 file created in setUp
        os.remove(test_h5_file_path)

    def get_expected_n_dim(self, h5_f):
        """Return (slow-to-fast, fast-to-slow) N-dim reference arrays."""
        s2f = h5_f['/Raw_Measurement/n_dim_form'][()]
        f2s = s2f.transpose(1, 0, 3, 2)
        if self.rev_spec:
            f2s = f2s.transpose(0, 1, 3, 2)
        return s2f, f2s
class TestStringRepr(TestBEPS):
    """Checks the human-readable __repr__ of a USIDataset."""

    def test_string_representation(self):
        """Compare selected lines of repr() against values rebuilt from the file.

        NOTE(review): the hard-coded line indices [0, 2, 4, 7, 8, 10, 11]
        depend on the exact layout of USIDataset.__repr__ — they will need
        updating if that format ever changes.
        """
        usi_dset = self.h5_source
        h5_main = self.h5_file[usi_dset.name]
        actual = usi_dset.__repr__()
        actual = [line.strip() for line in actual.split("\n")]
        actual = [actual[line_ind] for line_ind in [0, 2, 4, 7, 8, 10, 11]]
        expected = list()
        expected.append(h5_main.__repr__())
        expected.append(h5_main.name)
        expected.append(get_attr(h5_main, "quantity") + " (" + get_attr(h5_main, "units") + ")")
        # One "<name> - size: <n>" line per position and spectroscopic dimension
        for h5_inds in [usi_dset.h5_pos_inds, usi_dset.h5_spec_inds]:
            for dim_name, dim_size in zip(get_attr(h5_inds, "labels"),
                                          get_dimensionality(h5_inds)):
                expected.append(dim_name + ' - size: ' + str(dim_size))
        self.assertTrue(np.all([x == y for x, y in zip(actual, expected)]))
class TestEquality(TestBEPS):
    """Checks USIDataset.__eq__ against matching and non-matching operands."""

    def test_correct_USIDataset(self):
        dset = USIDataset(self.h5_source)
        self.assertTrue(dset == dset)

    def test_correct_h5_dataset(self):
        h5_main = self.h5_file[self.h5_source.name]
        dset = USIDataset(h5_main)
        self.assertTrue(dset == h5_main)

    def test_incorrect_USIDataset(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            other = USIDataset(h5_f['/Raw_Measurement/source_main-Fitter_000/results_main'])
            self.assertFalse(dset == other)

    def test_incorrect_h5_dataset(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            other = h5_f['/Raw_Measurement/source_main-Fitter_000/Spectroscopic_Indices']
            self.assertFalse(dset == other)

    def test_incorrect_object(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            other = np.zeros(shape=(1, 2, 3, 4))
            self.assertFalse(dset == other)
class TestGetNDimFormExistsReal(TestUSIDatasetReal):
    """Checks USIDataset.get_n_dim_form in both sorted and unsorted modes."""

    def test_sorted_and_unsorted(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            nd_slow_to_fast, nd_fast_to_slow = self.get_expected_n_dim(h5_f)
            actual_f2s = usi_dset.get_n_dim_form(lazy=False)
            self.assertTrue(np.allclose(nd_fast_to_slow, actual_f2s))
            # NOTE(review): result of reshape_to_n_dims is only printed, never
            # asserted — presumably a smoke check / leftover debug; confirm intent
            nd_form, success = reshape_to_n_dims(usi_dset, sort_dims=True)
            print(nd_form.shape)
            # After toggling, the N-dim form should come back slow-to-fast
            usi_dset.toggle_sorting()
            actual_s2f = usi_dset.get_n_dim_form(lazy=False)
            self.assertTrue(np.allclose(nd_slow_to_fast, actual_s2f))
class TestPosSpecSlicesReal(TestUSIDatasetReal):
    """Checks USIDataset._get_pos_spec_slices: row-index resolution for slice dicts.

    The source dataset has positions X (size 5) x Y (size 3) = 15 rows and
    spectroscopic Bias (size 7) x Cycle (size 2) = 14 columns; expected index
    lists below are built from that layout.
    """

    def test_empty_dict(self):
        # No constraints -> every position and spectroscopic index
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({})
            self.assertTrue(np.allclose(np.expand_dims(np.arange(14), axis=1), actual_spec))
            self.assertTrue(np.allclose(np.expand_dims(np.arange(15), axis=1), actual_pos))

    def test_non_existent_dim(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(KeyError):
                _ = usi_main._get_pos_spec_slices({'blah': 4, 'X': 3, 'Y': 1})

    def test_incorrect_type(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(TypeError):
                _ = usi_main._get_pos_spec_slices({'X': 'fdfd', 'Y': 1})

    def test_negative_index(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(ValueError):
                _ = usi_main._get_pos_spec_slices({'X': -4, 'Y': 1})

    def test_out_of_bounds(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(IndexError):
                _ = usi_main._get_pos_spec_slices({'X': 15, 'Y': 1})

    def test_one_pos_dim_removed(self):
        """Fixing X=3 keeps every 5th position row starting at 3."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            # orig_pos = np.vstack([np.tile(np.arange(5), 3), np.repeat(np.arange(3), 5)]).T
            # orig_spec = np.vstack([np.tile(np.arange(7), 2), np.repeat(np.arange(2), 7)])
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': 3})
            # we want every fifth position starting from 3
            expected_pos = np.expand_dims(np.arange(3, 15, 5), axis=1)
            expected_spec = np.expand_dims(np.arange(14), axis=1)
            self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))

    def test_one_pos_dim_sliced(self):
        """A slice object on X keeps rows 5*row + col for col in the slice."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2)})
            # we want every fifth position starting from 3
            positions = []
            for row_ind in range(3):
                for col_ind in range(1, 5, 2):
                    positions.append(5 * row_ind + col_ind)
            expected_pos = np.expand_dims(positions, axis=1)
            expected_spec = np.expand_dims(np.arange(14), axis=1)
            self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))

    def test_two_pos_dim_sliced(self):
        """Combined slice on X and fixed Y restricts both position axes."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': slice(1, 5, 2), 'Y': 1})
            # we want every fifth position starting from 3
            positions = []
            for row_ind in range(1, 2):
                for col_ind in range(1, 5, 2):
                    positions.append(5 * row_ind + col_ind)
            expected_pos = np.expand_dims(positions, axis=1)
            expected_spec = np.expand_dims(np.arange(14), axis=1)
            self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))

    def test_two_pos_dim_sliced_list(self):
        """A list of indices on X behaves like fancy indexing."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': [1, 2, 4], 'Y': 1})
            # we want every fifth position starting from 3
            positions = []
            for row_ind in range(1, 2):
                for col_ind in [1, 2, 4]:
                    positions.append(5 * row_ind + col_ind)
            expected_pos = np.expand_dims(positions, axis=1)
            expected_spec = np.expand_dims(np.arange(14), axis=1)
            self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))

    def test_both_pos_removed(self):
        """Fixing both position dims leaves exactly one position row."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': 3, 'Y': 1})
            # we want every fifth position starting from 3
            expected_pos = np.expand_dims([1 * 5 + 3], axis=1)
            expected_spec = np.expand_dims(np.arange(14), axis=1)
            self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))

    def test_pos_and_spec_sliced_list(self):
        """Simultaneous position and spectroscopic constraints; expectations are
        rebuilt from the ancillary index datasets rather than hand-computed."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            h5_pos_inds = usi_main.h5_pos_inds
            h5_spec_inds = usi_main.h5_spec_inds
            actual_pos, actual_spec = usi_main._get_pos_spec_slices({'X': [1, 2, 4], 'Bias': slice(1, 7, 3)})
            # we want every fifth position starting from 3
            positions = []
            for col_ind in [1, 2, 4]:
                positions += np.argwhere(h5_pos_inds[h5_pos_inds.attrs['X']] == col_ind)[:, 0].tolist()
            specs = []
            for bias_ind in range(1, 7, 3):
                specs += np.argwhere(h5_spec_inds[h5_spec_inds.attrs['Bias']] == bias_ind)[:, 1].tolist()
            expected_pos = np.expand_dims(positions, axis=1)
            expected_spec = np.expand_dims(specs, axis=1)
            expected_pos.sort(axis=0)
            expected_spec.sort(axis=0)
            self.assertTrue(np.allclose(expected_spec, actual_spec))
            self.assertTrue(np.allclose(expected_pos, actual_pos))
class TestGetUnitValuesReal(TestUSIDatasetReal):
    """Checks get_pos_values / get_spec_values against the raw dimension arrays."""

    def test_get_pos_values(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            main_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            for name in ['X', 'Y']:
                reference = h5_f['/Raw_Measurement/' + name][()]
                returned = main_dset.get_pos_values(name)
                self.assertTrue(np.allclose(reference, returned))

    def test_get_pos_values_illegal(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            main_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            # Unknown dimension name and non-string argument must both fail
            with self.assertRaises(KeyError):
                _ = main_dset.get_pos_values('blah')
            with self.assertRaises(TypeError):
                _ = main_dset.get_pos_values(np.array(5))

    def test_get_spec_values(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            main_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            for name in ['Bias', 'Cycle']:
                reference = h5_f['/Raw_Measurement/' + name][()]
                returned = main_dset.get_spec_values(name)
                self.assertTrue(np.allclose(reference, returned))

    def test_get_spec_values_illegal(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            main_dset = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(KeyError):
                _ = main_dset.get_spec_values('blah')
            with self.assertRaises(TypeError):
                _ = main_dset.get_spec_values(np.array(5))
class TestSliceReal(TestUSIDatasetReal):
    """Checks USIDataset.slice over N-dim / 2D x numpy / dask result combinations.

    Bug fix: the final group of tests (slicing every dimension down to a single
    index) reused the names of the earlier ``test_all_dims_sliced_*`` methods.
    Python keeps only the last definition of each name, so the earlier tests
    were silently shadowed and never run, and one copy was named ``_2d_dask``
    while actually requesting a numpy result. The second group is now named
    ``test_all_dims_removed_*`` with accurate numpy/dask suffixes.
    """

    def test_non_existent_dim(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(KeyError):
                _ = usi_main.slice({'blah': 4, 'X': 3, 'Y': 1})

    def test_incorrect_type(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(TypeError):
                _ = usi_main.slice({'X': 'fdfd', 'Y': 1})

    def test_out_of_bounds(self):
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(IndexError):
                _ = usi_main.slice({'X': 15, 'Y': 1})

    def base(self, slice_dict, f2s_slice_list, result_as_nd, lazy_result,
             verbose=False):
        """Slice the source dataset and compare against a locally built reference.

        Parameters
        ----------
        slice_dict : dict or None
            Argument forwarded to USIDataset.slice
        f2s_slice_list : list
            Per-axis slices in fast-to-slow order (X, Y, Bias, Cycle) used to
            build the expected array
        result_as_nd : bool
            Request the result in N-dimensional (True) or flattened 2D (False) form
        lazy_result : bool
            Request a dask array (True) or an in-memory numpy array (False)
        """
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            actual, success = usi_main.slice(slice_dict,
                                             ndim_form=result_as_nd,
                                             lazy=lazy_result,
                                             verbose=verbose)
            if verbose:
                print('Status: {}, actual.shape: {}, actual.dtype: {}, '
                      'type(actual): {}'.format(success, actual.shape,
                                                actual.dtype, type(actual)))
            self.assertTrue(success)
            n_dim_s2f, n_dim_f2s = self.get_expected_n_dim(h5_f)
            if result_as_nd:
                expected = n_dim_f2s[tuple(f2s_slice_list)]
                expected = expected.squeeze()
            else:
                # Flattened form is built from the slow-to-fast array, so the
                # slice list must be reordered within each (pos, spec) pair
                s2f_slice_list = f2s_slice_list[:2][::-1] + \
                                 f2s_slice_list[2:][::-1]
                if verbose:
                    print('Slice list converted from: {} to {}'
                          ''.format(f2s_slice_list, s2f_slice_list))
                expected = n_dim_s2f[tuple(s2f_slice_list)]
                if verbose:
                    print('Expected in N-dim form: {}'.format(expected.shape))
                expected = expected.reshape(np.prod(expected.shape[:2]),
                                            np.prod(expected.shape[2:]))
                if verbose:
                    print('Expected after flattening of shape: {}'
                          ''.format(expected.shape))
            if lazy_result:
                self.assertIsInstance(actual, da.core.Array)
                actual = actual.compute()
            self.assertTrue(np.allclose(expected, actual))

    def test_empty_2d_numpy(self):
        self.base(None, [slice(None) for _ in range(4)], False, False)

    def test_empty_nd_numpy(self):
        self.base(None, [slice(None) for _ in range(4)], True, False)

    def test_empty_nd_dask(self):
        self.base(None, [slice(None) for _ in range(4)], True, True)

    def test_empty_2d_dask(self):
        self.base(None, [slice(None) for _ in range(4)], False, True)

    def test_negative_index_nd_numpy(self):
        self.base({'X': -2, 'Y': 1},
                  [slice(-2, -1), slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, False)

    def test_negative_index_nd_dask(self):
        self.base({'X': -2, 'Y': 1},
                  [slice(-2, -1), slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, True)

    def test_negative_index_2d_numpy(self):
        # Negative indices are rejected when a 2D result is requested
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            with self.assertRaises(ValueError):
                _ = usi_main.slice({'X': -2, 'Y': 1}, ndim_form=False)

    def test_one_pos_dim_removed_nd_numpy(self):
        self.base({'X': 3},
                  [3] + [slice(None) for _ in range(3)], True, False)

    def test_one_pos_dim_removed_nd_dask(self):
        self.base({'X': 3},
                  [3] + [slice(None) for _ in range(3)], True, True)

    def test_one_pos_dim_removed_2d_numpy(self):
        self.base({'X': 3},
                  [slice(3, 4)] + [slice(None) for _ in range(3)],
                  False, False)

    def test_one_pos_dim_removed_2d_dask(self):
        self.base({'X': 3},
                  [slice(3, 4)] + [slice(None) for _ in range(3)],
                  False, True)

    def test_one_pos_dim_sliced_nd_numpy(self):
        self.base({'X': slice(1, 5, 2)},
                  [slice(1, 5, 2)] + [slice(None) for _ in range(3)],
                  True, False)

    def test_one_pos_dim_sliced_nd_dask(self):
        self.base({'X': slice(1, 5, 2)},
                  [slice(1, 5, 2)] + [slice(None) for _ in range(3)],
                  True, True)

    def test_one_pos_dim_sliced_2d_numpy(self):
        self.base({'X': slice(1, 5, 2)},
                  [slice(1, 5, 2)] + [slice(None) for _ in range(3)],
                  False, False)

    def test_one_pos_dim_sliced_2d_dask(self):
        self.base({'X': slice(1, 5, 2)},
                  [slice(1, 5, 2)] + [slice(None) for _ in range(3)],
                  False, True)

    def test_two_pos_dim_sliced_nd_numpy(self):
        self.base({'X': slice(1, 5, 2), 'Y': 1},
                  [slice(1, 5, 2), slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, False)

    def test_two_pos_dim_sliced_nd_dask(self):
        self.base({'X': slice(1, 5, 2), 'Y': 1},
                  [slice(1, 5, 2), slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, True)

    def test_two_pos_dim_sliced_2d_numpy(self):
        self.base({'X': slice(1, 5, 2), 'Y': 1},
                  [slice(1, 5, 2), slice(1, 2)] + [slice(None) for _ in range(2)],
                  False, False)

    def test_two_pos_dim_sliced_2d_dask(self):
        self.base({'X': slice(1, 5, 2), 'Y': 1},
                  [slice(1, 5, 2), slice(1, 2)] + [slice(None) for _ in range(2)],
                  False, True)

    def test_two_pos_dim_sliced_list_nd_numpy(self):
        self.base({'X': [1, 2, 4], 'Y': 1},
                  [[1, 2, 4], slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, False)

    def test_two_pos_dim_sliced_list_nd_dask(self):
        self.base({'X': [1, 2, 4], 'Y': 1},
                  [[1, 2, 4], slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, True)

    def test_two_pos_dim_sliced_list_2d_numpy(self):
        self.base({'X': [1, 2, 4], 'Y': 1},
                  [[1, 2, 4], slice(1, 2)] + [slice(None) for _ in range(2)],
                  False, False)

    def test_two_pos_dim_sliced_list_2d_dask(self):
        self.base({'X': [1, 2, 4], 'Y': 1},
                  [[1, 2, 4], slice(1, 2)] + [slice(None) for _ in range(2)],
                  False, True)

    def test_both_pos_removed_nd_numpy(self):
        self.base({'X': 3, 'Y': 1},
                  [slice(3, 4), slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, False)

    def test_both_pos_removed_nd_dask(self):
        self.base({'X': 3, 'Y': 1},
                  [slice(3, 4), slice(1, 2)] + [slice(None) for _ in range(2)],
                  True, True)

    def test_both_pos_removed_2d_numpy(self):
        self.base({'X': 3, 'Y': 1},
                  [slice(3, 4), slice(1, 2)] + [slice(None) for _ in range(2)],
                  False, False)

    def test_both_pos_removed_2d_dask(self):
        self.base({'X': 3, 'Y': 1},
                  [slice(3, 4), slice(1, 2)] + [slice(None) for _ in range(2)],
                  False, True)

    def test_pos_and_spec_sliced_list_nd_numpy(self):
        self.base({'X': [1, 2, 4], 'Bias': slice(1, 7, 3)},
                  [[1, 2, 4], slice(None), slice(1, 7, 3), slice(None)],
                  True, False)

    def test_pos_and_spec_sliced_list_nd_dask(self):
        self.base({'X': [1, 2, 4], 'Bias': slice(1, 7, 3)},
                  [[1, 2, 4], slice(None), slice(1, 7, 3), slice(None)],
                  True, True)

    def test_pos_and_spec_sliced_list_2d_numpy(self):
        self.base({'X': [1, 2, 4], 'Bias': slice(1, 7, 3)},
                  [[1, 2, 4], slice(None), slice(1, 7, 3), slice(None)],
                  False, False)

    def test_pos_and_spec_sliced_list_2d_dask(self):
        self.base({'X': [1, 2, 4], 'Bias': slice(1, 7, 3)},
                  [[1, 2, 4], slice(None), slice(1, 7, 3), slice(None)],
                  False, True)

    def test_all_dims_sliced_nd_numpy(self):
        self.base({'X': [1, 2, 4], 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [[1, 2, 4], slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  True, False)

    def test_all_dims_sliced_nd_dask(self):
        self.base({'X': [1, 2, 4], 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [[1, 2, 4], slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  True, True)

    def test_all_dims_sliced_2d_numpy(self):
        self.base({'X': [1, 2, 4], 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [[1, 2, 4], slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  False, False)

    def test_all_dims_sliced_2d_dask(self):
        self.base({'X': [1, 2, 4], 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [[1, 2, 4], slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  False, True)

    def test_all_but_one_dims_sliced_nd_numpy(self):
        self.base({'X': 1, 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  True, False)

    def test_all_but_one_dims_sliced_nd_dask(self):
        self.base({'X': 1, 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  True, True)

    def test_all_but_one_dims_sliced_2d_numpy(self):
        self.base({'X': 1, 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  False, False)

    def test_all_but_one_dims_sliced_2d_dask(self):
        self.base({'X': 1, 'Y': 2, 'Bias': slice(1, 7, 3), 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(1, 7, 3), slice(1, 2)],
                  False, True)

    # Every dimension collapsed to a single index. These were formerly named
    # test_all_dims_sliced_* and shadowed the methods above.
    def test_all_dims_removed_nd_numpy(self):
        self.base({'X': 1, 'Y': 2, 'Bias': 4, 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(4, 5), slice(1, 2)],
                  True, False)

    def test_all_dims_removed_nd_dask(self):
        self.base({'X': 1, 'Y': 2, 'Bias': 4, 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(4, 5), slice(1, 2)],
                  True, True)

    def test_all_dims_removed_2d_numpy(self):
        # Was mislabeled "..._2d_dask" despite requesting a numpy result
        self.base({'X': 1, 'Y': 2, 'Bias': 4, 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(4, 5), slice(1, 2)],
                  False, False)

    def test_all_dims_removed_2d_dask(self):
        self.base({'X': 1, 'Y': 2, 'Bias': 4, 'Cycle': 1},
                  [slice(1, 2), slice(2, 3), slice(4, 5), slice(1, 2)],
                  False, True)
class TestSortingReal(TestUSIDatasetReal):
    """Checks toggle_sorting / get_current_sorting on a USIDataset."""

    def test_toggle_sorting(self):
        # Need to change data file so that sorting actually does something
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            self.assertEqual(usi_main.n_dim_labels, self.orig_labels_order)
            # First toggle: labels reordered fastest-to-slowest within pos / spec
            usi_main.toggle_sorting()
            self.assertEqual(usi_main.n_dim_labels, ['Y', 'X', 'Cycle', 'Bias'])
            # Second toggle restores the file order
            usi_main.toggle_sorting()
            self.assertEqual(usi_main.n_dim_labels, self.orig_labels_order)

    def test_get_current_sorting(self):
        """Verify the message printed by get_current_sorting for both states.

        Reads the private, name-mangled __sort_dims flag directly to confirm
        the internal state alongside the printed message.
        """
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            unsorted_str = 'Data dimensions are in the order they occur in the file.\n'
            sorted_str = 'Data dimensions are sorted in order from fastest changing dimension to slowest.\n'
            # Initial state should be unsorted
            self.assertFalse(usi_main._USIDataset__sort_dims)
            with data_utils.capture_stdout() as get_value:
                usi_main.get_current_sorting()
                test_str = get_value()
            self.assertTrue(test_str == unsorted_str)
            # Toggle sorting. Sorting should now be true.
            usi_main.toggle_sorting()
            self.assertTrue(usi_main._USIDataset__sort_dims)
            with data_utils.capture_stdout() as get_value:
                usi_main.get_current_sorting()
                test_str = get_value()
            self.assertTrue(test_str == sorted_str)
class TestGetDimsForSliceReal(TestUSIDatasetReal):
    """Checks USIDataset._get_dims_for_slice: surviving Dimensions after a slice."""

    @staticmethod
    def get_all_dimensions():
        """Rebuild the full position and spectroscopic Dimension lists from the file."""
        pos_dims = []
        spec_dims = []
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            h5_raw_grp = h5_f['Raw_Measurement']
            usi_main = USIDataset(h5_raw_grp['source_main'])
            for dim_name, dim_units in zip(usi_main.pos_dim_labels,
                                           get_attr(usi_main.h5_pos_inds, 'units')):
                pos_dims.append(Dimension(dim_name, dim_units, h5_raw_grp[dim_name][()]))
            for dim_name, dim_units in zip(usi_main.spec_dim_labels,
                                           get_attr(
                                               usi_main.h5_spec_inds, 'units')):
                spec_dims.append(Dimension(dim_name, dim_units, h5_raw_grp[dim_name][()]))
        return pos_dims, spec_dims

    def setUp(self):
        """Extend the base fixture with Dimension lists and name->Dimension maps."""
        super(TestGetDimsForSliceReal, self).setUp()
        self.pos_dims, self.spec_dims = self.get_all_dimensions()
        # Placeholder Dimension expected when all dims on one side are removed
        self.default_dimension = Dimension('arb.', 'a. u.', [1])
        self.pos_dict = dict()
        self.spec_dict = dict()
        for item in self.pos_dims:
            self.pos_dict[item.name] = item
        for item in self.spec_dims:
            self.spec_dict[item.name] = item

    def __validate_dim_list(self, expected, actual):
        """Assert two lists of Dimension objects are element-wise equal."""
        self.assertIsInstance(expected, list)
        self.assertIsInstance(expected, list)
        self.assertEqual(len(expected), len(actual))
        for left, right in zip(expected, actual):
            self.assertEqual(left, right)

    def base(self, slice_dict, pos_exp, spec_exp, verbose=False):
        """Slice and compare the surviving position / spectroscopic Dimensions."""
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            usi_main = USIDataset(h5_f['/Raw_Measurement/source_main'])
            pos_act, spec_act = usi_main._get_dims_for_slice(slice_dict=slice_dict,
                                                             verbose=verbose)
            if verbose:
                print(pos_act)
                print(spec_act)
            self.__validate_dim_list(pos_act, pos_exp)
            self.__validate_dim_list(spec_act, spec_exp)

    def test_empty(self):
        self.base(None, self.pos_dims, self.spec_dims)

    def test_single_pos_dim_sliced(self):
        self.base({'X': 2}, [self.pos_dict['Y']], self.spec_dims)

    def test_single_pos_dim_truncated(self):
        # X keeps only the values selected by the slice
        new_pos_dims = list()
        for item in self.pos_dims:
            if item.name == 'X':
                new_pos_dims.append(Dimension(item.name, item.units, item.values[slice(1, 5, 2)]))
            else:
                new_pos_dims.append(item)
        self.base({'X': slice(1, 5, 2)}, new_pos_dims, self.spec_dims)

    def test_both_pos_dim_sliced(self):
        # Removing both position dims collapses to the placeholder dimension
        self.base({'X': 2, 'Y': 0}, [self.default_dimension], self.spec_dims)

    def test_single_spec_dim_sliced(self):
        self.base({'Bias': 2}, self.pos_dims, [self.spec_dict['Cycle']])

    def test_single_spec_dim_truncated(self):
        new_spec_dims = list()
        for item in self.spec_dims:
            if item.name == 'Bias':
                new_spec_dims.append(Dimension(item.name, item.units, item.values[slice(1, 7, 3)]))
            else:
                new_spec_dims.append(item)
        self.base({'Bias': slice(1, 7, 3)}, self.pos_dims, new_spec_dims)

    def test_both_spec_dim_sliced(self):
        self.base({'Bias': 4, 'Cycle': 1}, self.pos_dims, [self.default_dimension],
                  verbose=False)

    def test_one_pos_one_spec_dims_sliced(self):
        self.base({'X': 1, 'Bias': 2}, [self.pos_dict['Y']], [self.spec_dict['Cycle']])

    def test_all_dims_sliced(self):
        self.base({'X': 1, 'Y': 2, 'Bias': 4, 'Cycle': 1},
                  [self.default_dimension], [self.default_dimension])
def get_tick_labels(tick_labels):
    """Return the numeric values of matplotlib tick-label artists as a float array."""
    values = []
    for label in tick_labels:
        values.append(float(label.get_text()))
    return np.array(values)
def validate_imshow(self, axis, exp_data, title=None, x_vec=None, y_vec=None,
                    x_label=None, y_label=None, verbose=False):
    """Assert that a matplotlib axis holds exactly one image matching exp_data.

    Optionally checks title, axis labels, and that the first/last tick labels
    agree with the provided x_vec / y_vec extents.

    Bug fix: x_vec and y_vec default to None but were unconditionally indexed
    in the tick-label checks, so omitting them raised TypeError instead of
    skipping the check (the title/label arguments were already guarded).
    """
    self.assertIsInstance(axis, mpl.axes.Axes)
    im_handles = [obj for obj in axis.get_children() if
                  isinstance(obj, mpl.image.AxesImage)]
    # Exactly one image should have been drawn on this axis
    self.assertEqual(len(im_handles), 1)
    im_handle = im_handles[0]
    actual_data = im_handle.get_array().data
    if verbose:
        print(actual_data.shape, exp_data.shape)
    self.assertTrue(np.allclose(actual_data, exp_data))
    if title is not None:
        self.assertEqual(axis.get_title(), title)
    if x_label is not None:
        self.assertEqual(axis.get_xlabel(), x_label)
    if y_label is not None:
        self.assertEqual(axis.get_ylabel(), y_label)
    self.assertEqual(axis.get_xscale(), 'linear')
    # Only compare tick extents when reference vectors were supplied
    if x_vec is not None:
        x_ref = get_tick_labels(axis.get_xticklabels())
        self.assertAlmostEqual(x_ref[0], np.round(x_vec[0], 2))
        self.assertAlmostEqual(x_ref[-1], np.round(x_vec[-1], 2))
    if y_vec is not None:
        y_ref = get_tick_labels(axis.get_yticklabels())
        self.assertAlmostEqual(y_ref[0], np.round(y_vec[0], 2))
        self.assertAlmostEqual(y_ref[-1], np.round(y_vec[-1], 2))
def validate_single_curve(self, axis, x_vec, y_vec, title=None, x_label=None,
                          y_label=None):
    """Assert that a matplotlib axis holds exactly one line matching (x_vec, y_vec)."""
    self.assertIsInstance(axis, mpl.axes.Axes)
    curves = [child for child in axis.get_children() if
              isinstance(child, mpl.lines.Line2D)]
    # Exactly one curve should have been drawn on this axis
    self.assertEqual(len(curves), 1)
    curve = curves[0]
    # for each curve in the plot:
    self.assertTrue(np.allclose(curve.get_xdata(), x_vec))
    self.assertTrue(np.allclose(curve.get_ydata(), y_vec))
    # verify legend
    if x_label is not None:
        self.assertEqual(axis.get_xlabel(), x_label)
    if y_label is not None:
        self.assertEqual(axis.get_ylabel(), y_label)
    if title is not None:
        self.assertEqual(axis.get_title(), title)
    # verify fig suptitles
# verify fig suptitles
"""
def validate_subplots(axes):
pass
"""
class TestSimpleStaticVisualizationReal(TestUSIDatasetReal):
    """Tests for ``USIDataset.visualize`` against the shared real HDF5 fixture.

    Each test slices the main dataset (dimensions named X, Y, Bias, Cycle in
    the slice dicts below) down to 2D (image) or 1D (curve) and checks the
    returned matplotlib axis via the module-level ``validate_imshow`` /
    ``validate_single_curve`` helpers. Every test is a no-op when
    ``skip_viz_tests`` (defined earlier in this module) is truthy.
    """
    def test_two_pos_simple(self):
        """Fix Bias and Cycle -> image over the two position dimensions."""
        if skip_viz_tests: return
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset_path = '/Raw_Measurement/source_main'
            usi_main = USIDataset(h5_f[dset_path])
            slice_dict = {'Bias': 0, 'Cycle': 1}
            exp_data, success = usi_main.slice(slice_dict=slice_dict)
            self.assertTrue(success)
            fig, axis = usi_main.visualize(slice_dict=slice_dict)
            # x axis <- second position dimension, y axis <- first
            validate_imshow(self, axis, exp_data, title=dset_path,
                            x_vec=h5_f['/Raw_Measurement/' + usi_main.pos_dim_labels[1]],
                            y_vec=h5_f['/Raw_Measurement/' + usi_main.pos_dim_labels[0]],
                            x_label=usi_main.pos_dim_descriptors[1],
                            y_label=usi_main.pos_dim_descriptors[0])
    def test_two_spec(self):
        """Fix X and Y -> image over the two spectroscopic dimensions."""
        if skip_viz_tests: return
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset_path = '/Raw_Measurement/source_main'
            usi_main = USIDataset(h5_f[dset_path])
            slice_dict = {'X': 3, 'Y': 2}
            exp_data, success = usi_main.slice(slice_dict=slice_dict)
            self.assertTrue(success)
            fig, axis = usi_main.visualize(slice_dict=slice_dict)
            validate_imshow(self, axis, exp_data, title=dset_path,
                            x_vec=h5_f['/Raw_Measurement/' + usi_main.spec_dim_labels[1]],
                            y_vec=h5_f['/Raw_Measurement/' + usi_main.spec_dim_labels[0]],
                            x_label=usi_main.spec_dim_descriptors[1],
                            y_label=usi_main.spec_dim_descriptors[0])
    def test_one_pos_one_spec(self):
        """Fix one position (X) and one spectroscopic (Bias) dim -> mixed image."""
        if skip_viz_tests: return
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset_path = '/Raw_Measurement/source_main'
            usi_main = USIDataset(h5_f[dset_path])
            slice_dict = {'X': 3, 'Bias': 2}
            exp_data, success = usi_main.slice(slice_dict=slice_dict)
            self.assertTrue(success)
            fig, axis = usi_main.visualize(slice_dict=slice_dict)
            # remaining free dimensions: Cycle (spectroscopic) and Y (position)
            spec_ind = usi_main.spec_dim_labels.index('Cycle')
            pos_ind = usi_main.pos_dim_labels.index('Y')
            validate_imshow(self, axis, exp_data, title=dset_path,
                            x_vec=h5_f['/Raw_Measurement/' + usi_main.spec_dim_labels[spec_ind]],
                            y_vec=h5_f['/Raw_Measurement/' + usi_main.pos_dim_labels[pos_ind]],
                            x_label=usi_main.spec_dim_descriptors[spec_ind],
                            y_label=usi_main.pos_dim_descriptors[pos_ind])
    def test_one_pos(self):
        """All but one position dimension (X) fixed -> single curve vs. X."""
        if skip_viz_tests: return
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset_path = '/Raw_Measurement/source_main'
            usi_main = USIDataset(h5_f[dset_path])
            slice_dict = {'Bias': 4, 'Cycle': 1, 'Y': 2}
            rem_dim_name = 'X'
            pos_ind = usi_main.pos_dim_labels.index(rem_dim_name)
            exp_data, success = usi_main.slice(slice_dict=slice_dict)
            self.assertTrue(success)
            fig, axis = usi_main.visualize(slice_dict=slice_dict)
            validate_single_curve(self, axis, h5_f['/Raw_Measurement/' + rem_dim_name],
                                  exp_data,
                                  title=dset_path,
                                  x_label=usi_main.pos_dim_descriptors[pos_ind],
                                  y_label=usi_main.data_descriptor)
    def test_one_spec(self):
        """All but one spectroscopic dimension (Cycle) fixed -> single curve."""
        if skip_viz_tests: return
        with h5py.File(test_h5_file_path, mode='r') as h5_f:
            dset_path = '/Raw_Measurement/source_main'
            usi_main = USIDataset(h5_f[dset_path])
            slice_dict = {'Bias': 4, 'X': 1, 'Y': 2}
            rem_dim_name = 'Cycle'
            spec_ind = usi_main.spec_dim_labels.index(rem_dim_name)
            exp_data, success = usi_main.slice(slice_dict=slice_dict)
            self.assertTrue(success)
            fig, axis = usi_main.visualize(slice_dict=slice_dict)
            validate_single_curve(self, axis, h5_f['/Raw_Measurement/' + rem_dim_name],
                                  exp_data,
                                  title=dset_path,
                                  x_label=usi_main.spec_dim_descriptors[spec_ind],
                                  y_label=usi_main.data_descriptor)
"""
def test_no_dims(self):
pass
def test_more_than_2_dims(self):
pass
class TestComplexStaticVisualization(unittest.TestCase):
def setUp(self):
self.file_path = 'complex_viz.h5'
pass
def tearDown(self):
os.remove(self.file_path)
def test_image(self):
pass
def test_curve(self):
pass
class TestCompoundStaticVisualization(unittest.TestCase):
def setUp(self):
self.file_path = 'complex_viz.h5'
pass
def tearDown(self):
os.remove(self.file_path)
def test_image(self):
pass
def test_curve(self):
pass
"""
# Allow running this test module directly (``python <this file>``)
if __name__ == '__main__':
    unittest.main()
| 38,183 | 41.521158 | 110 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_write_utils.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import sys
import numpy as np
sys.path.append("../../pyUSID/")
from pyUSID.io import anc_build_utils
# Python 2/3 compatibility shim: alias ``unicode`` to ``str`` on Python 3
if sys.version_info.major == 3:
    unicode = str
class TestMakeIndicesMatrix(unittest.TestCase):
    """Tests for ``anc_build_utils.make_indices_matrix``.

    Spectroscopic output (``is_position=False``) is shaped
    (num_dims, num_points); position output is its transpose. Note the
    asymmetry exercised below: a size-1 dimension inside a multi-entry list
    is rejected, while a bare ``1`` or ``[1]`` on its own is accepted.
    """
    def test_dim_w_val_1(self):
        """A size-1 dimension mixed with others is rejected."""
        with self.assertRaises(ValueError):
            _ = anc_build_utils.make_indices_matrix([1, 2, 3])
    def test_just_size_of_one_dim(self):
        """A bare int is treated as a single dimension of that size."""
        expected = np.expand_dims(np.arange(4), axis=0)
        ret_val = anc_build_utils.make_indices_matrix(4, is_position=False)
        self.assertTrue(np.allclose(expected, ret_val))
    def test_empty_list(self):
        """No dimensions at all is an error."""
        with self.assertRaises(ValueError):
            _ = anc_build_utils.make_indices_matrix([])
    def test_single_value_dimension_int_input(self):
        """A lone dimension of size 1 given as an int is accepted."""
        expected = np.expand_dims(np.arange(1), axis=0)
        ret_val = anc_build_utils.make_indices_matrix(1, is_position=False)
        self.assertTrue(np.allclose(expected, ret_val))
    def test_single_value_dimension_list_input(self):
        """A lone dimension of size 1 given as ``[1]`` is accepted."""
        expected = np.expand_dims(np.arange(1), axis=0)
        ret_val = anc_build_utils.make_indices_matrix([1], is_position=False)
        self.assertTrue(np.allclose(expected, ret_val))
    def test_non_int_dim_sizes(self):
        """Fractional dimension sizes are rejected."""
        with self.assertRaises(ValueError):
            _ = anc_build_utils.make_indices_matrix([1.233, 2.4, 3])
    def test_weird_inputs(self):
        """Non-numeric entries are rejected."""
        with self.assertRaises(ValueError):
            _ = anc_build_utils.make_indices_matrix([2, 'hello', 3])
    def test_matrix_1_dims(self):
        """One dimension: position output is the transpose of spectroscopic."""
        expected = np.expand_dims(np.arange(4), axis=0)
        ret_val = anc_build_utils.make_indices_matrix([4], is_position=False)
        self.assertTrue(np.allclose(expected, ret_val))
        ret_val = anc_build_utils.make_indices_matrix([4], is_position=True)
        self.assertTrue(np.allclose(expected.T, ret_val))
    def test_2_dims(self):
        """Two dims: first varies fastest (tile), second slowest (repeat)."""
        expected = np.vstack((np.tile(np.arange(2), 3),
                              np.repeat(np.arange(3), 2)))
        ret_val = anc_build_utils.make_indices_matrix([2, 3], is_position=False)
        self.assertTrue(np.allclose(expected, ret_val))
        ret_val = anc_build_utils.make_indices_matrix([2, 3], is_position=True)
        self.assertTrue(np.allclose(expected.T, ret_val))
    def test_3_dims(self):
        """Three dims follow the same fastest-to-slowest ordering."""
        expected = np.vstack((np.tile(np.arange(2), 3 * 4),
                              np.tile(np.repeat(np.arange(3), 2), 4),
                              np.repeat(np.arange(4), 6)))
        ret_val = anc_build_utils.make_indices_matrix([2, 3, 4], is_position=False)
        self.assertTrue(np.allclose(expected, ret_val))
        ret_val = anc_build_utils.make_indices_matrix([2, 3, 4], is_position=True)
        self.assertTrue(np.allclose(expected.T, ret_val))
class TestGetAuxDsetSlicing(unittest.TestCase):
    """Tests for ``anc_build_utils.get_aux_dset_slicing``.

    Spectroscopic slicing addresses rows; position slicing addresses columns.
    """
    def test_legal_single_dim(self):
        """A lone dimension maps to the first row / column."""
        actual = anc_build_utils.get_aux_dset_slicing(['X'], is_spectroscopic=True)
        self.assertEqual(actual, {'X': (slice(0, 1), slice(None))})
        actual = anc_build_utils.get_aux_dset_slicing(['X'], is_spectroscopic=False)
        self.assertEqual(actual, {'X': (slice(None), slice(0, 1))})
    def test_legal_multi_dim(self):
        """Each dimension gets consecutive rows (spectroscopic) or columns."""
        names = ['X', 'Y']
        actual = anc_build_utils.get_aux_dset_slicing(names, is_spectroscopic=True)
        expected = {name: (slice(ind, ind + 1), slice(None))
                    for ind, name in enumerate(names)}
        self.assertEqual(actual, expected)
        actual = anc_build_utils.get_aux_dset_slicing(names, is_spectroscopic=False)
        expected = {name: (slice(None), slice(ind, ind + 1))
                    for ind, name in enumerate(names)}
        self.assertEqual(actual, expected)
    def test_odd_input(self):
        """Non-string names raise TypeError; an empty list raises ValueError."""
        with self.assertRaises(TypeError):
            _ = anc_build_utils.get_aux_dset_slicing([1, 'Y'], is_spectroscopic=True)
        with self.assertRaises(ValueError):
            _ = anc_build_utils.get_aux_dset_slicing([], is_spectroscopic=True)
class TestBuildIndValMatrices(unittest.TestCase):
    """Tests for ``anc_build_utils.build_ind_val_matrices``."""
    def test_empty(self):
        """A single dimension holding one value yields 1x1 indices / values."""
        inds, vals = anc_build_utils.build_ind_val_matrices([[0]], is_spectral=True)
        self.assertTrue(np.allclose(inds, anc_build_utils.INDICES_DTYPE(np.expand_dims(np.arange(1), 0))))
        self.assertTrue(np.allclose(vals, anc_build_utils.VALUES_DTYPE(np.expand_dims(np.arange(1), 0))))
    def test_1D(self):
        """Spectral: one dim -> row vector of indices and of the raw values."""
        sine_val = np.sin(np.linspace(0, 2*np.pi, 128))
        inds, vals = anc_build_utils.build_ind_val_matrices([sine_val], is_spectral=True)
        self.assertTrue(np.allclose(inds, anc_build_utils.INDICES_DTYPE(np.expand_dims(np.arange(len(sine_val)), axis=0))))
        self.assertTrue(np.allclose(vals, anc_build_utils.VALUES_DTYPE(np.expand_dims(sine_val, axis=0))))
    def test_1D_pos(self):
        """Position: same data but transposed into column vectors."""
        sine_val = np.sin(np.linspace(0, 2 * np.pi, 128))
        inds, vals = anc_build_utils.build_ind_val_matrices([sine_val], is_spectral=False)
        self.assertTrue(np.allclose(inds, anc_build_utils.INDICES_DTYPE(np.expand_dims(np.arange(len(sine_val)), axis=1))))
        self.assertTrue(np.allclose(vals, anc_build_utils.VALUES_DTYPE(np.expand_dims(sine_val, axis=1))))
    def test_3D(self):
        """Bipolar triangular bias x 2 fields x 3 cycles, first dim fastest."""
        max_v = 4
        half_pts = 8
        # bipolar triangular waveform, rolled so it starts at 0
        bi_triang = np.roll(np.hstack((np.linspace(-max_v, max_v, half_pts, endpoint=False),
                                       np.linspace(max_v, -max_v, half_pts, endpoint=False))), -half_pts // 2)
        cycles = [0, 1, 2]
        fields = [0, 1]
        # expected: bias tiles fastest, fields in the middle, cycles slowest
        exp_vals = np.vstack((np.tile(bi_triang, 6), np.tile(np.repeat(fields, 2 * half_pts), 3),
                              np.repeat(cycles, 2 * 2 * half_pts)))
        exp_inds = np.vstack((np.tile(np.arange(2 * half_pts), 6), np.tile(np.repeat(fields, 2 * half_pts), 3),
                              np.repeat(cycles, 2 * 2 * half_pts)))
        inds, vals = anc_build_utils.build_ind_val_matrices([bi_triang, fields, cycles])
        self.assertTrue(np.allclose(exp_inds, inds))
        self.assertTrue(np.allclose(exp_vals, vals))
    def test_invalid_inputs(self):
        """Non-list input raises TypeError; ragged entries raise ValueError."""
        with self.assertRaises(TypeError):
            _ = anc_build_utils.build_ind_val_matrices("not a list of arrays")
        with self.assertRaises(ValueError):
            _ = anc_build_utils.build_ind_val_matrices([[0, 1], np.random.randint(0, high=5, size=(3, 4))])
class TestCreateSpecIndsFromVals(unittest.TestCase):
    """Tests for ``anc_build_utils.create_spec_inds_from_vals``."""
    def test_legal(self):
        """Indices are recovered from a 3-dim (bias x field x cycle) values table."""
        max_v = 4
        half_pts = 8
        # bipolar triangular bias waveform, rolled so it starts at 0
        bi_triang = np.roll(np.hstack((np.linspace(-max_v, max_v, half_pts, endpoint=False),
                                       np.linspace(max_v, -max_v, half_pts, endpoint=False))), -half_pts // 2)
        cycles = [0, 1, 2]
        fields = [0, 1]
        exp_vals = np.vstack((np.tile(bi_triang, 6), np.tile(np.repeat(fields, 2 * half_pts), 3),
                              np.repeat(cycles, 2 * 2 * half_pts)))
        # expected indices mirror the values table with positions instead of values
        exp_inds = np.vstack((np.tile(np.arange(2 * half_pts), 6), np.tile(np.repeat(fields, 2 * half_pts), 3),
                              np.repeat(cycles, 2 * 2 * half_pts)))
        inds = anc_build_utils.create_spec_inds_from_vals(exp_vals)
        self.assertTrue(np.allclose(inds, exp_inds))
    def test_invalid_inputs(self):
        """Plain lists raise TypeError; arrays of rank != 2 raise ValueError."""
        with self.assertRaises(TypeError):
            _ = anc_build_utils.create_spec_inds_from_vals([[0, 1, 0, 1],
                                                            [0, 0, 1, 1]])
        with self.assertRaises(ValueError):
            _ = anc_build_utils.create_spec_inds_from_vals(np.random.rand(2, 3, 4))
class TestCalcChunks(unittest.TestCase):
    """Tests for ``anc_build_utils.calc_chunks``: defaults, unit chunks and
    the ``max_chunk_mem`` cap, plus input validation."""
    def test_no_unit_chunk(self):
        """Default chunking of a 16384 x 65536 float32-sized dataset."""
        chunks = anc_build_utils.calc_chunks((16384, 16384 * 4), 4,
                                             unit_chunks=None)
        self.assertTrue(np.allclose(chunks, (26, 100)))
    def test_unit_chunk(self):
        """Chunk sizes become multiples of the requested unit chunks."""
        chunks = anc_build_utils.calc_chunks((16384, 16384 * 4), 4,
                                             unit_chunks=(3, 7))
        self.assertTrue(np.allclose(chunks, (27, 98)))
    def test_no_unit_chunk_max_mem(self):
        """A larger memory cap permits larger chunks."""
        chunks = anc_build_utils.calc_chunks((16384, 16384 * 4), 4,
                                             unit_chunks=None,
                                             max_chunk_mem=50000)
        self.assertTrue(np.allclose(chunks, (56, 224)))
    def test_unit_chunk_max_mem(self):
        """Memory cap combined with unit chunks."""
        chunks = anc_build_utils.calc_chunks((16384, 16384 * 4), 4,
                                             unit_chunks=(3, 7),
                                             max_chunk_mem=50000)
        self.assertTrue(np.allclose(chunks, (57, 224)))
    def test_unit_not_iterable(self):
        """A scalar unit_chunks is rejected."""
        with self.assertRaises(TypeError):
            _ = anc_build_utils.calc_chunks((16384, 16384 * 4), 4,
                                            unit_chunks=4)
    def test_shape_mismatch(self):
        """unit_chunks must have one entry per dimension."""
        with self.assertRaises(ValueError):
            _ = anc_build_utils.calc_chunks((16384, 16384 * 4), 4,
                                            unit_chunks=(1, 5, 9))
    def test_invalid_types(self):
        """Non-iterable dimensions and fractional byte sizes are rejected."""
        with self.assertRaises(TypeError):
            _ = anc_build_utils.calc_chunks("Fdfd", 14)
        with self.assertRaises(TypeError):
            _ = anc_build_utils.calc_chunks((16384, 16384 * 4), 2.124)
# Allow running this test module directly (``python <this file>``)
if __name__ == '__main__':
    unittest.main()
| 9,868 | 41.908696 | 123 | py |
pyUSID-legacy | pyUSID-master-legacy/tests/io/test_array_translator.py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import sys
import h5py
import numpy as np
import dask.array as da
from .data_utils import validate_aux_dset_pair, delete_existing_file
sys.path.append("../../pyUSID/")
from pyUSID.io import ArrayTranslator, Dimension, hdf_utils, USIDataset
# Python 2/3 compatibility shim: alias ``unicode`` to ``str`` on Python 3
if sys.version_info.major == 3:
    unicode = str
file_path = 'test_array_translator.h5'
class TestArrayTranslator(unittest.TestCase):
    """Base class providing the end-to-end translation + verification routine
    shared by the concrete ``ArrayTranslator`` test classes below."""
    def base_translation_tester(self, main_dset_as_dask=False, extra_dsets_type='numpy', use_parm_dict=True):
        """Translate a small 2D dataset and verify the resulting HDF5 layout.

        Parameters
        ----------
        main_dset_as_dask : bool, optional
            Supply the main dataset as a dask array instead of numpy.
        extra_dsets_type : str or None, optional
            'numpy' or 'dask' to attach two extra datasets of that flavor;
            anything else skips extra datasets entirely.
        use_parm_dict : bool, optional
            Attach a parameter dictionary and verify it round-trips as
            attributes on the measurement group.
        """
        data_name = 'My_Awesome_Measurement'
        # metadata expected to land as attributes of Measurement_000
        if use_parm_dict:
            attrs = {'att_1': 'string_val',
                     'att_2': 1.2345,
                     'att_3': [1, 2, 3, 4],
                     'att_4': ['str_1', 'str_2', 'str_3']}
        else:
            attrs = None
        extra_dsets = {}
        if extra_dsets_type is not None:
            ref_dsets = {'dset_1': np.random.rand(5), 'dset_2': np.arange(25)}
            if extra_dsets_type == 'numpy':
                extra_dsets = ref_dsets
            elif extra_dsets_type == 'dask':
                for key, val in ref_dsets.items():
                    extra_dsets.update({key: da.from_array(val, chunks=val.shape)})
            else:
                # unrecognized flavor: fall back to no extra datasets
                extra_dsets_type = None
        delete_existing_file(file_path)
        # main data: 15 positions (5 x 3) by 14 spectral points (7 x 2)
        main_data = np.random.rand(15, 14)
        if main_dset_as_dask:
            main_data = da.from_array(main_data, chunks=main_data.shape)
        quantity = 'Current'
        units = 'nA'
        pos_sizes = [5, 3]
        pos_names = ['X', 'Y']
        pos_units = ['nm', 'um']
        pos_dims = []
        for name, unit, length in zip(pos_names, pos_units, pos_sizes):
            pos_dims.append(Dimension(name, unit, np.arange(length)))
        # expected position indices/values table: X varies fastest
        pos_data = np.vstack((np.tile(np.arange(5), 3),
                              np.repeat(np.arange(3), 5))).T
        spec_sizes = [7, 2]
        spec_names = ['Bias', 'Cycle']
        spec_units = ['V', '']
        spec_dims = []
        for name, unit, length in zip(spec_names, spec_units, spec_sizes):
            spec_dims.append(Dimension(name, unit, np.arange(length)))
        # expected spectroscopic table: Bias varies fastest
        spec_data = np.vstack((np.tile(np.arange(7), 2),
                               np.repeat(np.arange(2), 7)))
        translator = ArrayTranslator()
        _ = translator.translate(file_path, data_name, main_data, quantity, units, pos_dims, spec_dims, parm_dict=attrs,
                                 extra_dsets=extra_dsets)
        # reopen read-only and verify the h5USID tree written by the translator
        with h5py.File(file_path, mode='r') as h5_f:
            # we are not interested in most of the attributes under root besides two:
            self.assertEqual(data_name, hdf_utils.get_attr(h5_f, 'data_type'))
            # self.assertEqual('NumpyTranslator', hdf_utils.get_attr(h5_f, 'translator'))
            # First level should have absolutely nothing besides one group
            self.assertEqual(len(h5_f.items()), 1)
            self.assertTrue('Measurement_000' in h5_f.keys())
            h5_meas_grp = h5_f['Measurement_000']
            self.assertIsInstance(h5_meas_grp, h5py.Group)
            # check the attributes under this group
            # self.assertEqual(len(h5_meas_grp.attrs), len(attrs))
            if use_parm_dict:
                for key, expected_val in attrs.items():
                    self.assertTrue(np.all(hdf_utils.get_attr(h5_meas_grp, key) == expected_val))
            # Again, this group should only have one group - Channel_000
            self.assertEqual(len(h5_meas_grp.items()), 1)
            self.assertTrue('Channel_000' in h5_meas_grp.keys())
            h5_chan_grp = h5_meas_grp['Channel_000']
            self.assertIsInstance(h5_chan_grp, h5py.Group)
            # This channel group is not expected to have any (custom) attributes but it will contain the main dataset
            self.assertEqual(len(h5_chan_grp.items()), 5 + len(extra_dsets))
            for dset_name in ['Raw_Data', 'Position_Indices', 'Position_Values', 'Spectroscopic_Indices',
                              'Spectroscopic_Values']:
                self.assertTrue(dset_name in h5_chan_grp.keys())
                h5_dset = h5_chan_grp[dset_name]
                self.assertIsInstance(h5_dset, h5py.Dataset)
            usid_main = USIDataset(h5_chan_grp['Raw_Data'])
            self.assertIsInstance(usid_main, USIDataset)
            self.assertEqual(usid_main.name.split('/')[-1], 'Raw_Data')
            self.assertEqual(usid_main.parent, h5_chan_grp)
            self.assertTrue(np.allclose(main_data, usid_main[()]))
            # verify both ancillary dataset pairs against the expected tables
            validate_aux_dset_pair(self, h5_chan_grp, usid_main.h5_pos_inds, usid_main.h5_pos_vals, pos_names, pos_units,
                                   pos_data, h5_main=usid_main, is_spectral=False)
            validate_aux_dset_pair(self, h5_chan_grp, usid_main.h5_spec_inds, usid_main.h5_spec_vals, spec_names,
                                   spec_units,
                                   spec_data, h5_main=usid_main, is_spectral=True)
            # Now validate each of the extra datasets:
            if extra_dsets_type is not None:
                for key, val in extra_dsets.items():
                    self.assertTrue(key in h5_chan_grp.keys())
                    h5_dset = h5_chan_grp[key]
                    self.assertIsInstance(h5_dset, h5py.Dataset)
                    if extra_dsets_type == 'dask':
                        val = val.compute()
                    self.assertTrue(np.allclose(val, h5_dset[()]))
        os.remove(file_path)
class TestBaseOperations(TestArrayTranslator):
    """Happy-path translations covering numpy vs. dask inputs and metadata."""
    def test_preexisting_file_OK(self):
        """Translation succeeds even when the target HDF5 file already exists."""
        with h5py.File(file_path, mode='w') as _:
            pass
        self.base_translation_tester(main_dset_as_dask=False,
                                     extra_dsets_type='numpy',
                                     use_parm_dict=False)
    def test_quick_numpy_translation(self):
        """Plain numpy main + extra datasets, no metadata."""
        self.base_translation_tester(main_dset_as_dask=False,
                                     extra_dsets_type='numpy',
                                     use_parm_dict=False)
    def test_quick_numpy_tranlsation_plus_parms(self):
        """Numpy data plus a parameter dictionary (name keeps original typo)."""
        self.base_translation_tester(main_dset_as_dask=False,
                                     extra_dsets_type='numpy',
                                     use_parm_dict=True)
    def test_quick_dask_main_translation(self):
        """Dask main dataset with numpy extra datasets."""
        self.base_translation_tester(main_dset_as_dask=True,
                                     extra_dsets_type='numpy',
                                     use_parm_dict=False)
    def test_all_dsets_as_dask(self):
        """Both main and extra datasets supplied as dask arrays."""
        self.base_translation_tester(main_dset_as_dask=True,
                                     extra_dsets_type='dask',
                                     use_parm_dict=False)
class TestIllegalStringParms(TestArrayTranslator):
    """Non-string name / quantity / units arguments must raise TypeError."""
    def test_not_strings(self):
        xlator = ArrayTranslator()
        pos_dim = Dimension('Position_Dim', 'au', 5)
        spec_dim = Dimension('Spec_Dim', 'au', 3)
        # data name that is not a string
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 1.2345, np.random.rand(5, 3),
                                 'quant', 'unit', pos_dim, spec_dim)
        # quantity that is not a string
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(5, 3),
                                 {'quant': 1}, 'unit', pos_dim, spec_dim)
        # units that is not a string
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(5, 3),
                                 'quant', ['unit'], pos_dim, spec_dim)
class TestIllegalMainDataset(TestArrayTranslator):
    """The main dataset must be a 2D numpy or dask array."""
    def test_not_numpy_or_dask_array_main(self):
        """A dict is not an acceptable main dataset."""
        xlator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', {'This is not a dataset': True},
                                 'quant', 'unit',
                                 Dimension('Position_Dim', 'au', 5),
                                 Dimension('Spec_Dim', 'au', 3))
    def test_main_dset_1D(self):
        """1D arrays (numpy and dask alike) are rejected."""
        xlator = ArrayTranslator()
        for main_data in (np.arange(4),
                          da.from_array(np.arange(4), chunks=(4))):
            with self.assertRaises(ValueError):
                delete_existing_file(file_path)
                _ = xlator.translate(file_path, 'Blah', main_data, 'quant', 'unit',
                                     Dimension('Position_Dim', 'au', 5),
                                     Dimension('Spec_Dim', 'au', 3))
    def test_main_dset_2D(self):
        """Despite the name, this feeds 3D data and expects rejection."""
        xlator = ArrayTranslator()
        raw = np.random.rand(2, 3, 4)
        for main_data in (raw, da.from_array(raw, chunks=(2, 3, 4))):
            with self.assertRaises(ValueError):
                delete_existing_file(file_path)
                _ = xlator.translate(file_path, 'Blah', main_data, 'quant', 'unit',
                                     Dimension('Position_Dim', 'au', 5),
                                     Dimension('Spec_Dim', 'au', 3))
class TestExtraDatasets(TestArrayTranslator):
    """Validation of the optional ``extra_dsets`` argument to translate()."""
    def _assert_translate_fails(self, extra_dsets, expected_error):
        # Run a minimal, otherwise-valid translation with the given
        # extra_dsets and check that it raises expected_error.
        xlator = ArrayTranslator()
        with self.assertRaises(expected_error):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(5, 3),
                                 'quant', 'unit',
                                 Dimension('Position_Dim', 'au', 5),
                                 Dimension('Spec_Dim', 'au', 3),
                                 extra_dsets=extra_dsets)
    def test_not_dicts(self):
        """extra_dsets must be a dictionary."""
        self._assert_translate_fails(np.arange(4), TypeError)
    def test_not_str_names(self):
        """Dataset names must be strings."""
        self._assert_translate_fails({14: np.arange(4),
                                      'Blah_other': np.arange(15)}, TypeError)
    def test_reserved_names(self):
        """Names reserved for ancillary datasets are refused."""
        self._assert_translate_fails({'Spectroscopic_Indices': np.arange(4),
                                      'Blah_other': np.arange(15)}, KeyError)
    def test_not_arrays(self):
        """Dataset values must be array-like, not strings."""
        self._assert_translate_fails({'Blah_other': 'I am not an array'}, TypeError)
    def test_empty_name(self):
        """Whitespace-only names are refused."""
        self._assert_translate_fails({' ': [1, 2, 3]}, ValueError)
class TestIllegalDimensions(TestArrayTranslator):
    """Dimension arguments that disagree with the main data shape or type."""
    def test_position(self):
        """5 x 4 position points cannot describe 15 rows of data."""
        xlator = ArrayTranslator()
        with self.assertRaises(ValueError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(15, 3),
                                 'quant', 'unit',
                                 [Dimension('Dim_1', 'au', 5),
                                  Dimension('Dim_2', 'au', 4)],
                                 Dimension('Spec_Dim', 'au', 3))
    def test_spec(self):
        """3 x 4 spectroscopic points cannot describe 13 columns of data."""
        xlator = ArrayTranslator()
        with self.assertRaises(ValueError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(5, 13),
                                 'quant', 'unit',
                                 Dimension('Dim_1', 'au', 5),
                                 [Dimension('Spec_Dim', 'au', 3),
                                  Dimension('Dim_2', 'au', 4)])
    def test_object_single(self):
        """A bare string is not an acceptable Dimension."""
        xlator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(5, 13),
                                 'quant', 'unit',
                                 'my_string_Dimension',
                                 [Dimension('Spec_Dim', 'au', 3),
                                  Dimension('Dim_2', 'au', 4)])
    def test_objects(self):
        """A list mixing non-Dimension entries is rejected too."""
        xlator = ArrayTranslator()
        with self.assertRaises(TypeError):
            delete_existing_file(file_path)
            _ = xlator.translate(file_path, 'Blah', np.random.rand(5, 13),
                                 'quant', 'unit',
                                 Dimension('Dim_1', 'au', 5),
                                 ['blah', Dimension('Dim_2', 'au', 4)])
# Allow running this test module directly (``python <this file>``)
if __name__ == '__main__':
    unittest.main()
| 14,205 | 44.825806 | 121 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.