keyword stringclasses 7 values | repo_name stringlengths 8 98 | file_path stringlengths 4 244 | file_extension stringclasses 29 values | file_size int64 0 84.1M | line_count int64 0 1.6M | content stringlengths 1 84.1M ⌀ | language stringclasses 14 values |
|---|---|---|---|---|---|---|---|
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_top.m | .m | 769 | 27 | % PQ_TOP queries for the topmost element of the priority queue (not removing it)
%
% SYNTAX
% [idx, cost] = pq_top(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% idx: the index of the topmost element
% cost: the cost of the topmost element
%
% DESCRIPTION
% Queries the topmost element from a priority queue returning its
% index and associated cost.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_create.cpp | .cpp | 1,760 | 55 | //==============================================================================
// Name : myheaps_demo.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : creates a (top-down) priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
void retrieve_data( const mxArray* prhs, int& nelems){
// retrieve pointer from the MX form
// check that I actually received something
// if( data == NULL )
// mexErrMsgTxt("vararg{2} must be a [kxN] matrix of data\n");
nelems = (int) mxGetScalar(prhs);
if( nelems == 0 )
mexErrMsgTxt("Priority queue minimal allocation is 1.\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// read the parameters
// check input
if( nrhs != 1 || !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("A unique scalar number with the expected size of the queue is necessary.\n");
// retrieve the data
int nelems = 100;
retrieve_data( prhs[0], nelems );
// instantiate the priority queue
MaxHeap<double>* pq = new MaxHeap<double>(nelems);
// convert the points to double
plhs[0] = mxCreateDoubleMatrix(1,1,mxREAL);
double* pointer_to_tree = mxGetPr(plhs[0]);
pointer_to_tree[0] = (long) pq;
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_create.m | .m | 1,270 | 40 | % PQ_CREATE construct a priority queue object
%
% SYNTAX
% pq = pq_create(p)
%
% INPUT PARAMETERS
% N: the maximum number of elements in the priority queue
%
% OUTPUT PARAMETERS
% pq: a (memory) pointer to the created data structure
%
% DESCRIPTION
% Given a positive integer N this function allocates the memory for a
% BACK INDEXED priority queue of size N. The priority queue is a Max Heap,
% meaninig that it is implemented as a binary tree and parent nodes have a
% cost which is larger than the one of its childrens. Back indexing allows
% to be able to *increase* the cost of an element which is already in the
% priority queue and do so in logarithmic time.
%
% The complexity of the operations on the data structure are the default
% ones for Heap based priority queue:
%
% - insertion: O(log(n))
% - pop: O(log(n))
% - cost update: O(log(n))
% - size: O(1)
% - query top: O(1)
% - delete: O(n)
%
% See also:
% PQ_DEMO, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_top.cpp | .cpp | 2,031 | 58 | //==============================================================================
// Name : pq_top.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : Returns the topmost element in the priority queue (not popping it)
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// convert it to "long" datatype (good for addresses)
long pointer1 = (long) pointer0[0];
// convert it to "KDTree"
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 3 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// query top element in the PQ
pair<double, int> curr = heap->top();
// return its values in the output
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[0]) = curr.second+1;
plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[1]) = curr.first;
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_delete.cpp | .cpp | 1,795 | 54 | //==============================================================================
// Name : pq_delete.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : Frees the memory allocated by the priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// convert it to "long" datatype (good for addresses)
long pointer1 = (long) pointer0[0];
// convert it to "KDTree"
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 3 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// delete the heap
heap -> ~MaxHeap<double>();
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/MyHeap.h | .h | 13,656 | 486 | /**
* @file MyHeaps.h
* @author Andrea Tagliasacchi
* @date 26 March 2008
* @copyright (c) Andrea Tagliasacchi - All rights reserved
*/
//--- CHANGELOG
//
// Sunday Feb 20th 2011:
// corrected bug at .pop() which would not clear index
// in the back-indexing array. Many thanks to Tim Holy
// to point it out
#ifndef MYHEAP_H_
#define MYHEAP_H_
#include <vector>
#include <exception> // general exception
#include <stdexcept> // out_of_range
#include <iostream>
#include <cassert>
#include <algorithm>
#include "float.h"
using namespace std;
// macros for navigation in the hard coded binary tree
#define PARENT(pos) ((pos-1)>>1) // equivalent to floor(pos/2)
#define LEFT(pos) ((pos<<1)+1) // equivalent to pos*2 + 1
#define RIGHT(pos) ((pos<<1)+2) // equivalent to pos*2 + 2
/// EXCEPTION
class HeapEmptyException : public out_of_range{
public:
HeapEmptyException(const string &message) : out_of_range(message) {;}
};
class InvalidKeyIncreaseException : public out_of_range{
public:
InvalidKeyIncreaseException(const string &message) : out_of_range(message) {;}
};
class InvalidIndexException : public out_of_range{
public:
InvalidIndexException(const string &message) : out_of_range(message) {;}
};
/**
* This class provides a back-inxedex heap structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MaxHeap{
private:
/// root is assumed to be at end of the vector
vector< pair<Tkey,int> > heap;
/**
* maintain a list of back indexes.
* * -1 not in heap
* * other index that point to cell in vector heap
*/
vector< int > backIdx;
/**
* If useBackIdx==false it means that the current structure
* is not making use of a backindexed heap. Thus, no update
* is available
*/
bool useBackIdx;
public:
/// Simple constructor with NO cross updates
MaxHeap(){
useBackIdx = false;
}
/// back indexes constructor used for cross updates
MaxHeap( int Nindex ){
// initialize the back indexes with pseudo-null pointers
backIdx.resize( Nindex, -1 );
useBackIdx = true;
}
/// pushes a new value in the heap
void push( Tkey key, int index ){
//cout << "pushing " << index << endl;
if( useBackIdx && index >= (int) backIdx.size() )
throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
// If key is not in backindexes or there is no backindexes AT ALL.... complete push (no update)
if( !useBackIdx ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
// recursive call to increase key
heapIncreaseKey( heap.size()-1, key );
}
else{
if( backIdx[index] == -1 ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
//initially point to back
backIdx[ index ] = heap.size()-1;
// recursive call to increase key
heapIncreaseKey( heap.size()-1, key );
// USE STL STUFF
//push_heap(heap.begin(),heap.end());
}
// update push (a key exists)
else {
heapIncreaseKey( backIdx[index], key );
}
}
}
/// return a constant reference to the MINIMAL KEY element stored in the head of the heap
const pair<Tkey,int>& top() throw(HeapEmptyException){
if( heap.empty() )
throw new HeapEmptyException("Impossible to get top element, empty heap");
else
return heap[0];
}
/// removes the top element of the queue (minimal)
void pop() throw(HeapEmptyException){
if( heap.size() < 1 ) //a.k.a. heap.empty()
throw new HeapEmptyException("heap underflow");
// overwrite top with tail element
heap[0] = heap.back();
// USE STL FUNCTIONALITIES (NOT ALLOW BACKINDEXs)
//pop_heap(heap.begin(), heap.end());
// shorten the vector
heap.pop_back();
// start heapify from root
maxHeapify(0);
}
/// returns the size of the heap
int size(){
return heap.size();
}
/// check for emptyness
bool empty(){
return heap.empty();
}
/// check recursively if the substructures is correct using STL provided algorithm
bool verifyHeap( ){
return std::is_heap(heap.begin(), heap.end() );
}
private:
/// check and applies MaxHeap Correctness down the subtree with index "currIdx"
void maxHeapify(int currIdx){
unsigned int leftIdx = LEFT( currIdx );
unsigned int rightIdx = RIGHT( currIdx );
// decide if and where ta swap, left or right, then swap
// current is the best choice (defalut)
int largestIdx;
// is left a better choice? (exists an invalid placed bigger value on the left side)
if( leftIdx < heap.size() && heap[leftIdx].first > heap[currIdx].first )
largestIdx = leftIdx;
else
largestIdx = currIdx;
// is right a better choice? (exists an invalid placed bigger value on the right side)
if( rightIdx < heap.size() && heap[rightIdx].first > heap[largestIdx].first )
largestIdx = rightIdx;
// a better choice exists?
if( largestIdx != currIdx ){
// swap elements
swap( currIdx, largestIdx );
// recursively call this function on alterated subtree
maxHeapify( largestIdx );
}
}
/// swap the content of two elements in position pos1 and pos2
void swap(int pos1, int pos2){
assert( !heap.empty() );
assert( pos1>=0 && pos1<(int)heap.size() );
assert( pos2>=0 && pos2<(int)heap.size() );
// update backindexes
if( useBackIdx ){
backIdx[ heap[pos1].second ] = pos2;
backIdx[ heap[pos2].second ] = pos1;
}
// update heap
pair<Tkey,int> temp = heap[pos1];
heap[pos1] = heap[pos2];
heap[pos2] = temp;
}
/// propagates the correctness (in heap sense) down from a vertex currIdx
void heapIncreaseKey( int currIdx, Tkey key ){
// check if given key update is actually an increase
if( key < heap[currIdx].first )
throw InvalidKeyIncreaseException("In MaxHeaps only increase key updates are legal");
// update value with current key
heap[currIdx].first = key;
// traverse the tree up making necessary swaps
int parentIdx = PARENT(currIdx);
while( currIdx > 0 ){
if( heap[ parentIdx ].first < heap[ currIdx ].first ){
// make swap
swap( currIdx, parentIdx );
// move up
currIdx = parentIdx;
parentIdx = PARENT(currIdx);
} else {
break;
}
}
}
/// print an internal representation of the heap (debug purposes)
void print() {
cout << "idxs";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].second << " ";
cout << endl;
cout << "csts";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].first << " ";
cout << endl;
// cout << "";
// for ( int i=0; i < size(); i++)
// cout << heap[i].first << " in off: " << backIdx[heap[i].first] << ", ";
// cout << endl;
cout << endl;
}
};
/**
* This class provides a back-inxedex heap (MinHeap) structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MinHeap{
private:
/// root is assumed to be at end of the vector
vector< pair<Tkey,int> > heap;
/**
* maintain a list of back indexes.
* * -1 not in heap
* * other index that point to cell in vector heap
*/
vector< int > backIdx;
/**
* If useBackIdx==false it means that the current structure
* is not making use of a backindexed heap. Thus, no update
* is available
*/
bool useBackIdx;
public:
/// back indexes constructor used for cross updates
MinHeap( int Nindex ){
// initialize the back indexes with pseudo-null pointers
backIdx.resize( Nindex, -1 );
useBackIdx = true;
}
/// Simple constructor with NO cross updates
MinHeap(){
useBackIdx = false;
}
/// pushes a new value in the heap
void push( Tkey key, int index ){
//cout << "pushing " << index << endl;
if( useBackIdx && index >= (int) backIdx.size() )
throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
// If key is not in backindexes or there is no backindexes AT ALL.... complete push (no update)
if( !useBackIdx ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
// recursive call to increase key
heapDecreaseKey( heap.size()-1, key );
}
else{
if( useBackIdx || backIdx[index] == -1 ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
//initially point to back
backIdx[ index ] = heap.size()-1;
// recursive call to increase key
heapDecreaseKey( heap.size()-1, key );
// USE STL STUFF
//push_heap(heap.begin(),heap.end());
}
// update push (a key exists)
else {
heapDecreaseKey( backIdx[index], key );
}
}
}
/// return a constant reference to the MINIMAL KEY element stored in the head of the heap
const pair<Tkey,int>& top() throw(HeapEmptyException){
if( heap.empty() )
throw new HeapEmptyException("Impossible to get top element, empty heap");
else
return heap[0];
}
/// removes the top element of the queue (minimal)
void pop() throw(HeapEmptyException){
if( heap.size() < 1 ) //a.k.a. heap.empty()
throw new HeapEmptyException("heap underflow");
// Clear the backIdx associated with the element we are about to remove
if( useBackIdx ){
backIdx[ heap[0].second ] = -1;
}
// overwrite top with tail element
heap[0] = heap.back();
// USE STL FUNCTIONALITIES (NOT ALLOW BACKINDEXs)
//pop_heap(heap.begin(), heap.end());
// shorten the vector
heap.pop_back();
// start heapify from root
minHeapify(0);
}
/// returns the size of the heap
int size(){
return heap.size();
}
/// check for emptyness
bool empty(){
return heap.empty();
}
// this does not work, how do you provide a new ordering function to is_heap??
/// check recursively if the substructures is correct using STL provided algorithm
//bool verifyHeap( ){
// return std::__is_heap(heap.begin(), heap.end() );
//}
/// computes full heap sort and returns the corresponding indexing structure
/// Requires the indexes to be allocated already.
void heapsort(vector<int>& indexes){
// until empty... keep popping
int i = 0;
while( empty() == false ){
pair<Tkey,int> t = top();
pop();
indexes[i++] = t.second;
}
}
private:
/// check and applies MaxHeap Correctness down the subtree with index "currIdx"
void minHeapify(int currIdx){
unsigned int leftIdx = LEFT( currIdx );
unsigned int rightIdx = RIGHT( currIdx );
// decide if and where ta swap, left or right, then swap
// current is the best choice (defalut)
int smallerIdx;
// is left a better choice? (exists an invalid placed smaller value on the left side)
if( leftIdx < heap.size() && heap[leftIdx].first < heap[currIdx].first )
smallerIdx = leftIdx;
else
smallerIdx = currIdx;
// is right a better choice? (exists an invalid placed smaller value on the right side)
if( rightIdx < heap.size() && heap[rightIdx].first < heap[smallerIdx].first )
smallerIdx = rightIdx;
// a better choice exists?
if( smallerIdx != currIdx ){
// swap elements
swap( currIdx, smallerIdx );
// recursively call this function on alterated subtree
minHeapify( smallerIdx );
}
}
/// swap the content of two elements in position pos1 and pos2
void swap(int pos1, int pos2){
assert( !heap.empty() );
assert( pos1>=0 && pos1<(int)heap.size() );
assert( pos2>=0 && pos2<(int)heap.size() );
// update backindexes
if( useBackIdx ){
backIdx[ heap[pos1].second ] = pos2;
backIdx[ heap[pos2].second ] = pos1;
}
// update heap
pair<Tkey,int> temp = heap[pos1];
heap[pos1] = heap[pos2];
heap[pos2] = temp;
}
/// propagates the correctness (in heap sense) down from a vertex currIdx
void heapDecreaseKey( int currIdx, Tkey key ){
// check if given key update is actually an increase
if( key > heap[currIdx].first )
throw InvalidKeyIncreaseException("In MinHeaps only decrease in key updates are legal");
// update value with current key
heap[currIdx].first = key;
// traverse the tree up making necessary swaps
int parentIdx = PARENT(currIdx);
while( currIdx > 0 ){
if( heap[ parentIdx ].first > heap[ currIdx ].first ){
// make swap
swap( currIdx, parentIdx );
// move up
currIdx = parentIdx;
parentIdx = PARENT(currIdx);
} else {
break;
}
}
}
/// print an internal representation of the heap (debug purposes)
public: void print() {
cout << "idxs";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].second << " ";
cout << endl;
cout << "csts";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].first << " ";
cout << endl;
// cout << "";
// for ( int i=0; i < size(); i++)
// cout << heap[i].first << " in off: " << backIdx[heap[i].first] << ", ";
// cout << endl;
cout << endl;
}
};
#endif /*MYHEAP_H_*/
| Unknown |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_size.m | .m | 858 | 28 | % PQ_SIZE returns the number of elements in the priority queue
%
% SYNTAX
% sz = pq_size(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% sz: the number of elements in the priority queue
%
% DESCRIPTION
% Queries the priority queue for the number of elements that it contains.
% This number is not the "capacity" or the maximum number of elements which
% is possible to insert but rather the number of elements CURRENTLY in the
% priority queue
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_delete.m | .m | 570 | 22 | % PQ_DELETE construct a priority queue object
%
% SYNTAX
% pq_delete(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% DESCRIPTION
% De-allocates the memory for the priority queue.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0_old/pq_demo.m | .m | 1,014 | 39 | % PQ_DEMO compiles library and illustrate Priority Queue's functionalities
%
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009
clc, clear, close all;
mex pq_create.cpp;
mex pq_push.cpp;
mex pq_pop.cpp;
mex pq_size.cpp;
mex pq_top.cpp;
mex pq_delete.cpp;
pq = pq_create( 10000 );
for i=1:10
disp(sprintf('\n')); %newline
%--- create a random entry
cost = rand(1);
pq_push(pq, i, cost);
disp(sprintf('inserted element: [%d,%f]', i, cost ));
%--- query for the new head
[idx,cost] = pq_top(pq);
newsize = pq_size(pq);
disp(sprintf('*** |queue| = %d, TOP=[%d,%f]', newsize, idx, cost ));
%--- randomly pop an element
if rand(1)>.5
disp(sprintf('\n'));
disp(sprintf('random pop!'));
[idx,cost] = pq_pop(pq);
newsize = pq_size(pq);
disp(sprintf('*** |queue| = %d, POPPED=[%d,%f]', newsize, idx, cost ));
end
end
pq_delete(pq); | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_pop.m | .m | 725 | 26 | % PQ_POP removes the topmost element of the priority queue
%
% SYNTAX
% [idx, cost] = pq_pop(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% idx: the index of the popped element
% cost: the cost of the popped element
%
% DESCRIPTION
% Removes the topmost element from a priority queue and return its content.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_push.m | .m | 902 | 29 | % PQ_PUSH inserts a new element/update a cost in the priority queue
%
% SYNTAX
% pq_push(pq, idx, cost)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% idx: the index of the element
% cost: the cost of the newly inserted element or the
% cost to which the element should be updated to
%
% DESCRIPTION
% Inserts a new element in the priority queue. If the elements already
% exist (elements identified by their "idx"), its cost is updated and a new
% element will not be inserted.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Gormen, T.H. and Leiserson, C.E. and Rivest, R.L., "introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_pop.cpp | .cpp | 2,061 | 62 | //==============================================================================
// Name : pq_pop.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : pops an element from the PQ and returns its index and cost
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// convert it to "long" datatype (good for addresses)
unsigned long long pointer1 = (unsigned long long) pointer0[0];
// convert it to "KDTree"
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 3 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// extract head before popping
pair<double, int> curr = heap->top();
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[0]) = curr.second+1;
plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[1]) = curr.first;
// pop top element in the PQ
heap->pop();
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_size.cpp | .cpp | 1,873 | 53 | //==============================================================================
// Name : pq_size.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : Returns the size of the priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// convert it to "long" datatype (good for addresses)
unsigned long long pointer1 = (unsigned long long) pointer0[0];
// convert it to "KDTree"
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 1 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// pop top element in the PQ
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[0]) = heap->size();
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_demo.cpp | .cpp | 7,872 | 330 | #include "MyHeap.h"
#include <stdlib.h>
#include <cmath>
#include <iostream>
#include <algorithm>
using namespace std;
/// TEST FOR MAXHEAP
int test1(){
MaxHeap<double> pq(7); // heap of ints compared by doubles
pq.push( 1,1 );
pq.push( 6,0 );
pq.push( 2,4 );
pq.push( 3,3 );
pq.push( 4,2 );
pq.push( 5,5 );
// test the updates
pq.push( 6,5 );
pq.push( 3,7 );
// check ordering
while( !pq.empty() ){
cout << "[ " << pq.top().second << " " << pq.top().first << " ] " << endl;
pq.pop();
}
return 0;
}
/// Indexing test, check whether tree indexes are behaving properly. A bug here caused a long headache...
int test2(){
cout << "0:" << endl;
cout << PARENT(0) << endl;
cout << LEFT(0) << endl;
cout << RIGHT(0) << endl << endl;
cout << "1:" << endl;
cout << PARENT(1) << endl;
cout << LEFT(1) << endl;
cout << RIGHT(1) << endl << endl;
cout << "2:" << endl;
cout << PARENT(2) << endl;
cout << LEFT(2) << endl;
cout << RIGHT(2) << endl << endl;
cout << "3:" << endl;
cout << PARENT(3) << endl;
cout << LEFT(3) << endl;
cout << RIGHT(3) << endl << endl;
cout << "7:" << endl;
cout << PARENT(7) << endl;
cout << LEFT(7) << endl;
cout << RIGHT(7) << endl << endl;
return 0;
}
/// EXTENSIVE (RANDOM) TEST OF PUSH/POP
int test3(){
int N = 100000;
MaxHeap<double> pq(N); // heap of ints compared by doubles
for (int n=0; n < N; n++) {
// create random push and updates
double key = float(rand()) / RAND_MAX;
pq.push( key, n );
//Verify at each step O(n)
//if( pq.verifyHeap() == false ){
// cout << "error on pusching of " << n << endl;
// exit(0);
//}
}
// verify that exhaustive pop is done in decreasing
double prev_max = 1; // since we provide keys [0,1)
while( !pq.empty() ){
pair<double, int> curr = pq.top();
if( curr.first > prev_max ){
cout << "BUG: " << curr.first << " > " << prev_max << endl;
exit(0);
} else {
prev_max = curr.first;
}
pq.pop();
}
cout << "terminated correctly" << endl;
exit(0);
}
/// MAXHEAP EXTENSIVE (RANDOM) TEST OF RANDOM ACCESS UPDATE
int test4(){
    // MaxHeap random-access update test: after random increase-key updates,
    // exhaustive popping must still yield keys in non-increasing order.
    const int N = 100000;
    MaxHeap<double> pq(N);          // back-indexed heap keyed by doubles
    std::vector<double> costs(N);   // mirror of each element's current key
    // initial fill with random keys in [0,1)
    for( int i = 0; i < N; i++ ){
        costs[i] = float(rand()) / RAND_MAX;
        pq.push( costs[i], i );
    }
    // apply small random increases to random elements (exercises update-push)
    for( int i = 0; i < N; i++ ){
        int which = rand() % N;
        costs[which] += ( float(rand()) / RAND_MAX ) / 50; // small increment
        pq.push( costs[which], which );
    }
    double prev = 2; // loose upper bound above any updated key
    while( !pq.empty() ){
        std::pair<double, int> head = pq.top();
        if( head.first > prev ){
            std::cout << "BUG: " << head.first << " > " << prev << std::endl;
            exit(0);
        }
        prev = head.first;
        pq.pop();
    }
    std::cout << "terminated correctly" << std::endl;
    exit(0);
}
/// MINHEAP EXTENSIVE (RANDOM) TEST OF RANDOM ACCESS UPDATE
int test5(){
    // MinHeap random-access update test: after random decrease-key updates,
    // exhaustive popping must yield keys in non-decreasing order.
    const int N = 100000;
    MinHeap<double> pq(N);          // back-indexed heap keyed by doubles
    std::vector<double> costs(N);   // mirror of each element's current key
    // initial fill with random keys in [0,1)
    for( int i = 0; i < N; i++ ){
        costs[i] = float(rand()) / RAND_MAX;
        pq.push( costs[i], i );
    }
    // apply small random decreases to random elements (exercises update-push)
    for( int i = 0; i < N; i++ ){
        int which = rand() % N;
        costs[which] -= ( float(rand()) / RAND_MAX ) / 50; // small decrement
        pq.push( costs[which], which );
    }
    double prev = -2; // loose lower bound below any updated key
    while( !pq.empty() ){
        std::pair<double, int> head = pq.top();
        if( head.first < prev ){
            std::cout << "BUG: " << head.first << " < " << prev << std::endl;
            exit(0);
        }
        prev = head.first;
        pq.pop();
    }
    std::cout << "terminated correctly" << std::endl;
    exit(0);
}
/// FULL HEAP SORT + MATLAB-STYLE BACKINDEXES
int test6(){
    int N = 10;
    MinHeap<double> heap(N);
    // cache of generated keys: data[n] is the key pushed with index n
    vector<double> data(N);
    for (int n=0; n < N; n++) {
        double key = round( (float(rand()) / RAND_MAX)*10 );
        // BUGFIX: was data.push_back(key), which appended AFTER the N
        // default-constructed zeros, leaving data[0..N-1] all zero
        data[n] = key;
        heap.push( key, n );
    }
    heap.print();
    // sort back indexes.
    // BUGFIX: was "indexes.reserve(10)": reserve() only sets capacity,
    // so heapsort's writes through operator[] on a size-0 vector were
    // undefined behavior. The vector must actually contain N elements.
    vector<int> indexes(N);
    heap.heapsort( indexes );
    // output element indexes in ascending key order
    for( int n=0; n<N; n++)
        cout << indexes[n] << " ";
    return 0;
}
/// MINHEAP: HEAP SORT & BACKINDEXES (KD-TREE related test)
int test7(){
    // MULTIMEDIAN (KD-TREE): heap-sort the same point set independently
    // along x, y and z, then print the per-dimension back indexes.
    const int N = 10;
    const int D = 3;
    MinHeap<double> heap_x(N);
    MinHeap<double> heap_y(N);
    MinHeap<double> heap_z(N);
    std::vector< std::vector<double> > points(N, std::vector<double>(D,0));
    std::vector<double> idxs_x(N);   // kept to mirror original scaffolding (unused)
    std::vector<double> idxs_y(N);
    std::vector<double> idxs_z(N);
    for( int n = 0; n < N; n++ ){
        // three rand() draws per point, in x,y,z order (same as before)
        points[n][0] = round( (float(rand()) / RAND_MAX)*10 );
        points[n][1] = round( (float(rand()) / RAND_MAX)*10 );
        points[n][2] = round( (float(rand()) / RAND_MAX)*10 );
        heap_x.push( points[n][0], n );
        heap_y.push( points[n][1], n );
        heap_z.push( points[n][2], n );
    }
    // print the original points, one dimension per row
    std::cout << "data" << std::endl;
    for( int n = 0; n < N; n++ )
        std::cout << n << " ";
    std::cout << std::endl << "---------------------------" << std::endl;
    for( int k = 0; k < D; k++ ){
        for( int n = 0; n < N; n++ )
            std::cout << points[n][k] << " ";
        std::cout << std::endl;
    }
    std::cout << std::endl;
    // back indexes: position in the k-th sorted dimension -> data offset
    std::vector< std::vector<int> > indexes( D, std::vector<int>(N,0) );
    heap_x.heapsort( indexes[0] );
    heap_y.heapsort( indexes[1] );
    heap_z.heapsort( indexes[2] );
    std::cout << "back indexes" << std::endl;
    for( int k = 0; k < D; k++ ){
        for( int n = 0; n < N; n++ )
            std::cout << indexes[k][n] << " ";
        if( k < D-1 )
            std::cout << std::endl;
    }
    return 0;
}
/// MINHEAP: No backindexed heap structure, just test push/pop. Used in kNN kd-trees
int test8(){
    // Plain (non-backindexed) MinHeap push/pop test, as used in kNN kd-trees:
    // popping everything must yield keys in non-decreasing order.
    const int N = 100000;
    MinHeap<double> pq; // heap of ints keyed by doubles
    for( int i = 0; i < N; i++ )
        pq.push( double(rand()) / RAND_MAX, i );
    double prev = -2; // rand keys live in [0,1)
    while( !pq.empty() ){
        std::pair<double, int> head = pq.top();
        if( head.first < prev ){
            std::cout << "BUG: " << head.first << " < " << prev << std::endl;
            exit(1);
        }
        prev = head.first;
        pq.pop();
    }
    return 0;
}
//class CMP{
// public double k;
// public int i;
//};
// strict ordering predicate: true when a is greater than b
// (used to build/sort a heap in descending order)
bool comp(const int &a, const int &b){
    return b < a; // equivalent to a > b
}
int test9(){
    // build a heap under the descending comparator, then fully sort it
    const int raw[8] = {1,2,3,4,5,6,7,8};
    std::vector<int> v(raw, raw+8);
    std::make_heap( v.begin(), v.end(), comp );
    std::sort_heap( v.begin(), v.end(), comp );
    for( unsigned int i = 0; i < v.size(); ++i )
        std::cout << v[i] << " ";
    std::cout << std::endl;
    return 0;
}
int test10(){
    // split a 7-element vector into a left half (4 elems) and right half (3)
    const int raw[7] = {1,2,3,4,5,6,7};
    std::vector<int> all(raw, raw+7);
    std::vector<int> lhalf(4);
    std::vector<int> rhalf(3);
    std::vector<int>::iterator mid = all.end() - all.size()/2;
    std::copy(all.begin(), mid, lhalf.begin());
    std::copy(mid, all.end(), rhalf.begin());
    for( std::size_t i = 0; i < all.size(); ++i )
        std::cout << all[i] << " ";
    std::cout << std::endl;
    for( std::size_t i = 0; i < lhalf.size(); ++i )
        std::cout << lhalf[i] << " ";
    std::cout << std::endl;
    for( std::size_t i = 0; i < rhalf.size(); ++i )
        std::cout << rhalf[i] << " ";
    std::cout << std::endl;
    return 0;
}
////////////// MAIN //////////////
int main(int argc, char **argv) {
    // each testN() exercises one scenario; enable exactly one at a time
    //test1();
    //test2();
    //test3();
    //test4();
    //test5();
    //test6();
    //test7();
    //test8();
    //test9();
    test10();
    return 0;
}
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/MyHeap_old.h | .h | 13,341 | 474 | /**
* @file MyHeaps.h
* @author Andrea Tagliasacchi
* @date 26 March 2008
* @copyright (c) Andrea Tagliasacchi - All rights reserved
*/
#ifndef MYHEAPS_H_
#define MYHEAPS_H_
#include <vector>
#include <exception> // general exception
#include <stdexcept> // out_of_range
#include <iostream>
#include <cassert>
#include <algorithm>
#include "float.h"
using namespace std;
// macros for navigation in the hard coded binary tree
#define PARENT(pos) ((pos-1)>>1) // equivalent to floor(pos/2)
#define LEFT(pos) ((pos<<1)+1) // equivalent to pos*2 + 1
#define RIGHT(pos) ((pos<<1)+2) // equivalent to pos*2 + 2
/// EXCEPTION
/// Raised when top()/pop() is invoked on an empty heap.
class HeapEmptyException : public std::out_of_range {
public:
    HeapEmptyException(const std::string &message) : std::out_of_range(message) {}
};

/// Raised when a key update moves in the illegal direction for the heap type.
class InvalidKeyIncreaseException : public std::out_of_range {
public:
    InvalidKeyIncreaseException(const std::string &message) : std::out_of_range(message) {}
};

/// Raised when a pushed element index exceeds the capacity fixed at construction.
class InvalidIndexException : public std::out_of_range {
public:
    InvalidIndexException(const std::string &message) : std::out_of_range(message) {}
};
/**
* This class provides a back-indexed heap structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MaxHeap{
private:
/// root is assumed to be at end of the vector
vector< pair<Tkey,int> > heap;
/**
* maintain a list of back indexes.
* * -1 not in heap
* * other index that point to cell in vector heap
*/
vector< int > backIdx;
/**
* If useBackIdx==false it means that the current structure
* is not making use of a backindexed heap. Thus, no update
* is available
*/
bool useBackIdx;
public:
/// Simple constructor with NO cross updates
MaxHeap(){
useBackIdx = false;
}
/// back indexes constructor used for cross updates
MaxHeap( int Nindex ){
// initialize the back indexes with pseudo-null pointers
backIdx.resize( Nindex, -1 );
useBackIdx = true;
}
/// pushes a new value in the heap
void push( Tkey key, int index ){
//cout << "pushing " << index << endl;
if( useBackIdx && index >= (int) backIdx.size() )
throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
// If key is not in backindexes or there is no backindexes AT ALL.... complete push (no update)
if( !useBackIdx ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
// recursive call to increase key
heapIncreaseKey( heap.size()-1, key );
}
else{
if( backIdx[index] == -1 ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
//initially point to back
backIdx[ index ] = heap.size()-1;
// recursive call to increase key
heapIncreaseKey( heap.size()-1, key );
// USE STL STUFF
//push_heap(heap.begin(),heap.end());
}
// update push (a key exists)
else {
heapIncreaseKey( backIdx[index], key );
}
}
}
/// return a constant reference to the MINIMAL KEY element stored in the head of the heap
const pair<Tkey,int>& top() throw(HeapEmptyException){
if( heap.empty() )
throw new HeapEmptyException("Impossible to get top element, empty heap");
else
return heap[0];
}
/// removes the top element of the queue (minimal)
void pop() throw(HeapEmptyException){
if( heap.size() < 1 ) //a.k.a. heap.empty()
throw new HeapEmptyException("heap underflow");
// overwrite top with tail element
heap[0] = heap.back();
// USE STL FUNCTIONALITIES (NOT ALLOW BACKINDEXs)
//pop_heap(heap.begin(), heap.end());
// shorten the vector
heap.pop_back();
// start heapify from root
maxHeapify(0);
}
/// returns the size of the heap
int size(){
return heap.size();
}
/// check for emptyness
bool empty(){
return heap.empty();
}
/// check recursively if the substructures is correct using STL provided algorithm
bool verifyHeap( ){
return std::__is_heap(heap.begin(), heap.end() );
}
private:
/// check and applies MaxHeap Correctness down the subtree with index "currIdx"
void maxHeapify(int currIdx){
unsigned int leftIdx = LEFT( currIdx );
unsigned int rightIdx = RIGHT( currIdx );
// decide if and where ta swap, left or right, then swap
// current is the best choice (defalut)
int largestIdx;
// is left a better choice? (exists an invalid placed bigger value on the left side)
if( leftIdx < heap.size() && heap[leftIdx].first > heap[currIdx].first )
largestIdx = leftIdx;
else
largestIdx = currIdx;
// is right a better choice? (exists an invalid placed bigger value on the right side)
if( rightIdx < heap.size() && heap[rightIdx].first > heap[largestIdx].first )
largestIdx = rightIdx;
// a better choice exists?
if( largestIdx != currIdx ){
// swap elements
swap( currIdx, largestIdx );
// recursively call this function on alterated subtree
maxHeapify( largestIdx );
}
}
/// swap the content of two elements in position pos1 and pos2
void swap(int pos1, int pos2){
assert( !heap.empty() );
assert( pos1>=0 && pos1<(int)heap.size() );
assert( pos2>=0 && pos2<(int)heap.size() );
// update backindexes
if( useBackIdx ){
backIdx[ heap[pos1].second ] = pos2;
backIdx[ heap[pos2].second ] = pos1;
}
// update heap
pair<Tkey,int> temp = heap[pos1];
heap[pos1] = heap[pos2];
heap[pos2] = temp;
}
/// propagates the correctness (in heap sense) down from a vertex currIdx
void heapIncreaseKey( int currIdx, Tkey key ){
// check if given key update is actually an increase
if( key < heap[currIdx].first )
throw InvalidKeyIncreaseException("In MaxHeaps only increase key updates are legal");
// update value with current key
heap[currIdx].first = key;
// traverse the tree up making necessary swaps
int parentIdx = PARENT(currIdx);
while( currIdx > 0 ){
if( heap[ parentIdx ].first < heap[ currIdx ].first ){
// make swap
swap( currIdx, parentIdx );
// move up
currIdx = parentIdx;
parentIdx = PARENT(currIdx);
} else {
break;
}
}
}
/// print an internal representation of the heap (debug purposes)
void print() {
cout << "idxs";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].second << " ";
cout << endl;
cout << "csts";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].first << " ";
cout << endl;
// cout << "";
// for ( int i=0; i < size(); i++)
// cout << heap[i].first << " in off: " << backIdx[heap[i].first] << ", ";
// cout << endl;
cout << endl;
}
};
/**
* This class provides a back-indexed heap (MinHeap) structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MinHeap{
private:
/// root is assumed to be at end of the vector
vector< pair<Tkey,int> > heap;
/**
* maintain a list of back indexes.
* * -1 not in heap
* * other index that point to cell in vector heap
*/
vector< int > backIdx;
/**
* If useBackIdx==false it means that the current structure
* is not making use of a backindexed heap. Thus, no update
* is available
*/
bool useBackIdx;
public:
/// back indexes constructor used for cross updates
MinHeap( int Nindex ){
// initialize the back indexes with pseudo-null pointers
backIdx.resize( Nindex, -1 );
useBackIdx = true;
}
/// Simple constructor with NO cross updates
MinHeap(){
useBackIdx = false;
}
/// pushes a new value in the heap
void push( Tkey key, int index ){
//cout << "pushing " << index << endl;
if( useBackIdx && index >= (int) backIdx.size() )
throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
// If key is not in backindexes or there is no backindexes AT ALL.... complete push (no update)
if( !useBackIdx ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
// recursive call to increase key
heapDecreaseKey( heap.size()-1, key );
}
else{
if( useBackIdx || backIdx[index] == -1 ){
// add to the back of the vector
heap.push_back( make_pair(key, index) );
//initially point to back
backIdx[ index ] = heap.size()-1;
// recursive call to increase key
heapDecreaseKey( heap.size()-1, key );
// USE STL STUFF
//push_heap(heap.begin(),heap.end());
}
// update push (a key exists)
else {
heapDecreaseKey( backIdx[index], key );
}
}
}
/// return a constant reference to the MINIMAL KEY element stored in the head of the heap
const pair<Tkey,int>& top() throw(HeapEmptyException){
if( heap.empty() )
throw new HeapEmptyException("Impossible to get top element, empty heap");
else
return heap[0];
}
/// removes the top element of the queue (minimal)
void pop() throw(HeapEmptyException){
if( heap.size() < 1 ) //a.k.a. heap.empty()
throw new HeapEmptyException("heap underflow");
// overwrite top with tail element
heap[0] = heap.back();
// USE STL FUNCTIONALITIES (NOT ALLOW BACKINDEXs)
//pop_heap(heap.begin(), heap.end());
// shorten the vector
heap.pop_back();
// start heapify from root
minHeapify(0);
}
/// returns the size of the heap
int size(){
return heap.size();
}
/// check for emptyness
bool empty(){
return heap.empty();
}
// this does not work, how do you provide a new ordering function to is_heap??
/// check recursively if the substructures is correct using STL provided algorithm
//bool verifyHeap( ){
// return std::__is_heap(heap.begin(), heap.end() );
//}
/// computes full heap sort and returns the corresponding indexing structure
/// Requires the indexes to be allocated already.
void heapsort(vector<int>& indexes){
// until empty... keep popping
int i = 0;
while( empty() == false ){
pair<Tkey,int> t = top();
pop();
indexes[i++] = t.second;
}
}
private:
/// check and applies MaxHeap Correctness down the subtree with index "currIdx"
void minHeapify(int currIdx){
unsigned int leftIdx = LEFT( currIdx );
unsigned int rightIdx = RIGHT( currIdx );
// decide if and where ta swap, left or right, then swap
// current is the best choice (defalut)
int smallerIdx;
// is left a better choice? (exists an invalid placed smaller value on the left side)
if( leftIdx < heap.size() && heap[leftIdx].first < heap[currIdx].first )
smallerIdx = leftIdx;
else
smallerIdx = currIdx;
// is right a better choice? (exists an invalid placed smaller value on the right side)
if( rightIdx < heap.size() && heap[rightIdx].first < heap[smallerIdx].first )
smallerIdx = rightIdx;
// a better choice exists?
if( smallerIdx != currIdx ){
// swap elements
swap( currIdx, smallerIdx );
// recursively call this function on alterated subtree
minHeapify( smallerIdx );
}
}
/// swap the content of two elements in position pos1 and pos2
void swap(int pos1, int pos2){
assert( !heap.empty() );
assert( pos1>=0 && pos1<(int)heap.size() );
assert( pos2>=0 && pos2<(int)heap.size() );
// update backindexes
if( useBackIdx ){
backIdx[ heap[pos1].second ] = pos2;
backIdx[ heap[pos2].second ] = pos1;
}
// update heap
pair<Tkey,int> temp = heap[pos1];
heap[pos1] = heap[pos2];
heap[pos2] = temp;
}
/// propagates the correctness (in heap sense) down from a vertex currIdx
void heapDecreaseKey( int currIdx, Tkey key ){
// check if given key update is actually an increase
if( key > heap[currIdx].first )
throw InvalidKeyIncreaseException("In MinHeaps only decrease in key updates are legal");
// update value with current key
heap[currIdx].first = key;
// traverse the tree up making necessary swaps
int parentIdx = PARENT(currIdx);
while( currIdx > 0 ){
if( heap[ parentIdx ].first > heap[ currIdx ].first ){
// make swap
swap( currIdx, parentIdx );
// move up
currIdx = parentIdx;
parentIdx = PARENT(currIdx);
} else {
break;
}
}
}
/// print an internal representation of the heap (debug purposes)
public: void print() {
cout << "idxs";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].second << " ";
cout << endl;
cout << "csts";
for ( int i=0; i < size(); i++)
cout << " " << heap[i].first << " ";
cout << endl;
// cout << "";
// for ( int i=0; i < size(); i++)
// cout << heap[i].first << " in off: " << backIdx[heap[i].first] << ", ";
// cout << endl;
cout << endl;
}
};
#endif /*MYHEAPS_H_*/
| Unknown |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_push.cpp | .cpp | 2,996 | 91 | //==============================================================================
// Name : pq_push.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : inserts a new element in the priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
/// Decode the MATLAB-side handle (a 1x1 double holding a pointer value, as
/// produced by pq_create) back into a MaxHeap<double>*; aborts the MEX call
/// with an error when the handle is NULL.
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// widen the stored double to an integer large enough to hold an address
unsigned long long pointer1 = (unsigned long long) pointer0[0];
// reinterpret the integer as the heap pointer (the old comment said
// "KDTree" — stale copy/paste from the kd-tree project this derives from)
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
/// Read the second MEX argument as a scalar integer element index.
/// Rejects missing, non-scalar, non-numeric and fractional inputs.
void retrieve_index( const mxArray* matptr, int& index){
// check that I actually received something
if( matptr == NULL )
mexErrMsgTxt("missing second parameter (element index)\n");
// must be a 1x1 numeric scalar
if( 1 != mxGetM(matptr) || !mxIsNumeric(matptr) || 1 != mxGetN(matptr) )
mexErrMsgTxt("second parameter should be a unique integer array index\n");
// retrieve the value, then reject fractional indexes.
// BUGFIX: the old check "index % 1 != 0" ran AFTER the integer cast, so it
// could never fire; compare the cast result against the original double.
double raw = mxGetScalar(matptr);
index = (int) raw;
if( (double) index != raw )
mexErrMsgTxt("the index should have been an integer!\n");
}
/// Read the third MEX argument as the scalar cost (priority) value.
/// BUGFIX: error messages were copy-pasted from retrieve_index and wrongly
/// referred to the "second parameter" / "element index".
void retrieve_cost( const mxArray* matptr, double& cost){
// check that I actually received something
if( matptr == NULL )
mexErrMsgTxt("missing third parameter (element cost)\n");
// must be a 1x1 numeric scalar
if( 1 != mxGetM(matptr) || !mxIsNumeric(matptr) || 1 != mxGetN(matptr) )
mexErrMsgTxt("third parameter should be a scalar cost value\n");
// retrieve the cost
cost = (double) mxGetScalar(matptr);
}
/// MEX gateway for pq_push(pq, index, cost): inserts element "index" with
/// priority "cost" into the queue pointed to by pq, or — when the element is
/// already stored — performs an increase-key update (see MaxHeap::push).
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=3 )
mexErrMsgTxt("This function requires 3 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
if( !mxIsNumeric(prhs[1]) )
mexErrMsgTxt("parameter 2 missing!\n");
if( !mxIsNumeric(prhs[2]) )
mexErrMsgTxt("parameter 3 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// retrieve the parameters
int index;
retrieve_index( prhs[1], index );
double cost;
retrieve_cost( prhs[2], cost);
// push in the PQ; MATLAB indexes are 1-based, the C++ heap is 0-based
heap->push( cost, index-1 );
// return control to matlab
return;
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_top.m | .m | 769 | 27 | % PQ_TOP queries for the topmost element of the priority queue (not removing it)
%
% SYNTAX
% [idx, cost] = pq_top(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% idx: the index of the topmost element
% cost: the cost of the topmost element
%
% DESCRIPTION
% Queries the topmost element from a priority queue returning its
% index and associated cost.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Cormen, T.H. and Leiserson, C.E. and Rivest, R.L., "Introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_create.cpp | .cpp | 1,774 | 55 | //==============================================================================
// Name : myheaps_demo.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : creates a (top-down) priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
/// Read the requested queue capacity from the scalar MEX argument.
/// BUGFIX: only "== 0" used to be rejected, so a negative capacity slipped
/// through into the MaxHeap constructor's backIdx.resize().
void retrieve_data( const mxArray* prhs, int& nelems){
// retrieve the capacity
nelems = (int) mxGetScalar(prhs);
// reject zero AND negative sizes
if( nelems < 1 )
mexErrMsgTxt("Priority queue minimal allocation is 1.\n");
}
/// MEX gateway for pq = pq_create(N): allocates a back-indexed MaxHeap with
/// capacity N and returns its address to MATLAB, stored inside a 1x1 double.
/// NOTE(review): a double mantissa represents integers exactly only up to
/// 2^53; pointer values above that would be corrupted by this round-trip —
/// fine on typical address layouts, but worth confirming.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// read the parameters
// check input
if( nrhs != 1 || !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("A unique scalar number with the expected size of the queue is necessary.\n");
// retrieve the data (default overwritten by retrieve_data on success)
int nelems = 100;
retrieve_data( prhs[0], nelems );
// instantiate the priority queue (freed later by pq_delete)
MaxHeap<double>* pq = new MaxHeap<double>(nelems);
// hand the pointer back to MATLAB inside a 1x1 double
plhs[0] = mxCreateDoubleMatrix(1,1,mxREAL);
double* pointer_to_tree = mxGetPr(plhs[0]);
pointer_to_tree[0] = (unsigned long long) pq;
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_create.m | .m | 1,270 | 40 | % PQ_CREATE construct a priority queue object
%
% SYNTAX
% pq = pq_create(N)
%
% INPUT PARAMETERS
% N: the maximum number of elements in the priority queue
%
% OUTPUT PARAMETERS
% pq: a (memory) pointer to the created data structure
%
% DESCRIPTION
% Given a positive integer N this function allocates the memory for a
% BACK INDEXED priority queue of size N. The priority queue is a Max Heap,
% meaning that it is implemented as a binary tree in which every parent node
% has a cost larger than those of its children. Back indexing makes it
% possible to *increase* the cost of an element that is already in the
% priority queue, and to do so in logarithmic time.
%
% The complexity of the operations on the data structure are the default
% ones for Heap based priority queue:
%
% - insertion: O(log(n))
% - pop: O(log(n))
% - cost update: O(log(n))
% - size: O(1)
% - query top: O(1)
% - delete: O(n)
%
% See also:
% PQ_DEMO, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Cormen, T.H. and Leiserson, C.E. and Rivest, R.L., "Introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_top.cpp | .cpp | 2,059 | 58 | //==============================================================================
// Name : pq_top.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : Returns the topmost element in the priority queue (not popping it)
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
#ifdef MATLAB_MEX_FILE
#include "mex.h"
/// Decode the MATLAB-side handle (a 1x1 double holding a pointer value, as
/// produced by pq_create) back into a MaxHeap<double>*; aborts the MEX call
/// with an error when the handle is NULL.
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// widen the stored double to an integer large enough to hold an address
unsigned long long pointer1 = (unsigned long long) pointer0[0];
// reinterpret the integer as the heap pointer (the old comment said
// "KDTree" — stale copy/paste from the kd-tree project this derives from)
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
/// MEX gateway for [idx, cost] = pq_top(pq): returns the topmost (maximal)
/// element without removing it. idx is converted back to 1-based MATLAB form.
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
// BUGFIX: the error message claimed 3 arguments, but this gateway takes 1
if( nrhs!=1 )
mexErrMsgTxt("This function requires 1 argument\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// query top element in the PQ
pair<double, int> curr = heap->top();
// first output: 1-based element index
plhs[0] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[0]) = curr.second+1;
// second output (cost): only populate when the caller asked for it.
// BUGFIX: plhs[1] was written unconditionally, i.e. even when MATLAB
// requested a single output.
if( nlhs > 1 ){
plhs[1] = mxCreateDoubleMatrix(1, 1, mxREAL);
*mxGetPr(plhs[1]) = curr.first;
}
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_delete.cpp | .cpp | 1,823 | 54 | //==============================================================================
// Name : pq_delete.cpp
// Author : Andrea Tagliasacchi
// Version : 1.0
// Copyright : 2009 (c) Andrea Tagliasacchi
// Description : Frees the memory allocated by the priority queue
//
// May 22, 2009: Created
//==============================================================================
#include "MyHeap.h"
//------------------------------- MATLAB -------------------------------------//
#ifdef MATLAB_MEX_FILE
#include "mex.h"
#define toSysout(...) printf(__VA_ARGS__)
#define exit_with_error(...) \
do { \
fprintf(stdout, "Error: "); \
fprintf(stdout, __VA_ARGS__ ); \
fprintf(stdout, "\n" ); \
exit(1); \
} while(0)
/// Decode the MATLAB-side handle (a 1x1 double holding a pointer value, as
/// produced by pq_create) back into a MaxHeap<double>*; aborts the MEX call
/// with an error when the handle is NULL.
void retrieve_heap( const mxArray* matptr, MaxHeap<double>* & heap){
// retrieve pointer from the MX form
double* pointer0 = mxGetPr(matptr);
// check that I actually received something
if( pointer0 == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
// widen the stored double to an integer large enough to hold an address
unsigned long long pointer1 = (unsigned long long) pointer0[0];
// reinterpret the integer as the heap pointer (the old comment said
// "KDTree" — stale copy/paste from the kd-tree project this derives from)
heap = (MaxHeap<double>*) pointer1;
// check that I actually received something
if( heap == NULL )
mexErrMsgTxt("vararg{1} must be a valid priority queue pointer\n");
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]){
if( nrhs!=1 )
mexErrMsgTxt("This function requires 3 arguments\n");
if( !mxIsNumeric(prhs[0]) )
mexErrMsgTxt("parameter 1 missing!\n");
// retrieve the heap
MaxHeap<double>* heap;
retrieve_heap( prhs[0], heap);
// delete the heap
heap -> ~MaxHeap<double>();
}
#endif
| C++ |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/MyHeap.h | .h | 13,656 | 486 | /**
* @file MyHeaps.h
* @author Andrea Tagliasacchi
* @date 26 March 2008
* @copyright (c) Andrea Tagliasacchi - All rights reserved
*/
//--- CHANGELOG
//
// Sunday Feb 20th 2011:
// corrected bug at .pop() which would not clear index
// in the back-indexing array. Many thanks to Tim Holy
// to point it out
#ifndef MYHEAP_H_
#define MYHEAP_H_
#include <vector>
#include <exception> // general exception
#include <stdexcept> // out_of_range
#include <iostream>
#include <cassert>
#include <algorithm>
#include "float.h"
using namespace std;
// macros for navigation in the hard coded binary tree
#define PARENT(pos) ((pos-1)>>1) // equivalent to floor(pos/2)
#define LEFT(pos) ((pos<<1)+1) // equivalent to pos*2 + 1
#define RIGHT(pos) ((pos<<1)+2) // equivalent to pos*2 + 2
/// EXCEPTION
/// thrown by top()/pop() when the heap contains no elements
class HeapEmptyException : public out_of_range{
public:
HeapEmptyException(const string &message) : out_of_range(message) {;}
};
/// thrown when a push() update moves a key in the illegal direction
/// (decrease on a MaxHeap, increase on a MinHeap)
class InvalidKeyIncreaseException : public out_of_range{
public:
InvalidKeyIncreaseException(const string &message) : out_of_range(message) {;}
};
/// thrown when a pushed element index exceeds the capacity fixed at construction
class InvalidIndexException : public out_of_range{
public:
InvalidIndexException(const string &message) : out_of_range(message) {;}
};
/**
* This class provides a back-indexed heap structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
* author={Cormen, T.T. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MaxHeap{
private:
    /// binary heap stored in a vector; the root (MAXIMAL key) is heap[0]
    vector< pair<Tkey,int> > heap;
    /**
     * back indexes:
     *   * -1  => payload index not currently in the heap
     *   * >=0 => position of that payload index inside `heap`
     */
    vector< int > backIdx;
    /**
     * If useBackIdx==false the structure is a plain heap with no
     * back indexes; random-access key updates are not available.
     */
    bool useBackIdx;
public:
    /// Simple constructor with NO cross updates
    MaxHeap(){
        useBackIdx = false;
    }
    /// Back-indexed constructor used for cross updates; payload indexes must be < Nindex
    MaxHeap( int Nindex ){
        // initialize the back indexes with pseudo-null pointers
        backIdx.resize( Nindex, -1 );
        useBackIdx = true;
    }
    /// Pushes a new (key,index) pair, or increases the key of an index already stored
    void push( Tkey key, int index ){
        if( useBackIdx && index >= (int) backIdx.size() )
            throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
        if( !useBackIdx ){
            // plain heap: always a full insertion at the back, then sift up
            heap.push_back( make_pair(key, index) );
            heapIncreaseKey( heap.size()-1, key );
        }
        else{
            if( backIdx[index] == -1 ){
                // not stored yet: insert at the back and sift up
                heap.push_back( make_pair(key, index) );
                backIdx[ index ] = heap.size()-1;
                heapIncreaseKey( heap.size()-1, key );
            }
            else {
                // already stored: update its key in place
                heapIncreaseKey( backIdx[index], key );
            }
        }
    }
    /// Returns a constant reference to the MAXIMAL-key element (the heap root)
    const pair<Tkey,int>& top() throw(HeapEmptyException){
        if( heap.empty() )
            // NOTE: throws a pointer (`new`) for consistency with the original
            // code base; catch sites must catch HeapEmptyException*
            throw new HeapEmptyException("Impossible to get top element, empty heap");
        else
            return heap[0];
    }
    /// Removes the top element of the queue (the maximal one)
    void pop() throw(HeapEmptyException){
        if( heap.empty() )
            throw new HeapEmptyException("heap underflow");
        // BUGFIX: clear the back index of the element being removed, exactly as
        // MinHeap::pop does; without this, a later push() of the same payload
        // index would wrongly take the update path on a stale heap position.
        if( useBackIdx ){
            backIdx[ heap[0].second ] = -1;
        }
        // overwrite the root with the tail element, shrink, and restore the invariant
        heap[0] = heap.back();
        heap.pop_back();
        maxHeapify(0);
    }
    /// Returns the number of elements currently in the heap
    int size(){
        return heap.size();
    }
    /// Returns true when the heap contains no elements
    bool empty(){
        return heap.empty();
    }
    /// Verifies the heap property using the STL algorithm (debug aid)
    bool verifyHeap( ){
        return std::is_heap(heap.begin(), heap.end() );
    }
private:
    /// Restores the max-heap property on the subtree rooted at currIdx (sift down)
    void maxHeapify(int currIdx){
        unsigned int leftIdx = LEFT( currIdx );
        unsigned int rightIdx = RIGHT( currIdx );
        // pick the largest among current node and its (existing) children
        int largestIdx;
        if( leftIdx < heap.size() && heap[leftIdx].first > heap[currIdx].first )
            largestIdx = leftIdx;
        else
            largestIdx = currIdx;
        if( rightIdx < heap.size() && heap[rightIdx].first > heap[largestIdx].first )
            largestIdx = rightIdx;
        // if a child is larger, swap and continue down the altered subtree
        if( largestIdx != currIdx ){
            swap( currIdx, largestIdx );
            maxHeapify( largestIdx );
        }
    }
    /// Swaps the elements at positions pos1 and pos2, keeping backIdx consistent
    void swap(int pos1, int pos2){
        assert( !heap.empty() );
        assert( pos1>=0 && pos1<(int)heap.size() );
        assert( pos2>=0 && pos2<(int)heap.size() );
        if( useBackIdx ){
            backIdx[ heap[pos1].second ] = pos2;
            backIdx[ heap[pos2].second ] = pos1;
        }
        pair<Tkey,int> temp = heap[pos1];
        heap[pos1] = heap[pos2];
        heap[pos2] = temp;
    }
    /// Raises the key of the element at currIdx and sifts it up toward the root
    void heapIncreaseKey( int currIdx, Tkey key ){
        // only key increases are legal in a max-heap update
        if( key < heap[currIdx].first )
            throw InvalidKeyIncreaseException("In MaxHeaps only increase key updates are legal");
        heap[currIdx].first = key;
        // walk up, swapping with the parent while it holds a smaller key
        int parentIdx = PARENT(currIdx);
        while( currIdx > 0 ){
            if( heap[ parentIdx ].first < heap[ currIdx ].first ){
                swap( currIdx, parentIdx );
                currIdx = parentIdx;
                parentIdx = PARENT(currIdx);
            } else {
                break;
            }
        }
    }
    /// Prints an internal representation of the heap (debug purposes)
    void print() {
        cout << "idxs";
        for ( int i=0; i < size(); i++)
            cout << " " << heap[i].second << " ";
        cout << endl;
        cout << "csts";
        for ( int i=0; i < size(); i++)
            cout << " " << heap[i].first << " ";
        cout << endl;
        cout << endl;
    }
};
/**
 * This class provides a back-indexed heap (MinHeap) structure where indexes of
* elements already in the heap are kept updated to allow for random access
* update of elements (done automatically in push if element with
* "idx" is already contained in the heap )
*
* Refer to the following textbook for details:
* @book{cormen1990ia,
* title={{Introduction to algorithms}},
 * author={Cormen, T.H. and Leiserson, C.E. and Rivest, R.L.},
* year={1990},
* publisher={MIT Press Cambridge, MA, USA}
* }
*/
template <class Tkey>
class MinHeap{
private:
    /// binary heap stored in a vector; the root (MINIMAL key) is heap[0]
    vector< pair<Tkey,int> > heap;
    /**
     * back indexes:
     *   * -1  => payload index not currently in the heap
     *   * >=0 => position of that payload index inside `heap`
     */
    vector< int > backIdx;
    /**
     * If useBackIdx==false the structure is a plain heap with no
     * back indexes; random-access key updates are not available.
     */
    bool useBackIdx;
public:
    /// Back-indexed constructor used for cross updates; payload indexes must be < Nindex
    MinHeap( int Nindex ){
        // initialize the back indexes with pseudo-null pointers
        backIdx.resize( Nindex, -1 );
        useBackIdx = true;
    }
    /// Simple constructor with NO cross updates
    MinHeap(){
        useBackIdx = false;
    }
    /// Pushes a new (key,index) pair, or decreases the key of an index already stored
    void push( Tkey key, int index ){
        if( useBackIdx && index >= (int) backIdx.size() )
            throw InvalidIndexException("the index in the push must be smaller than the maximal allowed index (specified in constructor)");
        if( !useBackIdx ){
            // plain heap: always a full insertion at the back, then sift up
            heap.push_back( make_pair(key, index) );
            heapDecreaseKey( heap.size()-1, key );
        }
        else{
            // BUGFIX: the original condition was
            //   if( useBackIdx || backIdx[index] == -1 )
            // which is always true inside this branch (useBackIdx is true),
            // so the update path below was dead code and re-pushing an index
            // inserted a duplicate heap entry with a corrupted back index.
            if( backIdx[index] == -1 ){
                heap.push_back( make_pair(key, index) );
                backIdx[ index ] = heap.size()-1;
                heapDecreaseKey( heap.size()-1, key );
            }
            else {
                // already stored: update its key in place
                heapDecreaseKey( backIdx[index], key );
            }
        }
    }
    /// Returns a constant reference to the MINIMAL-key element (the heap root)
    const pair<Tkey,int>& top() throw(HeapEmptyException){
        if( heap.empty() )
            // NOTE: throws a pointer (`new`) for consistency with the original
            // code base; catch sites must catch HeapEmptyException*
            throw new HeapEmptyException("Impossible to get top element, empty heap");
        else
            return heap[0];
    }
    /// Removes the top element of the queue (the minimal one)
    void pop() throw(HeapEmptyException){
        if( heap.empty() )
            throw new HeapEmptyException("heap underflow");
        // clear the back index of the element being removed so a later
        // push() of the same payload index inserts instead of updating
        if( useBackIdx ){
            backIdx[ heap[0].second ] = -1;
        }
        // overwrite the root with the tail element, shrink, and restore the invariant
        heap[0] = heap.back();
        heap.pop_back();
        minHeapify(0);
    }
    /// Returns the number of elements currently in the heap
    int size(){
        return heap.size();
    }
    /// Returns true when the heap contains no elements
    bool empty(){
        return heap.empty();
    }
    /// Full heap sort: pops every element, writing payload indexes in
    /// ascending key order. `indexes` must already be allocated.
    void heapsort(vector<int>& indexes){
        int i = 0;
        while( empty() == false ){
            pair<Tkey,int> t = top();
            pop();
            indexes[i++] = t.second;
        }
    }
private:
    /// Restores the min-heap property on the subtree rooted at currIdx (sift down)
    void minHeapify(int currIdx){
        unsigned int leftIdx = LEFT( currIdx );
        unsigned int rightIdx = RIGHT( currIdx );
        // pick the smallest among current node and its (existing) children
        int smallerIdx;
        if( leftIdx < heap.size() && heap[leftIdx].first < heap[currIdx].first )
            smallerIdx = leftIdx;
        else
            smallerIdx = currIdx;
        if( rightIdx < heap.size() && heap[rightIdx].first < heap[smallerIdx].first )
            smallerIdx = rightIdx;
        // if a child is smaller, swap and continue down the altered subtree
        if( smallerIdx != currIdx ){
            swap( currIdx, smallerIdx );
            minHeapify( smallerIdx );
        }
    }
    /// Swaps the elements at positions pos1 and pos2, keeping backIdx consistent
    void swap(int pos1, int pos2){
        assert( !heap.empty() );
        assert( pos1>=0 && pos1<(int)heap.size() );
        assert( pos2>=0 && pos2<(int)heap.size() );
        if( useBackIdx ){
            backIdx[ heap[pos1].second ] = pos2;
            backIdx[ heap[pos2].second ] = pos1;
        }
        pair<Tkey,int> temp = heap[pos1];
        heap[pos1] = heap[pos2];
        heap[pos2] = temp;
    }
    /// Lowers the key of the element at currIdx and sifts it up toward the root
    void heapDecreaseKey( int currIdx, Tkey key ){
        // only key decreases are legal in a min-heap update
        if( key > heap[currIdx].first )
            throw InvalidKeyIncreaseException("In MinHeaps only decrease in key updates are legal");
        heap[currIdx].first = key;
        // walk up, swapping with the parent while it holds a larger key
        int parentIdx = PARENT(currIdx);
        while( currIdx > 0 ){
            if( heap[ parentIdx ].first > heap[ currIdx ].first ){
                swap( currIdx, parentIdx );
                currIdx = parentIdx;
                parentIdx = PARENT(currIdx);
            } else {
                break;
            }
        }
    }
    /// Prints an internal representation of the heap (debug purposes)
public: void print() {
        cout << "idxs";
        for ( int i=0; i < size(); i++)
            cout << " " << heap[i].second << " ";
        cout << endl;
        cout << "csts";
        for ( int i=0; i < size(); i++)
            cout << " " << heap[i].first << " ";
        cout << endl;
        cout << endl;
    }
};
#endif /*MYHEAP_H_*/
| Unknown |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_size.m | .m | 858 | 28 | % PQ_SIZE returns the number of elements in the priority queue
%
% SYNTAX
% sz = pq_size(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% OUTPUT PARAMETERS
% sz: the number of elements in the priority queue
%
% DESCRIPTION
% Queries the priority queue for the number of elements that it contains.
% This number is not the "capacity" or the maximum number of elements which
% is possible to insert but rather the number of elements CURRENTLY in the
% priority queue
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Cormen, T.H. and Leiserson, C.E. and Rivest, R.L., "Introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_delete.m | .m | 570 | 22 | % PQ_DELETE construct a priority queue object
%
% SYNTAX
% pq_delete(pq)
%
% INPUT PARAMETERS
% pq: a pointer to the priority queue
%
% DESCRIPTION
% De-allocates the memory for the priority queue.
%
% See also:
% PQ_DEMO, PQ_CREATE, PQ_PUSH, PQ_POP, PQ_SIZE, PQ_TOP, PQ_DELETE
%
% References:
% Cormen, T.H. and Leiserson, C.E. and Rivest, R.L., "Introduction to
% algorithms", 1990, MIT Press/McGraw-Hill, Chapter 6.
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009 | MATLAB |
3D | stillbreeze/3D-reconstruction-Using-SfM-and-Stereo-Matching | code/SFMedu/denseMatch/priority_queue_1.0/pq_demo.m | .m | 1,014 | 39 | % PQ_DEMO compiles library and illustrate Priority Queue's functionalities
%
% Copyright (c) 2008 Andrea Tagliasacchi
% All Rights Reserved
% email: andrea.tagliasacchi@gmail.com
% $Revision: 1.0$ Created on: May 22, 2009
clc, clear, close all;
% Build the MEX binaries for every priority-queue entry point.
mex pq_create.cpp;
mex pq_push.cpp;
mex pq_pop.cpp;
mex pq_size.cpp;
mex pq_top.cpp;
mex pq_delete.cpp;
% Allocate a queue able to address element indexes up to 10000.
pq = pq_create( 10000 );
% Demo loop: push ten elements with random costs, peeking at the top
% after each push and popping randomly about half of the time.
for i=1:10
    disp(sprintf('\n')); %newline
    %--- create a random entry
    cost = rand(1);
    pq_push(pq, i, cost);
    disp(sprintf('inserted element: [%d,%f]', i, cost ));
    %--- query for the new head (does not remove it)
    [idx,cost] = pq_top(pq);
    newsize = pq_size(pq);
    disp(sprintf('*** |queue| = %d, TOP=[%d,%f]', newsize, idx, cost ));
    %--- randomly pop an element (removes the current top)
    if rand(1)>.5
        disp(sprintf('\n'));
        disp(sprintf('random pop!'));
        [idx,cost] = pq_pop(pq);
        newsize = pq_size(pq);
        disp(sprintf('*** |queue| = %d, POPPED=[%d,%f]', newsize, idx, cost ));
    end
end
pq_delete(pq); | MATLAB |
3D | oopil/3D_medical_image_FSS | FSS1000/train.py | .py | 7,425 | 180 | """Training Script"""
import os
import shutil
import numpy as np
import pdb
import random
import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
import torchvision.transforms as transforms
from torchvision.utils import make_grid
if __name__ == '__main__':
from util.utils import set_seed, CLASS_LABELS, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.common import *
from dataloaders_medical.prostate import *
from models.encoder import SupportEncoder, QueryEncoder
from models.decoder import Decoder
else:
from .util.utils import set_seed, CLASS_LABELS, date
from .config import ex
from tensorboardX import SummaryWriter
from .dataloaders_medical.common import *
from .dataloaders_medical.prostate import *
from .models.encoder import SupportEncoder, QueryEncoder
from .models.decoder import Decoder
def overlay_color(img, mask, label, scale=50):
    """Blend a grayscale image with a prediction mask and a ground-truth label.

    :param img: tensor [1, H, W], grayscale query image
    :param mask: tensor [1, H, W], binary prediction mask
    :param label: tensor [1, H, W], binary ground-truth mask
    :param scale: effectively unused -- immediately overwritten below
    :return: single-element list holding one [3, H, W] overlay tensor
    """
    # pdb.set_trace()
    # NOTE(review): the `scale` parameter is shadowed here, so the value callers
    # pass (e.g. _config['scale']) has no effect -- confirm this is intentional.
    scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    zeros = torch.zeros_like(mask)
    # three references to the SAME zero tensor; entries are replaced below
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    # channel stack [mask, 0, 0]: the prediction tints the red channel
    mask = torch.stack(zeros, dim=0)
    zeros[1] = label
    # NOTE(review): zeros[0] still holds the prediction, so this stack is
    # [mask, label, 0]; in the sum below the mask is therefore added to the
    # red channel twice -- verify this double-weighting is intended.
    label = torch.stack(zeros, dim=0)
    img_3ch = torch.cat([img, img, img], dim=0)
    masked = img_3ch + mask.float() * scale + label.float() * scale
    return [masked]
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: trains the few-shot segmentation model
    (support/query encoders + decoder) and saves a checkpoint per epoch."""
    # Archive the run's source files for reproducibility, then drop
    # sacred's default _sources copy.
    if _run.observers:
        os.makedirs(f'{_run.observers[0].dir}/snapshots', exist_ok=True)
        for source_file, _ in _run.experiment_info['sources']:
            os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                        exist_ok=True)
            _run.observers[0].save_file(source_file, f'source/{source_file}')
        shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    device = torch.device(f"cuda:{_config['gpu_id']}")
    # Encoder output resolution after n_pool 2x poolings.
    resize_dim = _config['input_size']
    encoded_h = int(resize_dim[0] / 2**_config['n_pool'])
    encoded_w = int(resize_dim[1] / 2**_config['n_pool'])
    s_encoder = SupportEncoder(_config['path']['init_path'], device)#.to(device)
    q_encoder = QueryEncoder(_config['path']['init_path'], device)#.to(device)
    decoder = Decoder(input_res=(encoded_h, encoded_w), output_res=resize_dim).to(device)
    _log.info('###### Load data ######')
    data_name = _config['dataset']
    if data_name == 'prostate':
        make_data = meta_data
    else:
        raise ValueError('Wrong config for dataset!')
    tr_dataset, val_dataset, ts_dataset = make_data(_config)
    trainloader = DataLoader(
        dataset=tr_dataset,
        batch_size=_config['batch_size'],
        shuffle=True,
        num_workers=_config['n_work'],
        pin_memory=False, #True load data while training gpu
        drop_last=True
    )
    _log.info('###### Set optimizer ######')
    print(_config['optim'])
    optimizer = torch.optim.Adam(#list(initializer.parameters()) +
                                 list(s_encoder.parameters()) +
                                 list(q_encoder.parameters()) +
                                 list(decoder.parameters()),
                                 _config['optim']['lr'])
    # NOTE(review): `scheduler` and `pos_weight` are created but never used
    # below (scheduler.step() is never called; BCELoss takes no pos_weight).
    scheduler = MultiStepLR(optimizer, milestones=_config['lr_milestones'], gamma=0.1)
    pos_weight = torch.tensor([0.3 , 1], dtype=torch.float).to(device)
    criterion = nn.BCELoss()
    if _config['record']: ## tensorboard visualization
        _log.info('###### define tensorboard writer #####')
        _log.info(f'##### board/train_{_config["board"]}_{date()}')
        writer = SummaryWriter(f'board/train_{_config["board"]}_{date()}')
    iter_n_train = len(trainloader)
    _log.info('###### Training ######')
    for i_epoch in range(_config['n_steps']):
        loss_epoch = 0
        blank = torch.zeros([1, 256, 256]).to(device)
        for i_iter, sample_train in enumerate(trainloader):
            ## training stage
            optimizer.zero_grad()
            s_x = sample_train['s_x'].to(device) # [B, Support, slice_num, 1, 256, 256]
            s_y = sample_train['s_y'].to(device) # [B, Support, slice_num, 1, 256, 256]
            q_x = sample_train['q_x'].to(device) #[B, slice_num, 1, 256, 256]
            q_y = sample_train['q_y'].to(device) #[B, slice_num, 1, 256, 256]
            # loss_per_video = 0.0
            s_xi = s_x[:, :, 0, :, :, :] # [B, Support, 1, 256, 256]
            s_yi = s_y[:, :, 0, :, :, :]
            # for s_idx in range(_config["n_shot"]):
            # Fold the support (shot) dimension into the batch for one encoder pass.
            s_x_merge = s_xi.view(s_xi.size(0) * s_xi.size(1), 1, 256, 256)
            s_y_merge = s_yi.view(s_yi.size(0) * s_yi.size(1), 1, 256, 256)
            s_xi_encode_merge, _ = s_encoder(s_x_merge, s_y_merge) # [B*S, 512, w, h]
            s_xi_encode = s_xi_encode_merge.view(s_yi.size(0), s_yi.size(1), 512, encoded_w, encoded_h)
            # Average the support embeddings over the shot dimension.
            s_xi_encode_avg = torch.mean(s_xi_encode, dim=1)
            # s_xi_encode, _ = s_encoder(s_xi, s_yi) # [B, 512, w, h]
            q_xi = q_x[:, 0, :, :, :]
            q_yi = q_y[:, 0, :, :, :]
            q_xi_encode, q_ft_list = q_encoder(q_xi)
            # Concatenate averaged support features with query features for decoding.
            sq_xi = torch.cat((s_xi_encode_avg, q_xi_encode), dim=1)
            yhati = decoder(sq_xi, q_ft_list) # [B, 1, 256, 256]
            loss = criterion(yhati, q_yi)
            # loss_per_video += loss
            # loss_per_video.backward()
            loss.backward()
            optimizer.step()
            loss_epoch += loss
            print(f"train, iter:{i_iter}/{iter_n_train}, iter_loss:{loss}", end='\r')
            if _config['record'] and i_iter == 0:
                batch_i = 0
                frames = []
                frames += overlay_color(q_xi[batch_i], yhati[batch_i].round(), q_yi[batch_i], scale=_config['scale'])
                visual = make_grid(frames, normalize=True, nrow=2)
                writer.add_image("train/visual", visual, i_epoch)
            # NOTE(review): duplicated visualization block; it logs the SAME
            # training batch under "valid/visual" -- confirm this is intended
            # (no validation pass happens in this loop).
            if _config['record'] and i_iter == 0:
                batch_i = 0
                frames = []
                frames += overlay_color(q_xi[batch_i], yhati[batch_i].round(), q_yi[batch_i],
                                        scale=_config['scale'])
                # frames += overlay_color(s_xi[batch_i], blank, s_yi[batch_i], scale=_config['scale'])
                visual = make_grid(frames, normalize=True, nrow=5)
                writer.add_image("valid/visual", visual, i_epoch)
        print(f"train - epoch:{i_epoch}/{_config['n_steps']}, epoch_loss:{loss_epoch}", end='\n')
        # Checkpoint (overwritten every epoch) and scalar logging.
        save_fname = f'{_run.observers[0].dir}/snapshots/last.pth'
        _run.log_scalar("training.loss", float(loss_epoch), i_epoch)
        if _config['record']:
            writer.add_scalar('loss/train_loss', loss_epoch, i_epoch)
        torch.save({
            's_encoder': s_encoder.state_dict(),
            'q_encoder': q_encoder.state_dict(),
            'decoder': decoder.state_dict(),
            'optimizer': optimizer.state_dict(),
            }, save_fname
        )
    # NOTE(review): writer only exists when _config['record'] is truthy;
    # this close() would raise NameError otherwise -- confirm.
    writer.close()
3D | oopil/3D_medical_image_FSS | FSS1000/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/test.py | .py | 9,591 | 241 | """Evaluation Script"""
import os
import shutil
import pdb
import tqdm
import numpy as np
import torch
import torch.optim
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
from torchvision.utils import make_grid
from math import isnan
from models.encoder import SupportEncoder, QueryEncoder
from models.decoder import Decoder
# from util.metric import Metric
from util.utils import set_seed, CLASS_LABELS, get_bbox, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.prostate import *
import SimpleITK as sitk
def overlay_color(img, mask, label, scale=50):
    """Blend a grayscale image with a prediction mask and a ground-truth label.

    :param img: tensor [1, H, W], grayscale query image
    :param mask: tensor [1, H, W], binary prediction mask
    :param label: tensor [1, H, W], binary ground-truth mask
    :param scale: effectively unused -- immediately overwritten below
    :return: single-element list holding one [3, H, W] overlay tensor
    """
    # pdb.set_trace()
    # NOTE(review): the `scale` parameter is shadowed here, so the value callers
    # pass (e.g. _config['scale']) has no effect -- confirm this is intentional.
    scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    zeros = torch.zeros_like(mask)
    # three references to the SAME zero tensor; entries are replaced below
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    # channel stack [mask, 0, 0]: the prediction tints the red channel
    mask = torch.stack(zeros, dim=0)
    zeros[1] = label
    # NOTE(review): zeros[0] still holds the prediction, so this stack is
    # [mask, label, 0]; in the sum below the mask is therefore added to the
    # red channel twice -- verify this double-weighting is intended.
    label = torch.stack(zeros, dim=0)
    img_3ch = torch.cat([img, img, img], dim=0)
    masked = img_3ch + mask.float() * scale + label.float() * scale
    return [masked]
@ex.automain
def main(_run, _config, _log):
for source_file, _ in _run.experiment_info['sources']:
os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
exist_ok=True)
_run.observers[0].save_file(source_file, f'source/{source_file}')
shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
set_seed(_config['seed'])
cudnn.enabled = True
cudnn.benchmark = True
torch.cuda.set_device(device=_config['gpu_id'])
torch.set_num_threads(1)
device = torch.device(f"cuda:{_config['gpu_id']}")
_log.info('###### Create model ######')
resize_dim = _config['input_size']
encoded_h = int(resize_dim[0] / 2**_config['n_pool'])
encoded_w = int(resize_dim[1] / 2**_config['n_pool'])
s_encoder = SupportEncoder(_config['path']['init_path'], device)#.to(device)
q_encoder = QueryEncoder(_config['path']['init_path'], device)#.to(device)
decoder = Decoder(input_res=(encoded_h, encoded_w), output_res=resize_dim).to(device)
checkpoint = torch.load(_config['snapshot'], map_location='cpu')
s_encoder.load_state_dict(checkpoint['s_encoder'])
q_encoder.load_state_dict(checkpoint['q_encoder'])
decoder.load_state_dict(checkpoint['decoder'])
# initializer.eval()
# encoder.eval()
# convlstmcell.eval()
# decoder.eval()
_log.info('###### Load data ######')
data_name = _config['dataset']
make_data = meta_data
max_label = 1
tr_dataset, val_dataset, ts_dataset = make_data(_config)
testloader = DataLoader(
dataset=ts_dataset,
batch_size=1,
shuffle=False,
# num_workers=_config['n_work'],
pin_memory=False, # True
drop_last=False
)
# all_samples = test_loader_Spleen()
# all_samples = test_loader_Prostate()
if _config['record']:
_log.info('###### define tensorboard writer #####')
board_name = f'board/test_{_config["board"]}_{date()}'
writer = SummaryWriter(board_name)
_log.info('###### Testing begins ######')
# metric = Metric(max_label=max_label, n_runs=_config['n_runs'])
img_cnt = 0
# length = len(all_samples)
length = len(testloader)
img_lists = []
pred_lists = []
label_lists = []
saves = {}
for subj_idx in range(len(ts_dataset.get_cnts())):
saves[subj_idx] = []
with torch.no_grad():
loss_valid = 0
batch_i = 0 # use only 1 batch size for testing
for i, sample_test in enumerate(testloader): # even for upward, down for downward
subj_idx, idx = ts_dataset.get_test_subj_idx(i)
img_list = []
pred_list = []
label_list = []
preds = []
fnames = sample_test['q_fname']
s_x = sample_test['s_x'].to(device) # [B, slice_num, 1, 256, 256]
s_y = sample_test['s_y'].to(device) # [B, slice_num, 1, 256, 256]
q_x = sample_test['q_x'].to(device) # [B, slice_num, 1, 256, 256]
q_y = sample_test['q_y'].to(device) # [B, slice_num, 1, 256, 256]
s_xi = s_x[:, :, 0, :, :, :] # [B, Support, 1, 256, 256]
s_yi = s_y[:, :, 0, :, :, :]
for s_idx in range(_config["n_shot"]):
s_x_merge = s_xi.view(s_xi.size(0) * s_xi.size(1), 1, 256, 256)
s_y_merge = s_yi.view(s_yi.size(0) * s_yi.size(1), 1, 256, 256)
s_xi_encode_merge, _ = s_encoder(s_x_merge, s_y_merge) # [B*S, 512, w, h]
s_xi_encode = s_xi_encode_merge.view(s_yi.size(0), s_yi.size(1), 512, encoded_w, encoded_h)
s_xi_encode_avg = torch.mean(s_xi_encode, dim=1)
# s_xi_encode, _ = s_encoder(s_xi, s_yi) # [B, 512, w, h]
q_xi = q_x[:, 0, :, :, :]
q_yi = q_y[:, 0, :, :, :]
q_xi_encode, q_ft_list = q_encoder(q_xi)
sq_xi = torch.cat((s_xi_encode_avg, q_xi_encode), dim=1)
yhati = decoder(sq_xi, q_ft_list) # [B, 1, 256, 256]
preds.append(yhati.round())
img_list.append(q_xi[batch_i].cpu().numpy())
pred_list.append(yhati[batch_i].round().cpu().numpy())
label_list.append(q_yi[batch_i].cpu().numpy())
saves[subj_idx].append([subj_idx, idx, img_list, pred_list, label_list, fnames])
print(f"test, iter:{i}/{length} - {subj_idx}/{idx} \t\t", end='\r')
img_lists.append(img_list)
pred_lists.append(pred_list)
label_lists.append(label_list)
print("start computing dice similarities ... total ", len(saves))
dice_similarities = []
for subj_idx in range(len(saves)):
imgs, preds, labels = [], [], []
save_subj = saves[subj_idx]
for i in range(len(save_subj)):
# print(len(save_subj), len(save_subj)-q_slice_n+1, q_slice_n, i)
subj_idx, idx, img_list, pred_list, label_list, fnames = save_subj[i]
# print(subj_idx, idx, is_reverse, len(img_list))
# print(i, is_reverse, is_reverse_next, is_flip)
for j in range(len(img_list)):
imgs.append(img_list[j])
preds.append(pred_list[j])
labels.append(label_list[j])
# pdb.set_trace()
img_arr = np.concatenate(imgs, axis=0)
pred_arr = np.concatenate(preds, axis=0)
label_arr = np.concatenate(labels, axis=0)
# print(ts_dataset.slice_cnts[subj_idx] , len(imgs))
# pdb.set_trace()
dice = np.sum([label_arr * pred_arr]) * 2.0 / (np.sum(pred_arr) + np.sum(label_arr))
dice_similarities.append(dice)
print(f"computing dice scores {subj_idx}/{10}", end='\n')
if _config['record']:
frames = []
for frame_id in range(0, len(save_subj)):
frames += overlay_color(torch.tensor(imgs[frame_id]), torch.tensor(preds[frame_id]), torch.tensor(labels[frame_id]), scale=_config['scale'])
visual = make_grid(frames, normalize=True, nrow=5)
writer.add_image(f"test/{subj_idx}", visual, i)
writer.add_scalar(f'dice_score/{i}', dice)
if _config['save_sample']:
## only for internal test (BCV - MICCAI2015)
sup_idx = _config['s_idx']
target = _config['target']
save_name = _config['save_name']
dirs = ["gt", "pred", "input"]
save_dir = f"/user/home2/soopil/tmp/PANet/MICCAI2015/sample/fss1000_organ{target}_sup{sup_idx}_{save_name}"
for dir in dirs:
try:
os.makedirs(os.path.join(save_dir,dir))
except:
pass
subj_name = fnames[0][0].split("/")[-2]
if target == 14:
src_dir = "/user/home2/soopil/Datasets/MICCAI2015challenge/Cervix/RawData/Training/img"
orig_fname = f"{src_dir}/{subj_name}-Image.nii.gz"
pass
else:
src_dir = "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training/img"
orig_fname = f"{src_dir}/img{subj_name}.nii.gz"
itk = sitk.ReadImage(orig_fname)
orig_spacing = itk.GetSpacing()
label_arr = label_arr*2.0
# label_arr = np.concatenate([np.zeros([1,256,256]), label_arr,np.zeros([1,256,256])])
# pred_arr = np.concatenate([np.zeros([1,256,256]), pred_arr,np.zeros([1,256,256])])
# img_arr = np.concatenate([np.zeros([1,256,256]), img_arr,np.zeros([1,256,256])])
# pdb.set_trace()
itk = sitk.GetImageFromArray(label_arr)
itk.SetSpacing(orig_spacing)
sitk.WriteImage(itk,f"{save_dir}/gt/{subj_idx}.nii.gz")
itk = sitk.GetImageFromArray(pred_arr.astype(float))
itk.SetSpacing(orig_spacing)
sitk.WriteImage(itk,f"{save_dir}/pred/{subj_idx}.nii.gz")
itk = sitk.GetImageFromArray(img_arr.astype(float))
itk.SetSpacing(orig_spacing)
sitk.WriteImage(itk,f"{save_dir}/input/{subj_idx}.nii.gz")
print(f"test result \n n : {len(dice_similarities)}, mean dice score : \
{np.mean(dice_similarities)} \n dice similarities : {dice_similarities}")
if _config['record']:
writer.add_scalar(f'dice_score/mean', np.mean(dice_similarities))
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/test_visual.py | .py | 5,553 | 156 | """Evaluation Script"""
import os
import shutil
import pdb
import tqdm
import numpy as np
import torch
import torch.optim
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
from torchvision.utils import make_grid
from math import isnan
from models.encoder import SupportEncoder, QueryEncoder
from models.decoder import Decoder
# from util.metric import Metric
from util.utils import set_seed, CLASS_LABELS, get_bbox, date
from config import ex
# from tensorboardX import SummaryWriter
from dataloaders_medical.prostate import *
import matplotlib.pyplot as plt
def try_mkdirs(path):
    """Create directory `path` (including parents).

    :param path: directory path to create
    :return: True if the directory was created, False if it already exists
        or cannot be created
    """
    try:
        os.makedirs(path)
        return True
    # Narrowed from a bare `except`, which also swallowed KeyboardInterrupt
    # and SystemExit; os.makedirs failures all surface as OSError.
    except OSError:
        return False
@ex.automain
def main(_run, _config, _log):
for source_file, _ in _run.experiment_info['sources']:
os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
exist_ok=True)
_run.observers[0].save_file(source_file, f'source/{source_file}')
shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
set_seed(_config['seed'])
cudnn.enabled = True
cudnn.benchmark = True
torch.cuda.set_device(device=_config['gpu_id'])
torch.set_num_threads(1)
device = torch.device(f"cuda:{_config['gpu_id']}")
_log.info('###### Create model ######')
resize_dim = _config['input_size']
encoded_h = int(resize_dim[0] / 2**_config['n_pool'])
encoded_w = int(resize_dim[1] / 2**_config['n_pool'])
s_encoder = SupportEncoder(_config['path']['init_path'], device)#.to(device)
q_encoder = QueryEncoder(_config['path']['init_path'], device)#.to(device)
decoder = Decoder(input_res=(encoded_h, encoded_w), output_res=resize_dim).to(device)
checkpoint = torch.load(_config['snapshot'], map_location='cpu')
s_encoder.load_state_dict(checkpoint['s_encoder'])
q_encoder.load_state_dict(checkpoint['q_encoder'])
decoder.load_state_dict(checkpoint['decoder'])
# initializer.eval()
# encoder.eval()
# convlstmcell.eval()
# decoder.eval()
_log.info('###### Load data ######')
data_name = _config['dataset']
make_data = meta_data
max_label = 1
tr_dataset, val_dataset, ts_dataset = make_data(_config)
testloader = DataLoader(
dataset=ts_dataset,
batch_size=1,
shuffle=False,
# num_workers=_config['n_work'],
pin_memory=False, # True
drop_last=False
)
_log.info('###### Testing begins ######')
# metric = Metric(max_label=max_label, n_runs=_config['n_runs'])
img_cnt = 0
# length = len(all_samples)
length = len(testloader)
img_lists = []
pred_lists = []
label_lists = []
saves = {}
for subj_idx in range(len(ts_dataset.get_cnts())):
saves[subj_idx] = []
with torch.no_grad():
loss_valid = 0
batch_i = 0 # use only 1 batch size for testing
for i, sample_test in enumerate(testloader): # even for upward, down for downward
subj_idx, idx = ts_dataset.get_test_subj_idx(i)
img_list = []
pred_list = []
label_list = []
preds = []
s_x = sample_test['s_x'].to(device) # [B, slice_num, 1, 256, 256]
s_y = sample_test['s_y'].to(device) # [B, slice_num, 1, 256, 256]
q_x = sample_test['q_x'].to(device) # [B, slice_num, 1, 256, 256]
q_y = sample_test['q_y'].to(device) # [B, slice_num, 1, 256, 256]
s_fname = sample_test['s_fname']
q_fname = sample_test['q_fname']
s_xi = s_x[:, 0, :, :, :] #[B, 1, 256, 256]
s_yi = s_y[:, 0, :, :, :]
s_xi_encode, _ = s_encoder(s_xi, s_yi) #[B, 512, w, h]
q_xi = q_x[:, 0, :, :, :]
q_yi = q_y[:, 0, :, :, :]
q_xi_encode, q_ft_list = q_encoder(q_xi)
sq_xi = torch.cat((s_xi_encode, q_xi_encode),dim=1)
yhati = decoder(sq_xi, q_ft_list) # [B, 1, 256, 256]
preds.append(yhati.round())
img_list.append(q_xi[batch_i].cpu().numpy())
pred_list.append(yhati[batch_i].round().cpu().numpy())
label_list.append(q_yi[batch_i].cpu().numpy())
saves[subj_idx].append([subj_idx, idx, img_list, pred_list, label_list])
print(f"test, iter:{i}/{length} - {subj_idx}/{idx} \t\t", end='\r')
img_lists.append(img_list)
pred_lists.append(pred_list)
label_lists.append(label_list)
q_fname_split = q_fname[0][0].split("/")
q_fname_split[-6] = "Training_2d_2_pred"
try_mkdirs("/".join(q_fname_split[:-1]))
o_q_fname = "/".join(q_fname_split)
np.save(o_q_fname,yhati.round().cpu().numpy())
# print(q_fname[0][0])
# print(o_q_fname)
try_mkdirs("figure")
print("start computing dice similarities ... total ", len(saves))
for subj_idx in range(len(saves)):
save_subj = saves[subj_idx]
dices = []
for slice_idx in range(len(save_subj)):
subj_idx, idx, img_list, pred_list, label_list = save_subj[slice_idx]
for j in range(len(img_list)):
dice = np.sum([label_list[j] * pred_list[j]]) * 2.0 / (np.sum(pred_list[j]) + np.sum(label_list[j]))
dices.append(dice)
plt.clf()
plt.bar([k for k in range(len(dices))],dices)
plt.savefig(f"figure/bar_{_config['target']}_{subj_idx}.png") | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/config.py | .py | 3,930 | 147 | """Experiment Configuration"""
import os
import re
import glob
import itertools
import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
sacred.SETTINGS.CAPTURE_MODE = 'no'
ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds
source_folders = ['.', './dataloaders', './models', './util', './dataloaders_medical']
sources_to_save = list(itertools.chain.from_iterable(
[glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
ex.add_source_file(source_file)
# "Organs" : ["background",
# "spleen",
# "right kidney",
# "left kidney",
# "gallbladder",
# "esophagus",
# "liver",
# "stomach",
# "aorta",
# "inferior vana cava",
# "portal vein & splenic vein",
# "pancreas",
# "right adrenal gland",
# "left adrenal gland",
# ],
@ex.config
def cfg():
"""Default configurations"""
server="144" #202
# size = 320
size = 256
input_size = (size, size) # 419? 480!
seed = 1234
cuda_visable = '0, 1, 2, 3, 4, 5, 6, 7'
gpu_id = 0
mode = 'test' # 'train' or 'test'
record = False
scale = 1.0
n_shot = 3
s_idx = 0
n_pool = 3 # 4 - number of pooling
target = 1
add_target = 0
external_test = "None" # "decathlon" # "CT_ORG"
if external_test == "None":
internal_test = True
else:
internal_test = False
if mode == 'train':
lr_milestones = [50*i for i in range(1,3)]
n_iter = 500
dataset = 'prostate' # 'VOC' or 'COCO'
n_steps = 300
n_work = 1
batch_size = 5
print_interval = 500
validation_interval = 500
save_pred_every = 10000
val_cnt = 100
model = {
'align': False,
# 'align': True,
}
optim = {
'lr': 1e-4,
'momentum': 0.9,
'weight_decay': 0.0005,
}
elif mode == 'test':
save_sample = False
save_name = ""
is_test=True
dataset = 'prostate' # 'VOC' or 'COCO'
notrain = False
# snapshot = './runs/PANet_VOC_sets_0_1way_1shot_[train]/1/snapshots/30000.pth'
snapshot = '/user/home2/soopil/tmp/PANet/runs/PANet_VOC_sets_0_3way_5shot_[train]/2/snapshots/50000.pth'
n_iter = 1
n_runs = 1
n_steps = 1000
batch_size = 1
scribble_dilation = 0
bbox = False
scribble = False
HE = False
# Set dataset config from the snapshot string
# Set model config from the snapshot string
model = {}
for key in ['align',]:
model[key] = key in snapshot
else:
raise ValueError('Wrong configuration for "mode" !')
exp_str = '_'.join([
mode,
])
data_srcs = {
"144":"/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d_2",
"202":"/data2/soopil/MICCAI2015challenge/Abdomen/RawData/Training_2d_2",
}
data_src = data_srcs[str(server)]
path = {
'log_dir': './runs',
# 'init_path': None,
'init_path': './../../pretrained_model/vgg16-397923af.pth',
}
### configuration for Medical Image Test
modal_index = 0 #["flair","t1","t1ce","t2"]
mask_index = 1 #[1, 2, 4]
board=""
@ex.config_hook
def add_observer(config, command_name, logger):
"""A hook function to add observer"""
exp_name = f'{ex.path}_{config["exp_str"]}'
observer = FileStorageObserver.create(os.path.join(config['path']['log_dir'], exp_name))
ex.observers.append(observer)
return config
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/util/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/util/utils.py | .py | 2,004 | 74 | """Util functions"""
import random
import torch
import numpy as np
import os
from datetime import datetime
def try_mkdir(path):
try:
os.mkdir(path)
print(f"mkdir : {path}")
except:
print(f"failed to make a directory : {path}")
def date():
now = datetime.now()
string = now.year + now.month + now.day
string = now.strftime('%Y%m%d_%H%M%S')
return string
def set_seed(seed):
"""
Set the random seed
"""
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
CLASS_LABELS = {
'VOC': {
'all': set(range(1, 21)),
0: set(range(1, 21)) - set(range(1, 6)),
1: set(range(1, 21)) - set(range(6, 11)),
2: set(range(1, 21)) - set(range(11, 16)),
3: set(range(1, 21)) - set(range(16, 21)),
},
'COCO': {
'all': set(range(1, 81)),
0: set(range(1, 81)) - set(range(1, 21)),
1: set(range(1, 81)) - set(range(21, 41)),
2: set(range(1, 81)) - set(range(41, 61)),
3: set(range(1, 81)) - set(range(61, 81)),
}
}
def get_bbox(fg_mask, inst_mask):
"""
Get the ground truth bounding boxes
"""
fg_bbox = torch.zeros_like(fg_mask, device=fg_mask.device)
bg_bbox = torch.ones_like(fg_mask, device=fg_mask.device)
inst_mask[fg_mask == 0] = 0
area = torch.bincount(inst_mask.view(-1))
cls_id = area[1:].argmax() + 1
cls_ids = np.unique(inst_mask)[1:]
mask_idx = np.where(inst_mask[0] == cls_id)
y_min = mask_idx[0].min()
y_max = mask_idx[0].max()
x_min = mask_idx[1].min()
x_max = mask_idx[1].max()
fg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 1
for i in cls_ids:
mask_idx = np.where(inst_mask[0] == i)
y_min = max(mask_idx[0].min(), 0)
y_max = min(mask_idx[0].max(), fg_mask.shape[1] - 1)
x_min = max(mask_idx[1].min(), 0)
x_max = min(mask_idx[1].max(), fg_mask.shape[2] - 1)
bg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 0
return fg_bbox, bg_bbox
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/models/decoder.py | .py | 6,184 | 149 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class Decoder(nn.Module):
def __init__(self, input_channels=512, input_res=(8, 14), init_channels=512, shrink_per_block=2, output_channels=1,
output_res=(256, 448)):
super(Decoder, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(512 + 512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, momentum=1, affine=True),
nn.ReLU()
)
self.layer2 = nn.Sequential(
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, momentum=1, affine=True),
nn.ReLU()
)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.double_conv1 = nn.Sequential(
nn.Conv2d(512 + 512*1, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, momentum=1, affine=True),
nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1),
nn.BatchNorm2d(512, momentum=1, affine=True),
nn.ReLU()
) # 14 x 14
self.double_conv2 = nn.Sequential(
nn.Conv2d(512 + 512*1, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256, momentum=1, affine=True),
nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.BatchNorm2d(256, momentum=1, affine=True),
nn.ReLU()
) # 28 x 28
self.double_conv3 = nn.Sequential(
nn.Conv2d(256 + 256*1, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128, momentum=1, affine=True),
nn.ReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1),
nn.BatchNorm2d(128, momentum=1, affine=True),
nn.ReLU()
) # 56 x 56
self.double_conv4 = nn.Sequential(
nn.Conv2d(128 + 128*1, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU()
) # 112 x 112
self.double_conv5 = nn.Sequential(
nn.Conv2d(64 + 64 * 1, 64, kernel_size=3, padding=1),
nn.BatchNorm2d(64, momentum=1, affine=True),
nn.ReLU(),
# nn.Conv2d(64, 2, kernel_size=1, padding=0),
nn.Conv2d(64, 1, kernel_size=1, padding=0), # 1 for bce and 2 for cross entropy loss
nn.Sigmoid()
) # 256 x 256
# x = F.interpolate(x, orig_size, mode="bilinear")
self._init_weights()
def mask_process(self, mask):
# x = F.interpolate(x, orig_size, mode="bilinear")
mask = F.interpolate(mask, [16,16], mode="bilinear")
def forward(self, hidden, ft_list):
out = self.layer1(hidden)
out = self.layer2(out)
# out = self.upsample(out) # block 1
out = torch.cat((out, ft_list[-1]), dim=1)
out = self.double_conv1(out)
# out = self.upsample(out) # block 2
out = torch.cat((out, ft_list[-2]), dim=1)
out = self.double_conv2(out)
out = self.upsample(out) # block 3
out = torch.cat((out, ft_list[-3]), dim=1)
out = self.double_conv3(out)
out = self.upsample(out) # block 4
out = torch.cat((out, ft_list[-4]), dim=1)
out = self.double_conv4(out)
out = self.upsample(out) # block 5
out = torch.cat((out, ft_list[-5]), dim=1)
out = self.double_conv5(out)
# out = F.sigmoid(out)
# out = torch.squeeze(out)
return out
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# torch.nn.init.normal_(m.weight)
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
class Decoder_save(nn.Module):
def __init__(self, input_channels=512, input_res=(8, 14), init_channels=512, shrink_per_block=2, output_channels=1,
output_res=(256, 448)):
super(Decoder, self).__init__()
self.input_channels = input_channels
self.input_res = np.array(input_res)
self.init_channels = init_channels
self.shrink_per_block = shrink_per_block
self.output_channels = output_channels
self.output_res = np.array(output_res)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.net = self.network(self.input_channels, self.input_res, self.init_channels, self.shrink_per_block, self.output_channels, self.output_res)
def network(self, input_channels, input_res, init_channels, shrink_per_block, output_channels, output_res):
modules = []
prev_channels = input_channels
# print('0', prev_channels, input_channels, input_res, output_res)
while True:
# print('1', prev_channels, init_channels)
modules.append(conv_unit(in_ch=prev_channels, out_ch=init_channels, kernel_size=5, stride=1, padding=2))
# print('2', prev_channels, init_channels)
if np.array_equal(input_res, output_res):
modules.append(
conv_unit(in_ch=init_channels, out_ch=output_channels, kernel_size=5, stride=1, padding=2, activation='sigmoid'))
# print('3', prev_channels, init_channels)
break
else:
modules.append(conv_unit(in_ch=init_channels, out_ch=init_channels, kernel_size=5, stride=1, padding=2))
# print('4', prev_channels, init_channels)
modules.append(self.upsample)
input_res *= 2
prev_channels = init_channels
if init_channels > 64:
init_channels = int(init_channels / shrink_per_block)
# print('5', prev_channels, init_channels)
return nn.Sequential(*modules)
def forward(self, h):
return self.net(h)
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/models/encoder.py | .py | 2,230 | 66 | import pdb
import torch
import torch.nn as nn
import torchvision
from .vgg import Encoder_vgg
from .attention import PAM_Module, CAM_Module
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class SupportEncoder(nn.Module):
def __init__(self, pretrained_path, device):
super(SupportEncoder, self).__init__()
self.encoder_list = list(Encoder_vgg(in_channels=2, pretrained_path=pretrained_path).features.to(device))
self.conv1x1 = conv_unit(in_ch=512, out_ch=512, kernel_size=1, activation='relu').to(device)
self.attention=False #True
if self.attention:
self.pam = PAM_Module(in_dim=512).to(device)
self.cam = CAM_Module(in_dim=512).to(device)
def forward(self, x,y):
out = torch.cat((x,y),dim=1)
ft_list = []
for model_i, model in enumerate(self.encoder_list):
out = model(out)
if model_i % 2 == 0:
ft_list.append(out)
if self.attention:
out = self.pam(out)+self.cam(out)
out = self.conv1x1(out)
return out, ft_list[:]
class QueryEncoder(nn.Module):
def __init__(self, pretrained_path, device):
super(QueryEncoder, self).__init__()
self.encoder_list = list(Encoder_vgg(in_channels=1, pretrained_path=pretrained_path).features.to(device))
self.conv1x1 = conv_unit(in_ch=512, out_ch=512, kernel_size=1, activation='relu').to(device)
self.attention=False #True
if self.attention:
self.pam = PAM_Module(in_dim=512).to(device)
self.cam = CAM_Module(in_dim=512).to(device)
def forward(self, x):
## data set preprocessing ( youtube vos, decathlon )
## get the encoded list -> skip connection
## change the vgg model from Few shot segmentation model
# pdb.set_trace()
ft_list = []
for model_i, model in enumerate(self.encoder_list):
x = model(x)
if model_i % 2 == 0:
ft_list.append(x)
if self.attention:
x = self.pam(x)+self.cam(x)
x = self.conv1x1(x)
return x, ft_list[:]
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/models/nnutils.py | .py | 1,670 | 51 | import torch
import torch.nn as nn
def conv_unit(in_ch, out_ch, kernel_size, stride = 1, padding = 0, activation = 'relu', batch_norm = True):
seq_list = []
seq_list.append(nn.Conv2d(in_channels = in_ch, out_channels = out_ch, kernel_size = kernel_size, stride = stride, padding = padding))
if batch_norm:
seq_list.append(nn.BatchNorm2d(num_features = out_ch))
if activation == 'relu':
seq_list.append(nn.ReLU())
elif activation == 'sigmoid':
seq_list.append(nn.Sigmoid())
return nn.Sequential(*seq_list)
# class VOSBaseArch(nn.Module):
# def __init__(self, initializer, encoder, convlstmcell, decoder, cost_fn, optimizer):
# super(VOSBaseArch, self).__init__()
# self.initializer = initializer
# self.encoder = encoder
# self.convlstmcell = convlstmcell
# self.decoder = decoder
# self.cost_fn = cost_fn
# self.optimizer = optimizer
# def forward(self, x, y, t):
# yhat_list = []
# loss_list = []
# loss_per_video = 0.0
# print(x[:, 0, :, :, :].size(), y[:, 0, :, :, :].size())
# ci, hi = initializer(x[:, 0, :, :, :] + y[:, 0, :, :, :])
# for frame_id in range(1, x.size(1)):
# xi = x[:, frame_id, :, :, :]
# yi = y[:, frame_id, :, :, :]
# xi = encoder(xi)
# ci, hi = convlstmcell(xi, ci, hi)
# yhati = decoder(hi)
# yhat_list.append(yhati)
# loss = cost_fn(yhati, yi)
# loss_per_video += loss
# loss_list.append(loss.item())
# return yhat_list, loss_list, loss_per_video | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/models/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/models/vgg.py | .py | 4,211 | 116 | """
Encoder for few shot segmentation (VGG16)
"""
import torch
import torch.nn as nn
import pdb
class Encoder_vgg(nn.Module):
"""
Encoder for few shot segmentation
Args:
in_channels:
number of input channels
pretrained_path:
path of the model for initialization
"""
def __init__(self, in_channels=2, pretrained_path=None):
super().__init__()
self.pretrained_path = pretrained_path
## basic model
features = nn.Sequential(
self._make_layer(2, in_channels, 64),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
self._make_layer(2, 64, 128),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
self._make_layer(3, 128, 256),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
self._make_layer(3, 256, 512),
nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
self._make_layer(3, 512, 512, dilation=2, lastRelu=False),
)
## vgg16 model
features1 = nn.Sequential( ## 5 pooling and 1 dilation
self._make_layer(2, in_channels, 64),
nn.MaxPool2d(kernel_size=2, stride=2),
self._make_layer(2, 64, 128),
nn.MaxPool2d(kernel_size=2, stride=2),
self._make_layer(3, 128, 256),
nn.MaxPool2d(kernel_size=2, stride=2),
self._make_layer(3, 256, 512),
nn.MaxPool2d(kernel_size=2, stride=2), ## no pooing
self._make_layer(3, 512, 512, dilation=2), #, lastRelu=False # dilation 2
nn.MaxPool2d(kernel_size=2, stride=2),
)
features2 = nn.Sequential( ## 4 pooling and 1 dilation
self._make_layer(2, in_channels, 64),
nn.MaxPool2d(kernel_size=2, stride=2),
self._make_layer(2, 64, 128),
nn.MaxPool2d(kernel_size=2, stride=2),
self._make_layer(3, 128, 256),
nn.MaxPool2d(kernel_size= 2, stride= 2),
self._make_layer(3, 256, 512),
nn.MaxPool2d(kernel_size = 1, stride = 1), # 1 for no pooling
self._make_layer(3, 512, 512, dilation=2, lastRelu=False), #, lastRelu=False # dilation 2
)
# self.features = features1
self.features = features2
self._init_weights()
def forward(self, x):
return self.features(x)
def _make_layer(self, n_convs, in_channels, out_channels, dilation=1, lastRelu=True):
"""
Make a (conv, relu) layer
Args:
n_convs:
number of convolution layers
in_channels:
input channels
out_channels:
output channels
"""
layer = []
for i in range(n_convs):
layer.append(nn.Conv2d(in_channels, out_channels, kernel_size=3,
dilation=dilation, padding=dilation))
## add Batch normalization
# layer.append(nn.BatchNorm2d(out_channels, momentum=1, affine=False))
if i != n_convs - 1 or lastRelu:
layer.append(nn.ReLU(inplace=True))
in_channels = out_channels
return nn.Sequential(*layer)
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
if self.pretrained_path is not None:
# print("load pretrained model.")
dic = torch.load(self.pretrained_path, map_location='cpu')
keys = list(dic.keys())
new_dic = self.state_dict()
new_keys = list(new_dic.keys())
## remove variables for Batch normalization
# print(new_keys)
length = len(new_keys)
for i in range(len(new_keys)):
idx = length - 1 - i
key = new_keys[idx]
if "bias" in key or "weight" in key:
pass
else:
new_keys.remove(key)
for i in range(4,26): #26
new_dic[new_keys[i]] = dic[keys[i]]
self.load_state_dict(new_dic)
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/models/attention.py | .py | 2,725 | 71 | import numpy as np
import torch
import math
import pdb
from torch.nn import Module, Sequential, Conv2d, ReLU,AdaptiveMaxPool2d, AdaptiveAvgPool2d, \
NLLLoss, BCELoss, CrossEntropyLoss, AvgPool2d, MaxPool2d, Parameter, Linear, Sigmoid, Softmax, Dropout, Embedding
from torch.nn import functional as F
from torch.autograd import Variable
class PAM_Module(Module):
""" Position attention module"""
#Ref from SAGAN
def __init__(self, in_dim):
super(PAM_Module, self).__init__()
self.chanel_in = in_dim
self.query_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.key_conv = Conv2d(in_channels=in_dim, out_channels=in_dim//8, kernel_size=1)
self.value_conv = Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self, x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X (HxW) X (HxW)
"""
m_batchsize, C, height, width = x.size()
proj_query = self.query_conv(x).view(m_batchsize, -1, width*height).permute(0, 2, 1)
proj_key = self.key_conv(x).view(m_batchsize, -1, width*height)
energy = torch.bmm(proj_query, proj_key)
attention = self.softmax(energy)
proj_value = self.value_conv(x).view(m_batchsize, -1, width*height)
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
return out
class CAM_Module(Module):
""" Channel attention module"""
def __init__(self, in_dim):
super(CAM_Module, self).__init__()
self.chanel_in = in_dim
self.gamma = Parameter(torch.zeros(1))
self.softmax = Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X H X W)
returns :
out : attention value + input feature
attention: B X C X C
"""
m_batchsize, C, height, width = x.size()
proj_query = x.view(m_batchsize, C, -1)
proj_key = x.view(m_batchsize, C, -1).permute(0, 2, 1)
energy = torch.bmm(proj_query, proj_key)
energy_new = torch.max(energy, -1, keepdim=True)[0].expand_as(energy)-energy
attention = self.softmax(energy_new)
proj_value = x.view(m_batchsize, C, -1)
out = torch.bmm(attention, proj_value)
out = out.view(m_batchsize, C, height, width)
out = self.gamma*out + x
return out | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/test/decathlon_5shot.sh | .sh | 2,110 | 31 | # this code require gpu_id when running
# 1 2 3 4 5 6 7 8 9 10 11 12 13
mkdir runs/log
mkdir runs/log/decathlon_5shot
gpu=$1
j=$2
j=7
organ=1
for support in 0 5 10 15 20
do
#echo "python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ} record=False n_work=3 external_train=decathlon n_shot=5 "
#python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_data record=False n_work=3 external_train=decathlon n_shot=5
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=decathlon n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/decathlon_5shot/ID${j}_5shot_${organ}_${support}.txt"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=decathlon n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/decathlon_5shot/ID${j}_5shot_${organ}_${support}.txt
#echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=decathlon n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/last.pth >> runs/log/decathlon_5shot/ID${j}_5shot_${organ}_${support}.txt"
#python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=decathlon n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/last.pth >> runs/log/decathlon_5shot/ID${j}_5shot_${organ}_${support}.txt
done
j=9
organ=6
for support in 0 5 10 15 20
do
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=decathlon n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/decathlon_5shot/ID${j}_5shot_${organ}_${support}.txt"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=decathlon n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/decathlon_5shot/ID${j}_5shot_${organ}_${support}.txt
done
exit 0 | Shell |
3D | oopil/3D_medical_image_FSS | FSS1000/test/summarize_test_results.py | .py | 1,508 | 54 | import re
import glob
import numpy as np
def summarize(files):
if len(files) < 5:
print("There is no results for this set.")
return 0,0
dices = []
for file in files[:5]:
fd = open(file)
lines = fd.readlines()
# print(len(lines),file)
result_line = lines[-2]
find = re.search("0.*",result_line)
line_parts = re.split(" ", result_line)
dice = line_parts[-2]
dices.append(float(dice))
return dices
def main():
bcv_dir = "runs/log/bcv"
decathlon_dir = "runs/log/decathlon"
ctorg_dir = "runs/log/ctorg"
dir = "runs/log/ctorg_5shot"
# dirs = [dir]
dirs = ["runs/log/ctorg_5shot", "runs/log/decathlon_5shot", "runs/log/bcv_bladder"]
for dir in dirs:
print()
print(dir)
for shot in [1,3,5]:
for organ in [1,3,6,14]:
files = glob.glob(f"{dir}/*{shot}shot*_{organ}_*")
# print(files)
files.sort()
dices = summarize(files)
avg, std = np.mean(dices), np.std(dices)
avg = float("{:.3f}".format(avg))
std = float("{:.4f}".format(std))
dices = [str(dice) for dice in dices]
dice_str = ",".join(dices)
# print(dir, organ, shot)
if avg*std!=0:
print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
# assert False
if __name__=="__main__":
main()
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/test/bcv_5shot_save_sample.sh | .sh | 754 | 26 | # test code for external dataset ctorg with different support data
mkdir runs/log
mkdir runs/log/bcv_dice_ce
declare -a gpu_list
gpu_list=(0 1 2 7)
gpu=$1
j=$2
idx=0
# BCV configuration
# 1 shot - 21, 3 shot - 5, 5 shot - 17
j=7
support=0
for organ in 1 3 6 14
do
echo "python test.py with target=${organ} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth n_shot=5 gpu_id=${gpu} record=False board=ID${j}_${organ}_5shot_${support} s_idx=${support} save_sample=True save_name=5shot_noFT"
python test.py with target=${organ} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth n_shot=5 gpu_id=${gpu} record=False board=ID${j}_${organ}_5shot_${support} s_idx=${support} save_sample=True save_name=5shot_noFT
sleep 5
j=$(($j+1))
done | Shell |
3D | oopil/3D_medical_image_FSS | FSS1000/test/bcv_bladder.sh | .sh | 1,695 | 32 | # this code require gpu_id when running
# 1 2 3 4 5 6 7 8 9 10 11 12 13
mkdir runs/log
mkdir runs/log/bcv_bladder
gpu=$1
j=$2
organ=14
j=14
for support in 0 3 5 7 10
do
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False n_shot=1 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/bcv_bladder/ID${j}_1shot_${organ}_${support}.txt"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False n_shot=1 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/bcv_bladder/ID${j}_1shot_${organ}_${support}.txt
done
j=15
for support in 0 3 5 7 10
do
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False n_shot=3 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/bcv_bladder/ID${j}_3shot_${organ}_${support}.txt"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False n_shot=3 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/bcv_bladder/ID${j}_3shot_${organ}_${support}.txt
done
j=16
for support in 0 3 5 7 10
do
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/bcv_bladder/ID${j}_5shot_${organ}_${support}.txt"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/bcv_bladder/ID${j}_5shot_${organ}_${support}.txt
done | Shell |
3D | oopil/3D_medical_image_FSS | FSS1000/test/ctorg_5shot.sh | .sh | 1,580 | 26 | # this code require gpu_id when running
# 1 2 3 4 5 6 7 8 9 10 11 12 13
mkdir runs/log
mkdir runs/log/ctorg_5shot
gpu=$1
j=$2
j=8
for organ in 3 6 14
do
for support in 0 5 10 15 20
do
#echo "python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ} record=False n_work=3 external_train=CT_ORG n_shot=5 "
#python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_data record=False n_work=3 external_train=CT_ORG n_shot=5
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=CT_ORG n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/ctorg_5shot/ID${j}_5shot_${organ}_${support}.txt"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=CT_ORG n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/lowest.pth >> runs/log/ctorg_5shot/ID${j}_5shot_${organ}_${support}.txt
#echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=CT_ORG n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/last.pth >> runs/log/ctorg_5shot/ID${j}_5shot_${organ}_${support}.txt"
#python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=CT_ORG n_shot=5 s_idx=${support} snapshot=runs/PANet_train/${j}/snapshots/last.pth >> runs/log/ctorg_5shot/ID${j}_5shot_${organ}_${support}.txt
done
j=$(($j+1))
done | Shell |
3D | oopil/3D_medical_image_FSS | FSS1000/dataloaders_medical/decathlon.py | .py | 10,510 | 285 | import os
import re
import sys
import json
import math
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def prostate_img_process(img_arr, HE=False):
if HE:
img_arr = equalize_hist(img_arr) * 255.0
else:
img_arr = normalize(img_arr, type=0)
return img_arr
def totensor(arr):
tensor = torch.from_numpy(arr).float()
return tensor
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
## do random rotation and flip
k = random.sample([i for i in range(0, 4)], 1)[0]
s_imgs = np.rot90(s_imgs, k, (3, 4)).copy()
s_labels = np.rot90(s_labels, k, (3, 4)).copy()
q_imgs = np.rot90(q_imgs, k, (2, 3)).copy()
q_labels = np.rot90(q_labels, k, (2, 3)).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 3).copy()
s_labels = np.flip(s_labels, 3).copy()
q_imgs = np.flip(q_imgs, 2).copy()
q_labels = np.flip(q_labels, 2).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 4).copy()
s_labels = np.flip(s_labels, 4).copy()
q_imgs = np.flip(q_imgs, 3).copy()
q_labels = np.flip(q_labels, 3).copy()
return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset():
def __init__(self, img_paths, label_paths, config):
"""
dataset constructor for training
"""
super().__init__()
self.mode = config['mode']
self.length = config['n_iter']
self.valid_img_n = len(img_paths)
self.size = config['size']
self.img_paths = img_paths
self.label_paths = label_paths
self.n_shot = config["n_shot"]
self.s_idx = config["s_idx"]
self.is_train = True
if str(self.__class__).split(".")[-1][:4]=="Test":
self.is_train = False
## load file names in advance
self.img_lists = []
self.slice_cnts = []
for img_path in self.img_paths:
fnames = os.listdir(img_path)
self.slice_cnts.append(len(fnames))
fnames = [int(e.split(".")[0]) for e in fnames]
fnames.sort()
fnames = [f"{e}.npy" for e in fnames]
self.img_lists.append(fnames)
if not self.is_train: # for testing
self.length = sum(self.slice_cnts)
def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
seed = random.randrange(0,1000)
# s_length = len(s_img_paths)
s_imgs_all, s_labels_all = [],[]
for s_idx, s_img_paths in enumerate(s_img_paths_all):
s_label_paths = s_label_paths_all[s_idx]
imgs, labels = [],[]
for i in range(len(s_img_paths)):
img_path, label_path = s_img_paths[i], s_label_paths[i]
img = self.img_load(img_path, seed)
img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
img = np.expand_dims(img, axis=0)
imgs.append(img)
label = np.load(label_path)
label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
label = np.expand_dims(label, axis=0)
labels.append(label)
s_imgs = np.stack(imgs,axis=0)
s_labels = np.stack(labels,axis=0)
s_imgs_all.append(s_imgs)
s_labels_all.append(s_labels)
s_imgs = np.stack(s_imgs_all,axis=0)
s_labels = np.stack(s_labels_all,axis=0)
q_length = len(q_img_paths)
imgs, labels = [],[]
for i in range(len(q_img_paths)):
img_path, label_path = q_img_paths[i], q_label_paths[i]
img = self.img_load(img_path, seed)
img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
img = np.expand_dims(img, axis=0)
imgs.append(img)
label = np.load(label_path)
label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
label = np.expand_dims(label, axis=0)
labels.append(label)
q_imgs = np.stack(imgs,axis=0)
q_labels = np.stack(labels,axis=0)
# print(imgs.shape) [slice_num,1,256,256]?
if self.is_train: ## random augmentation : flip, rotation
s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
sample = {
"s_x":totensor(s_imgs),
"s_y":totensor(s_labels), #.long()
"q_x":totensor(q_imgs),
"q_y":totensor(q_labels), #.long()
# "s_length":s_length,
# "q_length":q_length,
"s_fname":s_img_paths_all,
"q_fname":q_img_paths,
}
return sample
def handle_idx(self, s_n, q_idx, q_n):
"""
choose slices for support and query volume
:return: supp_idx, qry_idx
"""
q_ratio = (q_idx)/(q_n-1)
s_idx = round((s_n-1)*q_ratio)
return s_idx
def getitem_train(self):
## choose support and target
idx_space = [i for i in range(self.valid_img_n)]
subj_idxs = random.sample(idx_space, self.n_shot+1)
s_subj_idxs = subj_idxs[:self.n_shot]
q_subj_idx = subj_idxs[self.n_shot]
q_subj_img_path = self.img_paths[q_subj_idx]
q_subj_label_path = self.label_paths[q_subj_idx]
q_fnames = self.img_lists[q_subj_idx]
q_idx = random.randrange(0, len(q_fnames))
is_flip = False
if random.random() < 0.5:
is_flip = True
q_fnames.reverse()
s_img_paths_all, s_label_paths_all = [],[]
for s_subj_idx in s_subj_idxs:
s_subj_img_path = self.img_paths[s_subj_idx]
s_subj_label_path = self.label_paths[s_subj_idx]
s_fnames = self.img_lists[s_subj_idx]
## flip augmentation
if is_flip:
s_fnames.reverse()
## choose support and query slice
s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
s_fnames_selected = s_fnames[s_idx:s_idx+1]
## define path, load data, and return
s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
s_img_paths_all.append(s_img_paths_selected)
s_label_paths_all.append(s_label_paths_selected)
q_fnames_selected = q_fnames[q_idx:q_idx + 1]
q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
def getitme_test(self, idx):
    """Build one deterministic test episode for flat slice index ``idx``.

    Uses the fixed support volumes installed by set_support_volume().
    NOTE(review): the method name is a typo of "getitem_test", kept because
    it is part of the public interface used by TestLoader.__getitem__.
    """
    q_subj_idx, q_idx = self.get_test_subj_idx(idx)
    q_subj_img_path = self.img_paths[q_subj_idx]
    q_subj_label_path = self.label_paths[q_subj_idx]
    q_fnames = self.img_lists[q_subj_idx]
    s_img_paths_all, s_label_paths_all = [],[]
    for s_idx in range(self.n_shot):
        # s_idx is reused below as a slice index; shot iteration relies on
        # these three lookups happening first.
        s_subj_img_path = self.s_img_paths[s_idx]
        s_subj_label_path = self.s_label_paths[s_idx]
        s_fnames = self.s_fnames_list[s_idx]
        ## choose support and query slice
        s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
        s_fnames_selected = s_fnames[s_idx:s_idx+1]
        ## define path, load data, and return
        s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
        s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
        s_img_paths_all.append(s_img_paths_selected)
        s_label_paths_all.append(s_label_paths_selected)
    q_fnames_selected = q_fnames[q_idx:q_idx + 1]
    q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
    q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
    return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
def get_len_train(self):
    # Epoch length = configured number of sampled episodes (config['n_iter']).
    return self.length

def get_len_test(self):
    # For test loaders __init__ reset self.length to the total slice count.
    return self.length
def get_val_subj_idx(self, idx):
    """Translate a flat validation index into (subject index, slice index).

    NOTE(review): reads self.q_cnts / self.q_max_slice, which are not
    assigned anywhere in this class -- confirm the caller sets them.
    """
    for subj_idx,cnt in enumerate(self.q_cnts):
        if idx < cnt:
            return subj_idx, idx*self.q_max_slice
        else:
            idx -= cnt
    # NOTE(review): `assert False` is stripped under `python -O`; an explicit
    # raise would be safer than print + assert.
    print("get_val_subj_idx function is not working.")
    assert False
def get_test_subj_idx(self, idx):
    """Translate a flat test index into (subject index, slice index) by
    walking the per-subject slice counts."""
    for subj_idx,cnt in enumerate(self.slice_cnts):
        if idx < cnt:
            return subj_idx, idx
        else:
            idx -= cnt
    # NOTE(review): `assert False` is a no-op under `python -O`.
    print("get_test_subj_idx function is not working.")
    assert False
def get_cnts(self):
    ## only for test loader
    # Per-subject slice counts, used to regroup flat predictions into volumes.
    return self.slice_cnts
def img_load(self, img_path, seed=0):
    """Read one slice array from a ``.npy`` file.

    ``seed`` is accepted for interface compatibility with multi-modal
    subclasses but is not used here.
    """
    return np.load(img_path)
class BaseLoader(Base_dataset):
    """Dataset base for this task: one modality, one foreground label."""
    modal_i = [0] # there is only one modality
    label_i = 1.0 # there is only one label for each image
class TrainLoader(BaseLoader):
    """Training split: a fixed number of randomly sampled episodes per epoch."""
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # idx is ignored on purpose: training episodes are drawn at random.
        return self.getitem_train()
class TestLoader(BaseLoader):
    """Evaluation split: deterministic sweep over every query slice, paired
    with a fixed support set installed via set_support_volume()."""
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        # "getitme_test" is a typo in the base class's public name; kept.
        return self.getitme_test(idx)
    def set_support_volume(self, s_img_paths, s_label_paths):
        ## set support img path and label path for validation and testing
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            # numeric sort so "10.npy" follows "9.npy"
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
if __name__ == "__main__":
pass
# main() | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/dataloaders_medical/common.py | .py | 6,690 | 210 | """
Dataset classes for common uses
"""
import random
import SimpleITK as sitk
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import torch
import torchvision.transforms.functional as tr_F
from skimage.exposure import equalize_hist
import pdb
def crop_resize(slice):
    """Crop fixed margins (40/20 rows, 50/50 cols) from a 2-D slice and
    resize it to 240x240.

    NOTE(review): `resize` resolves to whatever is in scope in this module;
    a sample-dict `resize` is defined below, which would not accept an
    ndarray -- confirm the intended array-resize function is imported.
    """
    x_size, y_size = np.shape(slice)
    slice = slice[40:x_size - 20, 50:y_size - 50]
    slice = resize(slice, (240, 240))
    return slice
def fill_empty_space(arr):
    """Replace zero-valued entries of ``arr`` with its mean, in place.

    The mean is computed over the whole array (zeros included) before the
    replacement.  Returns the same (mutated) array.
    """
    zero_mask = (arr == 0)
    arr[zero_mask] = np.mean(arr)
    return arr
def prostate_sample(img_arr, label_arr, isize):
    """Wrap raw image/label arrays into the PIL-based sample dict used by
    this module, resized to (isize, isize) and converted to tensors.

    The label image is reused for the 'inst' and 'scribble' entries.
    """
    img = Image.fromarray(img_arr.astype(np.uint8))
    label = Image.fromarray(label_arr.astype(np.uint8))
    sample = {
        'image':img,
        'label':label,
        'inst':label,
        'scribble':label,
    }
    # pdb.set_trace()
    sample = resize(sample, (isize,isize))
    sample = to_tensor_normalize(sample)
    return sample
def prostate_mask(sample, isize):
    """Build binary foreground/background masks from ``sample['label']``.

    Foreground is where the label equals 1; background is its complement.
    Both are expanded to shape (1, isize, isize) and keep the label dtype.
    """
    label = sample['label']
    fg_mask = (label == 1).to(label.dtype)
    bg_mask = torch.ones_like(label) - fg_mask
    return {
        'fg_mask': fg_mask.expand((1, isize, isize)),
        'bg_mask': bg_mask.expand((1, isize, isize)),
    }
def get_support_sample(ipath, lpath, modal_index, mask_n, is_HE, shift=0):
    """Build a support sample from the slice with the largest mask area.

    Loads the image volume (``.npy``) and the label volume (via SimpleITK),
    binarizes the mask, picks the slice with the most foreground pixels
    (offset by ``shift``), and round-trips it through temporary PNG files
    into a normalized sample dict.

    NOTE(review): writes fixed files "tmp_img.png"/"tmp_label.png" in the
    working directory -- not safe with concurrent workers.  ``mask_n`` is
    currently unused (binary mode is active).
    """
    arr = read_npy(ipath, modal_index, is_HE)
    # pdb.set_trace() ## for debugging
    # arr = fill_empty_space(arr)
    arr_mask = read_sitk(lpath)
    ## for 2-way(binary) segmentation
    arr_mask = (arr_mask>0)*1.0
    # arr_mask = (arr_mask == mask_n) * 1.0
    # per-slice foreground pixel counts; argmax picks the best support slice
    cnt = np.sum(arr_mask, axis=(1, 2))
    maxarg = np.argmax(cnt)
    slice = arr[maxarg+shift, :, :]
    slice = crop_resize(slice)
    # slice = normalize(slice)
    slice = convert3ch(slice)
    save_img(slice, "tmp_img.png")
    slice_mask = arr_mask[maxarg+shift, :, :]*255.0
    slice_mask = crop_resize(slice_mask)
    # slice_mask = convert3ch(slice_mask)
    save_img(slice_mask, "tmp_label.png")
    sample = read_sample("tmp_img.png", "tmp_label.png")
    # sample = transforms(sample)
    sample = to_tensor_normalize(sample)
    return sample
    ## for 5 way segmentation (dead experimental variants kept below)
    # arr_mask = (arr_mask == mask_n)*1.0
    # if mask_n == 2:
    #     arr_mask = (arr_mask > 0)*1.0
    # elif mask_n == 1:
    #     arr_mask = (arr_mask == 1)*1.0
    # elif mask_n == 4:
    #     arr_mask = (arr_mask == 4)*1.0 + (arr_mask == 1)*1.0
    # else:
    #     raise("invalid mask_n")
def getMask(sample, class_id=1, class_ids=[0, 1]):
    """Build fg/bg and brain-fg/brain-bg masks of shape (1, 240, 240).

    ``sample['empty']`` marks zero-intensity pixels (outside the scan);
    brain_fg excludes both empty space and the foreground class.
    NOTE(review): output size is hard-coded to 240x240; ``class_ids`` is
    unused and is a mutable default argument.
    """
    label = sample['label']
    empty = sample['empty']
    fg_mask = torch.where(label == class_id, torch.ones_like(label), torch.zeros_like(label))
    brain_bg_mask = empty
    # bg_mask = torch.ones_like(label) - fg_mask - empty
    bg_mask = torch.ones_like(label) - fg_mask
    # brain_fg_mask = torch.ones_like(label) - empty
    brain_fg_mask = torch.ones_like(label) - empty - fg_mask
    fg_mask = fg_mask.expand((1, 240, 240))
    bg_mask = bg_mask.expand((1, 240, 240))
    brain_fg_mask = brain_fg_mask.expand((1, 240, 240))
    brain_bg_mask = brain_bg_mask.expand((1, 240, 240))
    return {'fg_mask': fg_mask,
            'bg_mask': bg_mask,
            'brain_fg_mask': brain_fg_mask,
            'brain_bg_mask': brain_bg_mask,}
def read_npy(path, modal_index, is_HE):
    """Load one modality from a multi-modal ``.npy`` volume.

    When ``is_HE`` is set, histogram-equalize and rescale to 0-255.
    NOTE(review): indentation of the normalize call relative to the `if`
    was ambiguous in the archived source -- confirm whether normalization
    should also run when is_HE is False.
    """
    arr = np.load(path)[modal_index]
    if is_HE:
        arr = equalize_hist(arr)
        arr = normalize(arr, type=0)
    return arr
def convert3ch(slice, axis=2):
    """Replicate a single-channel slice three times along ``axis`` to make
    a 3-channel (RGB-like) array."""
    expanded = np.expand_dims(slice, axis=axis)
    return np.repeat(expanded, 3, axis=axis)
def normalize(arr, type=0):
    """Rescale ``arr`` by min/max (type=0) or standardize (type=1), x255.

    :param arr: input array; NOT modified (the previous version mutated the
                caller's array in place for type 0)
    :param type: 0 -> shift to min 0 and divide by max; 1 -> (x-mean)/std
    :return: float array scaled by 255.0
    :raises ValueError: for an unknown ``type`` (previously this fell
                        through to an unbound-variable NameError)
    """
    # print(np.mean(arr*255.0), np.std(arr*255.0), np.amin(arr*255.0), np.amax(arr*255.0))
    arr = np.array(arr, dtype=np.float64)  # private copy, no caller mutation
    if type == 0: # min and max
        arr -= np.amin(arr)
        maxi = np.amax(arr)
        if maxi == 0:  # constant input: avoid division by zero
            return arr
        arr_norm = arr/maxi
    elif type == 1: # stddev and mean
        mean = np.mean(arr)
        stddev = np.std(arr)
        if stddev == 0:  # constant input: define the z-score as zero
            return np.zeros_like(arr)
        arr_norm = (arr-mean)/stddev
    else:
        raise ValueError(f"unknown normalization type: {type}")
    return arr_norm*255.0
def map_distribution(arr, tg_mean=0, tg_std=1, tg_min=0, tg_max=255):
    """Affinely remap ``arr`` so its nonzero-pixel statistics match a target
    mean/std, then clip to [tg_min, tg_max].

    Statistics come from nonzero entries only, but the map is applied to the
    whole array.  NOTE(review): prints debug stats to stdout; ``mini`` and
    ``maxi`` are computed but unused.
    """
    arr_nonzero = arr[np.nonzero(arr)]
    ## input arr range : (0,255)
    mean, std, mini, maxi = np.mean(arr_nonzero), np.std(arr_nonzero), np.amin(arr_nonzero), np.amax(arr_nonzero)
    Z = (arr - mean) / std ## map arr into standard var Z
    new_arr = Z*tg_std + tg_mean ## map Z into the target distribution
    print(np.mean(new_arr), np.std(new_arr), np.amin(new_arr), np.amax(new_arr))
    new_arr = np.clip(new_arr, tg_min, tg_max)
    return new_arr
def to_tensor_normalize(sample):
    """Convert sample['image'] to a normalized tensor and rescale the label.

    Adds sample['empty'], a mask of pixels that are exactly zero in the
    first channel, computed BEFORE ImageNet-style normalization.
    NOTE(review): output is hard-coded to expand to (1, 3, 240, 240), and
    the label is divided by 255 (labels assumed saved as 0/255 images).
    """
    img, label = sample['image'], sample['label']
    inst, scribble = sample['inst'], sample['scribble']
    ## map distribution
    # pdb.set_trace()
    # arr = np.array(img)
    # arr = map_distribution(arr, tg_mean=0.456, tg_std=0.224, tg_min=-10, tg_max=10)
    # img = Image.fromarray(arr.astype(dtype=np.uint8))
    img = tr_F.to_tensor(img)
    empty = (img[0] == 0.0) * 1.0
    # ImageNet mean/std normalization
    img = tr_F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    label = torch.Tensor(np.array(label)).long()
    img = img.expand((1, 3, 240, 240))
    label = label/255.0
    sample['empty'] = empty.long()
    sample['image'] = img
    sample['label'] = label
    sample['inst'] = inst
    sample['scribble'] = scribble
    return sample
def read_sample(img_path, label_path):
    """Open image/label files into the sample dict; the label file is reused
    for the 'inst' and 'scribble' entries."""
    sample = {}
    sample['image'] = Image.open(img_path)
    sample['label'] = Image.open(label_path)
    sample['inst'] = Image.open(label_path)
    sample['scribble'] = Image.open(label_path)
    # Save the original image (without normalization)
    sample['original'] = Image.open(img_path)
    return sample
def read_sitk(path):
    """Read a medical image volume via SimpleITK as a float32 numpy array."""
    itk_img = sitk.ReadImage(path)
    arr = sitk.GetArrayFromImage(itk_img)
    arr = np.array(arr, dtype=np.float32)
    return arr
def save_sitk(arr, itk_ref, opath):
    """Write ``arr`` to ``opath``, copying spacing/origin/direction metadata
    from the reference SimpleITK image ``itk_ref``."""
    sitk_oimg = sitk.GetImageFromArray(arr)
    sitk_oimg.CopyInformation(itk_ref)
    sitk.WriteImage(sitk_oimg, opath)
def save_img(arr, path):
    """Save an array to an image file (cast to uint8 first)."""
    im = Image.fromarray(arr.astype(np.uint8))
    im.save(path)
def load_img(path):
    """Load an image file and normalize it.

    NOTE(review): read_PIL returns an ndarray, but to_tensor_normalize
    expects a sample dict with 'image'/'label' keys -- this call would
    fail at runtime.  Looks dead or broken; confirm before use.
    """
    arr = read_PIL(path)
    return to_tensor_normalize(arr)
def load_seg(path):
    """Load a segmentation image, add a leading channel axis, and convert
    to a tensor via torchvision's to_tensor."""
    arr = read_PIL(path)
    arr = np.expand_dims(arr, axis=0)
    arr = tr_F.to_tensor(arr)
    return arr
def read_PIL(path):
    """Open an image with PIL and return it as a float32 numpy array."""
    im = Image.open(path)
    arr = np.array(im, dtype=np.float32)
    # arr = np.swapaxes(arr, 0, 2)
    return arr
def resize(sample, size):
    """Resize sample['image'] and sample['label'] to ``size`` in place.

    The label uses nearest-neighbor interpolation to keep class ids intact.
    """
    img, label = sample['image'], sample['label']
    img = tr_F.resize(img, size)
    label = tr_F.resize(label, size, interpolation=Image.NEAREST)
    sample['image'] = img
    sample['label'] = label
    return sample
| Python |
3D | oopil/3D_medical_image_FSS | FSS1000/dataloaders_medical/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | FSS1000/dataloaders_medical/dataset_CT_ORG.py | .py | 10,422 | 278 | import os
import re
import sys
import json
import math
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def totensor(arr):
    """Convert a numpy array into a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
    """Apply ONE shared random 90-degree rotation and random flips to the
    support and query stacks so images and labels stay aligned.

    Support arrays are 5-D (shot, slice, C, H, W) and query arrays are 4-D
    (slice, C, H, W), hence the different axis pairs.
    """
    ## do random rotation and flip
    k = random.sample([i for i in range(0, 4)], 1)[0]
    s_imgs = np.rot90(s_imgs, k, (3, 4)).copy()
    s_labels = np.rot90(s_labels, k, (3, 4)).copy()
    q_imgs = np.rot90(q_imgs, k, (2, 3)).copy()
    q_labels = np.rot90(q_labels, k, (2, 3)).copy()
    if random.random() < 0.5:
        # vertical flip (H axis)
        s_imgs = np.flip(s_imgs, 3).copy()
        s_labels = np.flip(s_labels, 3).copy()
        q_imgs = np.flip(q_imgs, 2).copy()
        q_labels = np.flip(q_labels, 2).copy()
    if random.random() < 0.5:
        # horizontal flip (W axis)
        s_imgs = np.flip(s_imgs, 4).copy()
        s_labels = np.flip(s_labels, 4).copy()
        q_imgs = np.flip(q_imgs, 3).copy()
        q_labels = np.flip(q_labels, 3).copy()
    return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset_ctorg():
    """Episodic few-shot segmentation dataset over pre-sliced CT-ORG volumes.

    Each subject is a directory of per-slice ``.npy`` files.  Training
    episodes pair ``n_shot`` random support subjects with one query subject
    at matching relative depth; test mode sweeps every query slice once
    against a fixed support set (see set_support_volume).
    """

    def __init__(self, img_paths, label_paths, config):
        """
        dataset constructor

        :param img_paths: one image directory per subject
        :param label_paths: matching label directory per subject
        :param config: dict with keys 'mode', 'n_iter', 'size', 'n_shot', 's_idx'
        """
        super().__init__()
        self.mode = config['mode']
        self.length = config['n_iter']
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        # Subclasses whose class name starts with "Test" run in eval mode.
        self.is_train = True
        if str(self.__class__).split(".")[-1][:4] == "Test":
            self.is_train = False
        ## load file names in advance (numeric sort so 2.npy precedes 10.npy)
        self.img_lists = []
        self.slice_cnts = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            self.slice_cnts.append(len(fnames))
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        if not self.is_train:  # test mode visits every query slice once
            self.length = sum(self.slice_cnts)

    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load, resize and stack the selected support/query slices.

        :return: dict with float tensors "s_x"/"s_y" (shot, slice, 1, H, W),
                 "q_x"/"q_y" (slice, 1, H, W) and the source paths.
        """
        seed = random.randrange(0, 1000)
        s_imgs_all, s_labels_all = [], []
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [], []
            for i in range(len(s_img_paths)):
                img_path, label_path = s_img_paths[i], s_label_paths[i]
                img = self.img_load(img_path, seed)
                # INTER_AREA for intensities; INTER_NEAREST keeps labels integral
                img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
                imgs.append(np.expand_dims(img, axis=0))
                label = np.load(label_path)
                label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
                labels.append(np.expand_dims(label, axis=0))
            s_imgs_all.append(np.stack(imgs, axis=0))
            s_labels_all.append(np.stack(labels, axis=0))
        s_imgs = np.stack(s_imgs_all, axis=0)
        s_labels = np.stack(s_labels_all, axis=0)
        imgs, labels = [], []
        for i in range(len(q_img_paths)):
            img_path, label_path = q_img_paths[i], q_label_paths[i]
            img = self.img_load(img_path, seed)
            img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
            imgs.append(np.expand_dims(img, axis=0))
            label = np.load(label_path)
            label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
            labels.append(np.expand_dims(label, axis=0))
        q_imgs = np.stack(imgs, axis=0)
        q_labels = np.stack(labels, axis=0)
        if self.is_train:  ## random augmentation : flip, rotation
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        return {
            "s_x": totensor(s_imgs),
            "s_y": totensor(s_labels),  # .long()
            "q_x": totensor(q_imgs),
            "q_y": totensor(q_labels),  # .long()
            "s_fname": s_img_paths_all,
            "q_fname": q_img_paths,
        }

    def handle_idx(self, s_n, q_idx, q_n):
        """Map a query slice index onto the matching support slice index
        by projecting the query's relative depth onto the support volume.

        :param s_n: number of support slices
        :param q_idx: 0-based query slice index
        :param q_n: number of query slices
        :return: support slice index in [0, s_n - 1]
        """
        # Fix: the original divided by (q_n - 1) and crashed with
        # ZeroDivisionError on single-slice query volumes.
        if q_n <= 1:
            return round((s_n - 1) * 0.5)
        q_ratio = (q_idx) / (q_n - 1)
        s_idx = round((s_n - 1) * q_ratio)
        return s_idx

    def getitem_train(self):
        """Sample one random training episode (n_shot supports + one query)."""
        ## choose support and target
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot + 1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        q_idx = random.randrange(0, len(q_fnames))
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            # Fix: use a reversed COPY.  The original called list.reverse(),
            # which permanently mutated the cached self.img_lists entry and
            # left volumes in random orientations across episodes.
            q_fnames = q_fnames[::-1]
        s_img_paths_all, s_label_paths_all = [], []
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = self.img_lists[s_subj_idx]
            ## flip augmentation mirrors the query orientation
            if is_flip:
                s_fnames = s_fnames[::-1]
            ## choose support and query slice
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def getitme_test(self, idx):
        """Build the deterministic test episode for flat slice index ``idx``
        using the fixed support volumes from set_support_volume()."""
        q_subj_idx, q_idx = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        s_img_paths_all, s_label_paths_all = [], []
        for s_idx in range(self.n_shot):
            s_subj_img_path = self.s_img_paths[s_idx]
            s_subj_label_path = self.s_label_paths[s_idx]
            s_fnames = self.s_fnames_list[s_idx]
            ## choose support and query slice
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    # Backward-compatible alias for the typo'd public name above.
    getitem_test = getitme_test

    def get_len_train(self):
        # Epoch length = configured number of sampled episodes.
        return self.length

    def get_len_test(self):
        # __init__ reset self.length to the total slice count in test mode.
        return self.length

    def get_val_subj_idx(self, idx):
        """Translate a flat validation index into (subject, slice) indices.

        NOTE(review): reads self.q_cnts / self.q_max_slice, which are never
        assigned in this class -- confirm callers set them first.
        :raises IndexError: when ``idx`` exceeds the total count
        """
        for subj_idx, cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx * self.q_max_slice
            idx -= cnt
        # Fix: raise instead of `print(...); assert False` -- the assert was
        # a silent no-op under `python -O`.
        raise IndexError("get_val_subj_idx: index out of range")

    def get_test_subj_idx(self, idx):
        """Translate a flat test index into (subject index, slice index).

        :raises IndexError: when ``idx`` exceeds the total slice count
        """
        for subj_idx, cnt in enumerate(self.slice_cnts):
            if idx < cnt:
                return subj_idx, idx
            idx -= cnt
        # Fix: raise instead of `print(...); assert False` (no-op under -O).
        raise IndexError("get_test_subj_idx: index out of range")

    def get_cnts(self):
        ## only for test loader
        # Per-subject slice counts, used to regroup predictions into volumes.
        return self.slice_cnts

    def img_load(self, img_path, seed=0):
        # +0.25 intensity shift -- presumably undoes the preprocessing
        # offset of the saved .npy slices; TODO(review) confirm.
        img_arr = np.load(img_path) + 0.25
        return img_arr

    def set_support_volume(self, s_img_paths, s_label_paths):
        ## set support img path and label path for validation and testing
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            # numeric sort so "10.npy" follows "9.npy"
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
class BaseLoader_CTORG(Base_dataset_ctorg):
    """CT-ORG base loader: one modality, one foreground label."""
    modal_i = [0] # there is only one modality
    label_i = 1.0 # there is only one label for each image

class TrainLoader_CTORG(BaseLoader_CTORG):
    """Training split: a fixed number of random episodes per epoch."""
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # idx is ignored on purpose: episodes are sampled at random.
        return self.getitem_train()

class TestLoader_CTORG(BaseLoader_CTORG):
    """Test split: deterministic sweep over all query slices."""
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

if __name__ == "__main__":
    pass
    # main()
3D | oopil/3D_medical_image_FSS | FSS1000/dataloaders_medical/dataset_decathlon.py | .py | 14,795 | 458 | import os
import re
import sys
import json
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def totensor(arr):
    """Convert a numpy array into a float32 torch tensor."""
    tensor = torch.from_numpy(arr).float()
    # tensor = F.interpolate(tensor, size=size,mode=interp)
    return tensor
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
    """Apply ONE shared random 90-degree rotation and random flips so that
    support/query images and labels stay spatially aligned.

    Support arrays are 5-D (shot, slice, C, H, W), query arrays are 4-D
    (slice, C, H, W), hence the different axis pairs.
    """
    ## do random rotation and flip
    k = random.sample([i for i in range(0, 4)], 1)[0]
    s_imgs = np.rot90(s_imgs, k, (3, 4)).copy()
    s_labels = np.rot90(s_labels, k, (3, 4)).copy()
    q_imgs = np.rot90(q_imgs, k, (2, 3)).copy()
    q_labels = np.rot90(q_labels, k, (2, 3)).copy()
    if random.random() < 0.5:
        # vertical flip (H axis)
        s_imgs = np.flip(s_imgs, 3).copy()
        s_labels = np.flip(s_labels, 3).copy()
        q_imgs = np.flip(q_imgs, 2).copy()
        q_labels = np.flip(q_labels, 2).copy()
    if random.random() < 0.5:
        # horizontal flip (W axis)
        s_imgs = np.flip(s_imgs, 4).copy()
        s_labels = np.flip(s_labels, 4).copy()
        q_imgs = np.flip(q_imgs, 3).copy()
        q_labels = np.flip(q_labels, 3).copy()
    return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset():
    """Episodic few-shot segmentation dataset over pre-sliced Decathlon volumes.

    Each subject is a directory of per-slice ``.npy`` files.  Training draws
    random support/query episodes at matching relative depth; subclasses
    named ``*_test`` switch to a deterministic sweep over all query slices.
    """
    def __init__(self, img_paths, label_paths, config):
        """
        dataset constructor for training

        :param img_paths: one image directory per subject
        :param label_paths: matching label directory per subject
        :param config: dict with keys 'mode', 'n_iter', 'size', 'n_shot', 's_idx'
        """
        super().__init__()
        self.mode = config['mode']
        self.length = config['n_iter']
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        self.is_train = True
        # Eval mode is detected from the subclass name suffix ("..._test").
        crit = str(self.__class__).split("_")[-1][:4]
        print(f"training word : {crit}")
        if crit == "test":
            self.is_train = False
        ## load file names in advance (numeric sort so 2.npy precedes 10.npy)
        self.img_lists = []
        self.slice_cnts = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            self.slice_cnts.append(len(fnames))
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        if not self.is_train: # for testing, visit every query slice once
            self.length = sum(self.slice_cnts)
    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load, resize and stack the selected support/query slices into
        float tensors: s_x/s_y (shot, slice, 1, H, W), q_x/q_y (slice, 1, H, W)."""
        seed = random.randrange(0, 1000)
        # s_length = len(s_img_paths)
        s_imgs_all, s_labels_all = [], []
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [], []
            for i in range(len(s_img_paths)):
                img_path, label_path = s_img_paths[i], s_label_paths[i]
                img = self.img_load(img_path, seed)
                # INTER_AREA for intensities; INTER_NEAREST keeps labels integral
                img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
                img = np.expand_dims(img, axis=0)
                imgs.append(img)
                label = np.load(label_path)
                label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
                label = np.expand_dims(label, axis=0)
                labels.append(label)
            s_imgs = np.stack(imgs, axis=0)
            s_labels = np.stack(labels, axis=0)
            s_imgs_all.append(s_imgs)
            s_labels_all.append(s_labels)
        s_imgs = np.stack(s_imgs_all, axis=0)
        s_labels = np.stack(s_labels_all, axis=0)
        q_length = len(q_img_paths)
        imgs, labels = [], []
        for i in range(len(q_img_paths)):
            img_path, label_path = q_img_paths[i], q_label_paths[i]
            img = self.img_load(img_path, seed)
            img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
            img = np.expand_dims(img, axis=0)
            imgs.append(img)
            label = np.load(label_path)
            label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
            label = np.expand_dims(label, axis=0)
            labels.append(label)
        q_imgs = np.stack(imgs, axis=0)
        q_labels = np.stack(labels, axis=0)
        # print(imgs.shape) [slice_num,1,256,256]?
        if self.is_train: ## random augmentation : flip, rotation
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x": totensor(s_imgs),
            "s_y": totensor(s_labels), # .long()
            "q_x": totensor(q_imgs),
            "q_y": totensor(q_labels), # .long()
            # "s_length":s_length,
            # "q_length":q_length,
            "s_fname": s_img_paths_all,
            "q_fname": q_img_paths,
        }
        return sample
    def handle_idx(self, s_n, q_idx, q_n):
        """
        choose slices for support and query volume
        :return: supp_idx, qry_idx
        """
        # NOTE(review): raises ZeroDivisionError when q_n == 1.
        q_ratio = (q_idx) / (q_n - 1)
        s_idx = round((s_n - 1) * q_ratio)
        return s_idx
    def getitem_train(self):
        """Sample one random training episode (n_shot supports + one query)."""
        ## choose support and target
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot + 1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        q_idx = random.randrange(0, len(q_fnames))
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            # NOTE(review): reverse() mutates the cached self.img_lists entry
            # in place, so flips persist across episodes -- confirm intended.
            q_fnames.reverse()
        s_img_paths_all, s_label_paths_all = [], []
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = self.img_lists[s_subj_idx]
            ## flip augmentation
            if is_flip:
                s_fnames.reverse()
            ## choose support and query slice
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def getitme_test(self, idx):
        """Deterministic test episode for flat slice index ``idx``.

        NOTE(review): the name is a typo of "getitem_test", kept because the
        *_test subclasses call it via __getitem__.
        """
        q_subj_idx, q_idx = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        s_img_paths_all, s_label_paths_all = [], []
        for s_idx in range(self.n_shot):
            s_subj_img_path = self.s_img_paths[s_idx]
            s_subj_label_path = self.s_label_paths[s_idx]
            s_fnames = self.s_fnames_list[s_idx]
            ## choose support and query slice
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def get_len_train(self):
        # Epoch length = configured number of sampled episodes.
        return self.length
    def get_len_test(self):
        # __init__ reset self.length to the total slice count in test mode.
        return self.length
    def get_val_subj_idx(self, idx):
        # Translate a flat validation index into (subject, slice) indices.
        # NOTE(review): self.q_cnts / self.q_max_slice are never assigned in
        # this class; `assert False` below is stripped under `python -O`.
        for subj_idx, cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx * self.q_max_slice
            else:
                idx -= cnt
        print("get_val_subj_idx function is not working.")
        assert False
    def get_test_subj_idx(self, idx):
        # Translate a flat test index into (subject index, slice index).
        # NOTE(review): `assert False` is a no-op under `python -O`.
        for subj_idx, cnt in enumerate(self.slice_cnts):
            if idx < cnt:
                return subj_idx, idx
            else:
                idx -= cnt
        print("get_test_subj_idx function is not working.")
        assert False
    def get_cnts(self):
        ## only for test loader
        # Per-subject slice counts, used to regroup predictions into volumes.
        return self.slice_cnts
    def img_load(self, img_path, seed=0):
        # Default single-modality loader; subclasses override for multi-modal data.
        img_arr = np.load(img_path)
        return img_arr
    def set_support_volume(self, s_img_paths, s_label_paths):
        ## set support img path and label path for validation and testing
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            # numeric sort so "10.npy" follows "9.npy"
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
# Per-task loader pairs.  Each *_Base fixes the modality/label convention for
# one Decathlon task; *_train samples random episodes, *_test sweeps slices.
# The *_train / *_test name suffix also drives Base_dataset's mode detection.

class Spleen_Base(Base_dataset):
    modal_i = 0
    label_i = 1.0
class Spleen_train(Spleen_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Spleen_test(Spleen_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Liver_Base(Base_dataset):
    modal_i = 0 # only 1 modality
    label_i = 1.0 # use both 1 : cancer / 2 : liver
class Liver_train(Liver_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Liver_test(Liver_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Tumor_Base(Base_dataset):
    modal_i = [0, 1, 2, 3] # 4 modalities
    label_i = 3.0 # 1 : edema / 2 : non enhancing tumor / 3 : enhancing tumour
    def img_load(self, img_path, seed=0):
        # the shared per-episode seed keeps the chosen modality consistent
        # across the query image and all support images
        modal_idx = seed%len(self.modal_i)
        img_arr = np.load(img_path)
        return img_arr[modal_idx] # synchronize with query img and other support img
class Tumor_train(Tumor_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Tumor_test(Tumor_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Prostate_Base(Base_dataset):
    modality_n = 2
    modal_i = 0
    label_i = 2.0
    def img_load(self, img_path, seed=0):
        # always loads the fixed modality self.modal_i
        img_arr = np.load(img_path)
        return img_arr[self.modal_i]
class Prostate_train(Prostate_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Prostate_test(Prostate_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Hippo_Base(Base_dataset):
    modal_i = 0
    label_i = 1.0 # use both 1.0 and 2.0
class Hippo_train(Hippo_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Hippo_test(Hippo_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Lung_Base(Base_dataset):
    modal_i = 0 # only 1 modality
    label_i = 1.0 # use both 1 : cancer
class Lung_train(Lung_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Lung_test(Lung_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class HepaticVessel_Base(Base_dataset):
    modality_n = 1
    # modal_i = 0
    label_i = 1.0 # 1 for vessel, 2 for tumour
    # use only vessel
class HepaticVessel_train(HepaticVessel_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class HepaticVessel_test(HepaticVessel_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Heart_Base(Base_dataset):
    modality_n = 1
    # modal_i = 0
    label_i = 1.0 # 1 for left atrium
class Heart_train(Heart_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Heart_test(Heart_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Pancreas_Base(Base_dataset):
    modality_n = 1 # only 1 modality
    # modal_i = 0
    label_i = 1.0 # 1 for pancreas, 2 for cancer
    # use all of them
class Pancreas_train(Pancreas_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Pancreas_test(Pancreas_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

class Colon_Base(Base_dataset):
    modality_n = 1 # only 1 modality
    # modal_i = 0
    label_i = 1.0 # 1 for colon cancer primaries
    # use 1.0
class Colon_train(Colon_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Colon_test(Colon_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)

if __name__ == "__main__":
    pass
    # main()
3D | oopil/3D_medical_image_FSS | FSS1000/dataloaders_medical/prostate.py | .py | 8,728 | 224 | import sys
import glob
import json
import re
from glob import glob
from util.utils import *
from dataloaders_medical.decathlon import *
from dataloaders_medical.dataset_decathlon import *
from dataloaders_medical.dataset_CT_ORG import *
import numpy as np
class MetaSliceData_train():
    """Meta-dataset that serves each item from a randomly picked sub-dataset.

    One "epoch" is a fixed number of sampled episodes (``iter_n``), not the
    combined size of the sub-datasets.
    """

    def __init__(self, datasets, iter_n=100):
        super().__init__()
        self.datasets = datasets
        self.dataset_n = len(datasets)
        self.iter_n = iter_n

    def __len__(self):
        # Fixed episode budget per epoch.
        return self.iter_n

    def __getitem__(self, idx):
        # Draw one sub-dataset uniformly at random, then delegate the lookup.
        picked = random.sample(self.datasets, 1)[0]
        return picked.__getitem__(idx)
def metadata():
    """Static paths and the organ-index table for the MICCAI 2015 Abdomen data.

    ``Organs[i]`` names the anatomical structure for task/label id ``i``;
    the directory entries point at different 2-D slice exports of the set.
    """
    info = {
        "src_dir" : "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training",
        "trg_dir" : "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d", # 144 setting
        "trg_dir2" : "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d_2", # 144 setting
        "trg_dir3" : "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d_denoise",
        # "trg_dir" : "/home/soopil/Desktop/Dataset/MICCAI2015challenge/Abdomen/RawData/Training_2d", # desktop setting
        "Tasks" : [i for i in range(1,14)],
        # "Tasks" : [i for i in range(1,17+1)],
        # training : [1,2,3,5,6,7,8,9,14,15]
        # testing : [1,3,6,14]
        "Organs" : ["background",
                    "spleen", #1
                    "right kidney", #2
                    "left kidney", #3
                    "gallbladder", #4
                    "esophagus", #5
                    "liver", #6
                    "stomach", #7
                    "aorta", #8
                    "inferior vana cava", #9
                    "portal vein & splenic vein", #10
                    "pancreas", #11
                    "right adrenal gland", #12
                    "left adrenal gland", #13
                    "bladder", #14
                    "uturus", #15
                    "rectum", #16
                    "small bowel", #17
                    ],
    }
    return info
def meta_data(_config):
    """Build the (meta-train, validation, test) datasets for the BCV data.

    Meta-training mixes the TrainLoaders of all non-target organ tasks;
    validation and test operate on the held-out organ _config['target'].
    When _config['internal_test'] is falsy, the test set is swapped for an
    external dataset via external_testset().
    """
    def path_collect(idx, option='train'):
        # Gather 2D slice image/label file paths for one organ task + split.
        img_paths = glob(f"{meta['trg_dir2']}/{idx}/{option}/img/*")
        label_paths = glob(f"{meta['trg_dir2']}/{idx}/{option}/label/*")
        # img_paths = glob(f"{_config['data_src']}/{idx}/{option}/img/*")
        # label_paths = glob(f"{_config['data_src']}/{idx}/{option}/label/*")
        return img_paths, label_paths
    def spliter(idx):
        # Collect all three splits (train/valid/test) for one organ task.
        tr_imgs, tr_labels = path_collect(idx, 'train')
        val_imgs, val_labels = path_collect(idx, 'valid')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels
    target_task = _config['target']
    meta = metadata()
    print(meta['trg_dir'])
    # tasks = meta['Tasks']
    tasks = [1,2,3,5,6,7,8,9,14,15]
    # tasks_remove = [4, 10, 12, 13] # 7 11
    # tasks_remove = [4, 5, 8, 9, 10, 11, 12, 13]
    # tasks_remove = [4, 5, 8, 9, 10, 11, 12, 13, 16, 17]
    ## we sholdn't use both left and right kidneys
    # for task in tasks_remove:
    #     tasks.remove(task)
    # If the target is a kidney, drop the mirrored kidney from training so
    # the model never trains on a near-identical organ.
    kidneys = [2,3]
    if target_task in kidneys:
        kidneys.remove(target_task)
        other_task = kidneys[0]
        try:
            tasks.remove(other_task)
        except:
            pass
    print(f"tasks : {tasks}")
    datasets = {}
    for task in tasks:
        tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels = spliter(task)
        # Per task: [train loader, validation loader, test loader].
        datasets[task] = [TrainLoader(tr_imgs, tr_labels, _config), TestLoader(val_imgs, val_labels, _config), TestLoader(ts_imgs, ts_labels, _config)]
    tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels = spliter(target_task)
    if _config['add_target']:
        # Also expose the first n target-organ training volumes to
        # meta-training (few-shot fine-tuning setting).
        n_add_target = _config['add_target']
        datasets[target_task] = [TrainLoader(tr_imgs[:n_add_target], tr_labels[:n_add_target], _config), TestLoader(val_imgs, val_labels, _config), TestLoader(ts_imgs, ts_labels, _config)]
        val_dataset = datasets[target_task][1]
        ts_dataset = datasets[target_task][2]
        tr_datasets = [dataset[0] for dataset in datasets.values()]
    else:
        # Standard setting: target organ is fully held out from training.
        val_dataset = datasets[target_task][1]
        ts_dataset = datasets[target_task][2]
        datasets.pop(target_task) #dictionary pop(key)
        tr_datasets = [dataset[0] for dataset in datasets.values()]
    print(f"training tasks : {datasets.keys()}")
    print(f"target tasks : {target_task}")
    ## set the support volume for testing
    if _config["internal_test"]:
        pass
    else:
        # _, _, ts_dataset = external_trainset(_config,target_task)
        # External evaluation: support slices come from the external
        # dataset's training split, test set is replaced entirely.
        tr_imgs, tr_labels, ts_dataset = external_testset(_config,target_task)
    # Fix the support volume(s) used at evaluation time: n_shot volumes
    # starting at index s_idx of the target's training split.
    val_dataset.set_support_volume(tr_imgs[_config['s_idx']:_config['s_idx']+_config['n_shot']], tr_labels[_config['s_idx']:_config['s_idx']+_config['n_shot']])
    ts_dataset.set_support_volume(tr_imgs[_config['s_idx']:_config['s_idx']+_config['n_shot']], tr_labels[_config['s_idx']:_config['s_idx']+_config['n_shot']])
    meta_tr_dataset = MetaSliceData_train(tr_datasets, iter_n=_config['n_iter'])
    return meta_tr_dataset, val_dataset, ts_dataset
def external_testset(_config, target_task):
    """Build an external-domain test set for the organ ``target_task``.

    Maps the BCV organ index to the corresponding task in either the
    Medical Decathlon or CT-ORG dataset (selected by
    _config['external_test']) and returns (tr_imgs, tr_labels, ts_dataset):
    support-candidate slice paths from the external training split plus the
    external test dataset object. Aborts via ``assert False`` when the
    organ has no counterpart in the chosen external dataset.
    """
    def decathlon_spliter(idx):
        def path_collect(idx, option='train'):
            # Decathlon task folder names; idx is 1-based (tasks[idx - 1]).
            tasks = ["Task01_BrainTumour",
                "Task02_Heart",
                "Task03_Liver",
                "Task04_Hippocampus",
                "Task05_Prostate",
                "Task06_Lung",
                "Task07_Pancreas",
                "Task08_HepaticVessel",
                "Task09_Spleen",
                "Task10_Colon",
                "Task11_Davis"
                ]
            src_path='/user/home2/soopil/Datasets/Decathlon_2d'
            img_paths = glob(f"{src_path}/{tasks[idx - 1]}/{option}/img/*")
            label_paths = glob(f"{src_path}/{tasks[idx - 1]}/{option}/label/*")
            return img_paths, label_paths
        tr_imgs, tr_labels = path_collect(idx, 'train')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, ts_imgs, ts_labels
    def CT_ORG_spliter(idx):
        def path_collect(idx, option='train'):
            # NOTE(review): the trailing comma below makes Organs a 1-tuple
            # wrapping the list; it is unused reference documentation only.
            Organs = ["background",
                "Liver", # 1
                "Bladder", # 2
                "Lung", # 3
                "Kidney", # 4
                "Bone", # 5
                "Brain", # 6
                ],
            src_path="/user/home2/soopil/Datasets/CT_ORG/Training_2d_align"
            img_paths = glob(f"{src_path}/{idx}/{option}/img/*")
            label_paths = glob(f"{src_path}/{idx}/{option}/label/*")
            return img_paths, label_paths
        tr_imgs, tr_labels = path_collect(idx, 'train')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, ts_imgs, ts_labels
    external = _config["external_test"]
    print(f"external testset : {external}")
    if external == "decathlon":
        # BCV index -> Decathlon task index (only spleen and liver exist).
        if target_task == 1: # spleen
            target_idx_decath = 9
            tr_imgs, tr_labels, ts_imgs, ts_labels = decathlon_spliter(target_idx_decath)
            ts_dataset = Spleen_test(ts_imgs, ts_labels, _config)
            # print(ts_imgs)
        elif target_task == 6:
            target_idx_decath = 3
            tr_imgs, tr_labels, ts_imgs, ts_labels = decathlon_spliter(target_idx_decath)
            ts_dataset = Liver_test(ts_imgs, ts_labels, _config)
        else:
            print("There isn't according organ in Decathlon dataset.")
            assert False
        print(f"target index in external dataset : {target_idx_decath}")
    elif external == "CT_ORG":
        # BCV index -> CT-ORG label index (kidney, liver, bladder only).
        if target_task == 3: # kidney
            target_idx_ctorg = 4
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        elif target_task == 6: # liver
            target_idx_ctorg = 1
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        elif target_task == 14: # bladder
            target_idx_ctorg = 2
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        else:
            print("There isn't according organ in CT_ORG dataset.")
            assert False
        print(f"target index in external dataset : {target_idx_ctorg}")
    else:
        print("configuration of external dataset is wrong")
        assert False
    return tr_imgs, tr_labels, ts_dataset
if __name__=="__main__":
pass | Python |
3D | oopil/3D_medical_image_FSS | PANet/train.py | .py | 6,426 | 163 | """Training Script"""
import os
import shutil
import numpy as np
import pdb
import random
import torch
import torch.nn as nn
import torch.optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from config import ex
from util.utils import set_seed, CLASS_LABELS, date
from dataloaders_medical.prostate import *
from models.fewshot import FewShotSeg
def overlay_color(img, mask, label, scale=None):
    """Overlay a prediction mask and a ground-truth label on a grayscale image.

    :param img: [1, H, W] grayscale image tensor
    :param mask: [1, H, W] binary prediction mask
    :param label: [1, H, W] binary ground-truth label
    :param scale: overlay intensity. Defaults to the mean image intensity.
        (The previous implementation unconditionally overwrote the argument
        with the image mean, leaving the parameter dead; ``None`` keeps that
        default behavior while allowing an explicit override.)
    :return: single-element list holding a [3, H, W] overlay tensor
    """
    if scale is None:
        # Match overlay brightness to the image's own intensity range.
        scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    # Build 3-channel versions: mask goes to channel 0 (red), label to
    # channel 1 (green). NOTE(review): because the same `zeros` list is
    # reused, the label tensor also carries the mask in channel 0, so mask
    # pixels are added twice in red - kept as-is to preserve behavior.
    zeros = torch.zeros_like(mask)
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    mask = torch.stack(zeros, dim=0)
    zeros[1] = label
    label = torch.stack(zeros, dim=0)
    img_3ch = torch.cat([img, img, img], dim=0)
    masked = img_3ch + mask.float() * scale + label.float() * scale
    return [masked]
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: meta-train the few-shot segmentation model.

    Builds the BCV meta-training loader, trains FewShotSeg with SGD +
    query/alignment losses, and periodically logs losses (and tensorboard
    visuals when _config['record']) and saves a snapshot.
    """
    if _run.observers:
        # Snapshot run sources next to the observer directory so each run
        # is reproducible from its own copy of the code.
        os.makedirs(f'{_run.observers[0].dir}/snapshots', exist_ok=True)
        for source_file, _ in _run.experiment_info['sources']:
            os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                        exist_ok=True)
            _run.observers[0].save_file(source_file, f'source/{source_file}')
        shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.cuda.set_device(device=_config['gpu_id'])
    torch.set_num_threads(1)
    _log.info('###### Create model ######')
    model = FewShotSeg(pretrained_path=_config['path']['init_path'], cfg=_config['model'])
    model = nn.DataParallel(model.cuda(), device_ids=[_config['gpu_id'],])
    model.train()
    _log.info('###### Load data ######')
    data_name = _config['dataset']
    if data_name == 'BCV':
        make_data = meta_data
    else:
        print(f"data name : {data_name}")
        raise ValueError('Wrong config for dataset!')
    tr_dataset, val_dataset, ts_dataset = make_data(_config)
    trainloader = DataLoader(
        dataset=tr_dataset,
        batch_size=_config['batch_size'],
        shuffle=True,
        num_workers=_config['n_work'],
        pin_memory=False, #True load data while training gpu
        drop_last=True
    )
    _log.info('###### Set optimizer ######')
    optimizer = torch.optim.SGD(model.parameters(), **_config['optim'])
    scheduler = MultiStepLR(optimizer, milestones=_config['lr_milestones'], gamma=0.1)
    criterion = nn.CrossEntropyLoss(ignore_index=_config['ignore_label'])
    if _config['record']: ## tensorboard visualization
        _log.info('###### define tensorboard writer #####')
        _log.info(f'##### board/train_{_config["board"]}_{date()}')
        writer = SummaryWriter(f'board/train_{_config["board"]}_{date()}')
    log_loss = {'loss': 0, 'align_loss': 0}
    _log.info('###### Training ######')
    for i_iter, sample_batched in enumerate(trainloader):
        # Prepare input: squeeze away the slice dimension from support and
        # query tensors (shapes annotated per line below).
        s_x_orig = sample_batched['s_x'].cuda()  # [B, Support, slice_num=1, 1, 256, 256]
        s_x = s_x_orig.squeeze(2)  # [B, Support, 1, 256, 256]
        s_y_fg_orig = sample_batched['s_y'].cuda()  # [B, Support, slice_num, 1, 256, 256]
        s_y_fg = s_y_fg_orig.squeeze(2)  # [B, Support, 1, 256, 256]
        s_y_fg = s_y_fg.squeeze(2)  # [B, Support, 256, 256]
        # Background mask is simply the complement of the foreground mask.
        s_y_bg = torch.ones_like(s_y_fg) - s_y_fg
        q_x_orig = sample_batched['q_x'].cuda()  # [B, slice_num, 1, 256, 256]
        q_x = q_x_orig.squeeze(1)  # [B, 1, 256, 256]
        q_y_orig = sample_batched['q_y'].cuda()  # [B, slice_num, 1, 256, 256]
        q_y = q_y_orig.squeeze(1)  # [B, 1, 256, 256]
        q_y = q_y.squeeze(1).long()  # [B, 256, 256]
        # Re-pack into the way x shot nested-list layout FewShotSeg expects.
        s_xs = [[s_x[:,shot, ...] for shot in range(_config["n_shot"])]]
        s_y_fgs = [[s_y_fg[:,shot, ...] for shot in range(_config["n_shot"])]]
        s_y_bgs = [[s_y_bg[:,shot, ...] for shot in range(_config["n_shot"])]]
        q_xs = [q_x]
        """
        Args:
            supp_imgs: support images
                way x shot x [B x 1 x H x W], list of lists of tensors
            fore_mask: foreground masks for support images
                way x shot x [B x H x W], list of lists of tensors
            back_mask: background masks for support images
                way x shot x [B x H x W], list of lists of tensors
            qry_imgs: query images
                N x [B x 1 x H x W], list of tensors
            qry_pred: [B, 2, H, W]
        """
        # Forward and Backward
        optimizer.zero_grad()
        query_pred, align_loss = model(s_xs, s_y_fgs, s_y_bgs, q_xs) #[B, 2, w, h]
        query_loss = criterion(query_pred, q_y)
        loss = query_loss + align_loss * _config['align_loss_scaler']
        loss.backward()
        optimizer.step()
        scheduler.step()
        # Log loss
        query_loss = query_loss.detach().data.cpu().numpy()
        align_loss = align_loss.detach().data.cpu().numpy() if align_loss != 0 else 0
        _run.log_scalar('loss', query_loss)
        _run.log_scalar('align_loss', align_loss)
        log_loss['loss'] += query_loss
        log_loss['align_loss'] += align_loss
        # print loss and take snapshots
        if (i_iter + 1) % _config['print_interval'] == 0:
            # Running averages over all iterations so far (not a window).
            loss = log_loss['loss'] / (i_iter + 1)
            align_loss = log_loss['align_loss'] / (i_iter + 1)
            print(f'step {i_iter+1}: loss: {loss}, align_loss: {align_loss}')
            if _config['record']:
                # Visualize the first batch element: image + pred + GT.
                batch_i = 0
                frames = []
                query_pred = query_pred.argmax(dim=1)
                query_pred = query_pred.unsqueeze(1)
                frames += overlay_color(q_x_orig[batch_i,0], query_pred[batch_i].float(), q_y_orig[batch_i,0])
                visual = make_grid(frames, normalize=True, nrow=2)
                writer.add_image("train/visual", visual, i_iter)
            # Overwrites the same snapshot each interval ("last" checkpoint).
            print(f"train - iter:{i_iter} \t => model saved", end='\n')
            save_fname = f'{_run.observers[0].dir}/snapshots/last.pth'
            torch.save(model.state_dict(),save_fname)
| Python |
3D | oopil/3D_medical_image_FSS | PANet/test.py | .py | 8,803 | 224 | """Evaluation Script"""
import os
import shutil
import pdb
import tqdm
import numpy as np
import torch
import torch.optim
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torchvision.utils import make_grid
from models.fewshot import FewShotSeg
from util.utils import set_seed, CLASS_LABELS, get_bbox, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.prostate import *
import SimpleITK as sitk
def overlay_color(img, mask, label, scale=None):
    """Overlay a prediction mask and a ground-truth label on a grayscale image.

    :param img: [1, H, W] grayscale image tensor
    :param mask: [1, H, W] binary prediction mask
    :param label: [1, H, W] binary ground-truth label
    :param scale: overlay intensity. Defaults to the mean image intensity.
        (The previous implementation unconditionally overwrote the argument
        with the image mean, leaving the parameter dead; ``None`` keeps that
        default behavior while allowing an explicit override.)
    :return: single-element list holding a [3, H, W] overlay tensor
    """
    if scale is None:
        # Match overlay brightness to the image's own intensity range.
        scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    # Build 3-channel versions: mask goes to channel 0 (red), label to
    # channel 1 (green). NOTE(review): because the same `zeros` list is
    # reused, the label tensor also carries the mask in channel 0, so mask
    # pixels are added twice in red - kept as-is to preserve behavior.
    zeros = torch.zeros_like(mask)
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    mask = torch.stack(zeros, dim=0)
    zeros[1] = label
    label = torch.stack(zeros, dim=0)
    img_3ch = torch.cat([img, img, img], dim=0)
    masked = img_3ch + mask.float() * scale + label.float() * scale
    return [masked]
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: evaluate the few-shot segmentation model.

    Runs the trained FewShotSeg over every test slice, regroups slice
    predictions per subject, computes per-subject Dice scores, and
    optionally writes tensorboard visuals and NIfTI sample volumes.
    """
    # Save run sources for reproducibility.
    for source_file, _ in _run.experiment_info['sources']:
        os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                    exist_ok=True)
        _run.observers[0].save_file(source_file, f'source/{source_file}')
    shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.cuda.set_device(device=_config['gpu_id'])
    torch.set_num_threads(1)
    _log.info('###### Create model ######')
    model = FewShotSeg(pretrained_path=_config['path']['init_path'], cfg=_config['model'])
    model = nn.DataParallel(model.cuda(), device_ids=[_config['gpu_id'],])
    if not _config['notrain']:
        # Load the trained snapshot unless evaluating an untrained model.
        model.load_state_dict(torch.load(_config['snapshot'], map_location='cpu'))
    model.eval()
    _log.info('###### Load data ######')
    data_name = _config['dataset']
    make_data = meta_data
    max_label = 1
    tr_dataset, val_dataset, ts_dataset = make_data(_config)
    testloader = DataLoader(
        dataset=ts_dataset,
        batch_size=1,
        shuffle=False,
        # num_workers=_config['n_work'],
        pin_memory=False, # True
        drop_last=False
    )
    if _config['record']:
        _log.info('###### define tensorboard writer #####')
        board_name = f'board/test_{_config["board"]}_{date()}'
        writer = SummaryWriter(board_name)
    _log.info('###### Testing begins ######')
    # metric = Metric(max_label=max_label, n_runs=_config['n_runs'])
    img_cnt = 0
    # length = len(all_samples)
    length = len(testloader)
    img_lists = []
    pred_lists = []
    label_lists = []
    # Per-subject accumulator: subj_idx -> list of per-slice records.
    saves = {}
    for subj_idx in range(len(ts_dataset.get_cnts())):
        saves[subj_idx] = []
    with torch.no_grad():
        loss_valid = 0
        batch_i = 0 # use only 1 batch size for testing
        for i, sample_test in enumerate(testloader): # even for upward, down for downward
            # Map the flat slice index back to (subject, slice) coordinates.
            subj_idx, idx = ts_dataset.get_test_subj_idx(i)
            img_list = []
            pred_list = []
            label_list = []
            preds = []
            fnames = sample_test['q_fname']
            # Same tensor preparation as in training (see train.py).
            s_x_orig = sample_test['s_x'].cuda()  # [B, Support, slice_num=1, 1, 256, 256]
            s_x = s_x_orig.squeeze(2)  # [B, Support, 1, 256, 256]
            s_y_fg_orig = sample_test['s_y'].cuda()  # [B, Support, slice_num, 1, 256, 256]
            s_y_fg = s_y_fg_orig.squeeze(2)  # [B, Support, 1, 256, 256]
            s_y_fg = s_y_fg.squeeze(2)  # [B, Support, 256, 256]
            s_y_bg = torch.ones_like(s_y_fg) - s_y_fg
            q_x_orig = sample_test['q_x'].cuda()  # [B, slice_num, 1, 256, 256]
            q_x = q_x_orig.squeeze(1)  # [B, 1, 256, 256]
            q_y_orig = sample_test['q_y'].cuda()  # [B, slice_num, 1, 256, 256]
            q_y = q_y_orig.squeeze(1)  # [B, 1, 256, 256]
            q_y = q_y.squeeze(1).long()  # [B, 256, 256]
            s_xs = [[s_x[:, shot, ...] for shot in range(_config["n_shot"])]]
            s_y_fgs = [[s_y_fg[:, shot, ...] for shot in range(_config["n_shot"])]]
            s_y_bgs = [[s_y_bg[:, shot, ...] for shot in range(_config["n_shot"])]]
            q_xs = [q_x]
            q_yhat, align_loss = model(s_xs, s_y_fgs, s_y_bgs, q_xs)
            # q_yhat = q_yhat[:,1:2, ...]
            # Convert 2-channel logits to a binary mask.
            q_yhat = q_yhat.argmax(dim=1)
            q_yhat = q_yhat.unsqueeze(1)
            preds.append(q_yhat)
            img_list.append(q_x_orig[batch_i,0].cpu().numpy())
            pred_list.append(q_yhat[batch_i].cpu().numpy())
            label_list.append(q_y_orig[batch_i,0].cpu().numpy())
            saves[subj_idx].append([subj_idx, idx, img_list, pred_list, label_list, fnames])
            print(f"test, iter:{i}/{length} - {subj_idx}/{idx} \t\t", end='\r')
            img_lists.append(img_list)
            pred_lists.append(pred_list)
            label_lists.append(label_list)
        # Second pass: stitch each subject's slices back together and
        # compute one Dice score per subject volume.
        print("start computing dice similarities ... total ", len(saves))
        dice_similarities = []
        for subj_idx in range(len(saves)):
            imgs, preds, labels = [], [], []
            save_subj = saves[subj_idx]
            for i in range(len(save_subj)):
                # print(len(save_subj), len(save_subj)-q_slice_n+1, q_slice_n, i)
                subj_idx, idx, img_list, pred_list, label_list, fnames = save_subj[i]
                # print(subj_idx, idx, is_reverse, len(img_list))
                # print(i, is_reverse, is_reverse_next, is_flip)
                for j in range(len(img_list)):
                    imgs.append(img_list[j])
                    preds.append(pred_list[j])
                    labels.append(label_list[j])
            # pdb.set_trace()
            img_arr = np.concatenate(imgs, axis=0)
            pred_arr = np.concatenate(preds, axis=0)
            label_arr = np.concatenate(labels, axis=0)
            # pdb.set_trace()
            # print(ts_dataset.slice_cnts[subj_idx] , len(imgs))
            # pdb.set_trace()
            # Volume-level Dice: 2|A∩B| / (|A|+|B|) over binary masks.
            dice = np.sum([label_arr * pred_arr]) * 2.0 / (np.sum(pred_arr) + np.sum(label_arr))
            dice_similarities.append(dice)
            print(f"computing dice scores {subj_idx}/{10}", end='\n')
            if _config['record']:
                frames = []
                for frame_id in range(0, len(save_subj)):
                    frames += overlay_color(torch.tensor(imgs[frame_id]), torch.tensor(preds[frame_id]).float(), torch.tensor(labels[frame_id]))
                visual = make_grid(frames, normalize=True, nrow=5)
                writer.add_image(f"test/{subj_idx}", visual, i)
                writer.add_scalar(f'dice_score/{i}', dice)
            if _config['save_sample']:
                ## only for internal test (BCV - MICCAI2015)
                sup_idx = _config['s_idx']
                target = _config['target']
                save_name = _config['save_name']
                dirs = ["gt", "pred", "input"]
                save_dir = f"../sample/panet_organ{target}_sup{sup_idx}_{save_name}"
                for dir in dirs:
                    try:
                        os.makedirs(os.path.join(save_dir,dir))
                    except:
                        pass
                # Recover the original voxel spacing from the source NIfTI
                # so the saved volumes are geometrically correct.
                subj_name = fnames[0][0].split("/")[-2]
                if target == 14:
                    # Bladder lives in the Cervix subset with different naming.
                    src_dir = "/user/home2/soopil/Datasets/MICCAI2015challenge/Cervix/RawData/Training/img"
                    orig_fname = f"{src_dir}/{subj_name}-Image.nii.gz"
                    pass
                else:
                    src_dir = "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training/img"
                    orig_fname = f"{src_dir}/img{subj_name}.nii.gz"
                itk = sitk.ReadImage(orig_fname)
                orig_spacing = itk.GetSpacing()
                label_arr = label_arr*2.0
                # label_arr = np.concatenate([np.zeros([1,256,256]), label_arr,np.zeros([1,256,256])])
                # pred_arr = np.concatenate([np.zeros([1,256,256]), pred_arr,np.zeros([1,256,256])])
                # img_arr = np.concatenate([np.zeros([1,256,256]), img_arr,np.zeros([1,256,256])])
                itk = sitk.GetImageFromArray(label_arr)
                itk.SetSpacing(orig_spacing)
                sitk.WriteImage(itk,f"{save_dir}/gt/{subj_idx}.nii.gz")
                itk = sitk.GetImageFromArray(pred_arr.astype(float))
                itk.SetSpacing(orig_spacing)
                sitk.WriteImage(itk,f"{save_dir}/pred/{subj_idx}.nii.gz")
                itk = sitk.GetImageFromArray(img_arr)
                itk.SetSpacing(orig_spacing)
                sitk.WriteImage(itk,f"{save_dir}/input/{subj_idx}.nii.gz")
        print(f"test result \n n : {len(dice_similarities)}, mean dice score : \
        {np.mean(dice_similarities)} \n dice similarities : {dice_similarities}")
        if _config['record']:
            writer.add_scalar(f'dice_score/mean', np.mean(dice_similarities))
| Python |
3D | oopil/3D_medical_image_FSS | PANet/config.py | .py | 4,180 | 153 | """Experiment Configuration"""
import os
import re
import glob
import itertools
import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
# Allow config entries to be modified after definition and disable sacred's
# stdout capture mode.
sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
sacred.SETTINGS.CAPTURE_MODE = 'no'
ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds
# Register every project .py file as a run source so sacred snapshots the
# exact code with each experiment.
source_folders = ['.', './dataloaders', './models', './util']
sources_to_save = list(itertools.chain.from_iterable(
    [glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
    ex.add_source_file(source_file)
@ex.config
def cfg():
    """Default configurations.

    Sacred config scope: every local variable defined here becomes an entry
    of the experiment configuration, overridable from the command line.
    """
    server="144" #202
    size=256
    input_size = (size, size)
    seed = 1234
    cuda_visable = '0, 1, 2, 3, 4, 5, 6, 7'
    gpu_id = 0
    n_shot = 1
    mode = 'test' # 'train' or 'test'
    # BCV organ label index to hold out as the target task.
    target = 1
    # Index of the support volume used at evaluation time.
    s_idx=0
    add_target=False
    record=False
    dataset = 'BCV' # 'VOC' or 'COCO'
    board = "try"
    external_test = "None" # "decathlon" # "CT_ORG"
    if external_test == "None":
        internal_test = True
    else:
        internal_test = False
    if mode == 'train':
        n_steps = 50000 # 30000
        n_iter=n_steps
        label_sets = 0
        batch_size = 5
        lr_milestones = [10000, 20000, 50000]
        # lr_milestones = [10000, 20000, 30000]
        align_loss_scaler = 1
        ignore_label = 255
        print_interval = 100 #100
        save_pred_every = 500
        n_work=1
        model = {
            'align': True,
            # 'align': False,
        }
        task = {
            'n_ways': 1,
            'n_shots': n_shot,
            'n_queries': 1,
        }
        optim = {
            'lr': 1e-3,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
    elif mode == 'test':
        save_sample = False
        save_name = ""
        notrain = False
        snapshot = './runs/PANet_VOC_sets_0_1way_1shot_[train]/1/snapshots/30000.pth'
        n_runs = 5
        n_iter = 1
        n_steps = 1000
        batch_size = 1
        scribble_dilation = 0
        bbox = False
        scribble = False
        # Set model config from the snapshot string
        # (e.g. 'align' is enabled iff the substring appears in the path).
        model = {}
        for key in ['align',]:
            model[key] = key in snapshot
        # Set label_sets from the snapshot string
        label_sets = int(snapshot.split('_sets_')[1][0])
        # Set task config from the snapshot string
        task = {
            'n_ways': 1,
            'n_shots': n_shot,
            'n_queries': 1,
        }
        # task = {
        #     'n_ways': int(re.search("[0-9]+way", snapshot).group(0)[:-3]),
        #     'n_shots': int(re.search("[0-9]+shot", snapshot).group(0)[:-4]),
        #     'n_queries': 1,
        # }
    else:
        raise ValueError('Wrong configuration for "mode" !')
    # Human-readable experiment tag used for observer directory naming.
    exp_str = '_'.join(
        [dataset,]
        + [key for key, value in model.items() if value]
        + [f'sets_{label_sets}', f'{task["n_ways"]}way_{task["n_shots"]}shot_{mode}'])
    path = {
        'log_dir': './runs',
        'init_path': './../../pretrained_model/vgg16-397923af.pth',
        'VOC':{'data_dir': '../../data/Pascal/VOCdevkit/VOC2012/',
               'data_split': 'trainaug',},
        'COCO':{'data_dir': '../../data/COCO/',
                'data_split': 'train',},
    }
    # Per-server dataset roots; selected by the `server` key above.
    data_srcs = {
        "144":"/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d_2",
        "202":"/data2/soopil/MICCAI2015challenge/Abdomen/RawData/Training_2d_2",
    }
    data_src = data_srcs[str(server)]
@ex.config_hook
def add_observer(config, command_name, logger):
    """Sacred config hook: attach a FileStorageObserver whose directory
    name encodes the experiment configuration (plus test-mode flags)."""
    exp_name = f'{ex.path}_{config["exp_str"]}'
    if config['mode'] == 'test':
        if config['notrain']:
            exp_name += '_notrain'
        if config['scribble']:
            exp_name += '_scribble'
        if config['bbox']:
            exp_name += '_bbox'
    observer = FileStorageObserver.create(os.path.join(config['path']['log_dir'], exp_name))
    ex.observers.append(observer)
    return config
| Python |
3D | oopil/3D_medical_image_FSS | PANet/util/sbd_instance_process.py | .py | 1,242 | 39 | """
This snippet processes SBD instance segmentation data
and transform it from .mat to .png. Then transformed
images will be saved in VOC data folder. The name of
the new folder is "SegmentationObjectAug"
"""
import os
from scipy.io import loadmat
from PIL import Image
# Source paths: original VOC instance masks and SBD .mat instance masks.
voc_dir = '../Pascal/VOCdevkit/VOC2012/'
sbd_dir = '../SBD/'
inst_path = os.path.join(voc_dir, 'SegmentationObject')
inst_aug_path = os.path.join(sbd_dir, 'inst')
# Target directory for the merged (augmented) instance masks.
target_path = os.path.join(voc_dir, 'SegmentationObjectAug')
os.makedirs(target_path, exist_ok=True)
# Copy the original VOC instance masks unchanged.
inst_files = os.listdir(inst_path)
for inst_file in inst_files:
    im = Image.open(os.path.join(inst_path, inst_file))
    im.save(os.path.join(target_path, inst_file))
# Reuse the palette of the last VOC mask for the SBD conversions.
# NOTE(review): raises NameError if SegmentationObject is empty - confirm
# the VOC masks are in place before running.
palette = im.getpalette()
# Convert each SBD .mat instance mask to an indexed .png (skip existing).
inst_aug_files = os.listdir(inst_aug_path)
for inst_aug_file in inst_aug_files:
    target_file = os.path.join(target_path, inst_aug_file.replace('.mat', '.png'))
    if not os.path.isfile(target_file):
        data = loadmat(os.path.join(inst_aug_path, inst_aug_file))
        im = Image.fromarray(data['GTinst']['Segmentation'][0, 0])
        im.putpalette(palette)
        im.save(target_file)
| Python |
3D | oopil/3D_medical_image_FSS | PANet/util/metric.py | .py | 6,195 | 152 | """
Metrics for computing evalutation results
"""
import numpy as np
class Metric(object):
    """
    Compute evaluation result

    Accumulates per-sample, per-class True Positive / False Positive /
    False Negative counts across one or more test runs and derives
    mean-IoU statistics from them. Classes absent from a sample's ground
    truth are recorded as NaN and excluded via nansum/nan-aware math.

    Args:
        max_label:
            max label index in the data (0 denoting background)
        n_runs:
            number of test runs
    """
    def __init__(self, max_label=20, n_runs=None):
        self.labels = list(range(max_label + 1))  # all class labels
        self.n_runs = 1 if n_runs is None else n_runs

        # list of list of array, each array save the TP/FP/FN statistic of a testing sample
        self.tp_lst = [[] for _ in range(self.n_runs)]
        self.fp_lst = [[] for _ in range(self.n_runs)]
        self.fn_lst = [[] for _ in range(self.n_runs)]

    def record(self, pred, target, labels=None, n_run=None):
        """
        Record the evaluation result for each sample and each class label, including:
            True Positive, False Positive, False Negative

        Args:
            pred:
                predicted mask array, expected shape is H x W
            target:
                target mask array, expected shape is H x W
            labels:
                only count specific label, used when knowing all possible labels in advance
        """
        assert pred.shape == target.shape

        if self.n_runs == 1:
            n_run = 0

        # array to save the TP/FP/FN statistic for each class (plus BG)
        tp_arr = np.full(len(self.labels), np.nan)
        fp_arr = np.full(len(self.labels), np.nan)
        fn_arr = np.full(len(self.labels), np.nan)

        if labels is None:
            labels = self.labels
        else:
            # Background (0) is always evaluated alongside the given labels.
            labels = [0,] + labels

        for j, label in enumerate(labels):
            # Get the location of the pixels that are predicted as class j
            # (pixels marked 255 in the target are ignored).
            idx = np.where(np.logical_and(pred == j, target != 255))
            pred_idx_j = set(zip(idx[0].tolist(), idx[1].tolist()))
            # Get the location of the pixels that are class j in ground truth
            idx = np.where(target == j)
            target_idx_j = set(zip(idx[0].tolist(), idx[1].tolist()))

            if target_idx_j:  # if ground-truth contains this class
                # Set-based TP/FP/FN over pixel coordinate pairs.
                tp_arr[label] = len(set.intersection(pred_idx_j, target_idx_j))
                fp_arr[label] = len(pred_idx_j - target_idx_j)
                fn_arr[label] = len(target_idx_j - pred_idx_j)

        self.tp_lst[n_run].append(tp_arr)
        self.fp_lst[n_run].append(fp_arr)
        self.fn_lst[n_run].append(fn_arr)

    def get_mIoU(self, labels=None, n_run=None):
        """
        Compute mean IoU

        Args:
            labels:
                specify a subset of labels to compute mean IoU, default is using all classes
            n_run:
                compute a single run's statistics when given; otherwise
                return mean and std across all runs.
        """
        if labels is None:
            labels = self.labels
        # Sum TP, FP, FN statistic of all samples
        if n_run is None:
            tp_sum = [np.nansum(np.vstack(self.tp_lst[run]), axis=0).take(labels)
                      for run in range(self.n_runs)]
            fp_sum = [np.nansum(np.vstack(self.fp_lst[run]), axis=0).take(labels)
                      for run in range(self.n_runs)]
            fn_sum = [np.nansum(np.vstack(self.fn_lst[run]), axis=0).take(labels)
                      for run in range(self.n_runs)]

            # Compute mean IoU classwisely
            # Average across n_runs, then average over classes
            mIoU_class = np.vstack([tp_sum[run] / (tp_sum[run] + fp_sum[run] + fn_sum[run])
                                    for run in range(self.n_runs)])
            mIoU = mIoU_class.mean(axis=1)

            return (mIoU_class.mean(axis=0), mIoU_class.std(axis=0),
                    mIoU.mean(axis=0), mIoU.std(axis=0))
        else:
            tp_sum = np.nansum(np.vstack(self.tp_lst[n_run]), axis=0).take(labels)
            fp_sum = np.nansum(np.vstack(self.fp_lst[n_run]), axis=0).take(labels)
            fn_sum = np.nansum(np.vstack(self.fn_lst[n_run]), axis=0).take(labels)

            # Compute mean IoU classwisely and average over classes
            mIoU_class = tp_sum / (tp_sum + fp_sum + fn_sum)
            mIoU = mIoU_class.mean()

            return mIoU_class, mIoU

    def get_mIoU_binary(self, n_run=None):
        """
        Compute mean IoU for binary scenario
        (sum all foreground classes as one class)

        Returns the same (mean, std) structure as get_mIoU, but over two
        classes only: background (label 0) and merged foreground.
        """
        # Sum TP, FP, FN statistic of all samples
        if n_run is None:
            tp_sum = [np.nansum(np.vstack(self.tp_lst[run]), axis=0)
                      for run in range(self.n_runs)]
            fp_sum = [np.nansum(np.vstack(self.fp_lst[run]), axis=0)
                      for run in range(self.n_runs)]
            fn_sum = [np.nansum(np.vstack(self.fn_lst[run]), axis=0)
                      for run in range(self.n_runs)]

            # Sum over all foreground classes
            tp_sum = [np.c_[tp_sum[run][0], np.nansum(tp_sum[run][1:])]
                      for run in range(self.n_runs)]
            fp_sum = [np.c_[fp_sum[run][0], np.nansum(fp_sum[run][1:])]
                      for run in range(self.n_runs)]
            fn_sum = [np.c_[fn_sum[run][0], np.nansum(fn_sum[run][1:])]
                      for run in range(self.n_runs)]

            # Compute mean IoU classwisely and average across classes
            mIoU_class = np.vstack([tp_sum[run] / (tp_sum[run] + fp_sum[run] + fn_sum[run])
                                    for run in range(self.n_runs)])
            mIoU = mIoU_class.mean(axis=1)

            return (mIoU_class.mean(axis=0), mIoU_class.std(axis=0),
                    mIoU.mean(axis=0), mIoU.std(axis=0))
        else:
            tp_sum = np.nansum(np.vstack(self.tp_lst[n_run]), axis=0)
            fp_sum = np.nansum(np.vstack(self.fp_lst[n_run]), axis=0)
            fn_sum = np.nansum(np.vstack(self.fn_lst[n_run]), axis=0)

            # Sum over all foreground classes
            tp_sum = np.c_[tp_sum[0], np.nansum(tp_sum[1:])]
            fp_sum = np.c_[fp_sum[0], np.nansum(fp_sum[1:])]
            fn_sum = np.c_[fn_sum[0], np.nansum(fn_sum[1:])]

            mIoU_class = tp_sum / (tp_sum + fp_sum + fn_sum)
            mIoU = mIoU_class.mean()

            return mIoU_class, mIoU
| Python |
3D | oopil/3D_medical_image_FSS | PANet/util/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | PANet/util/scribbles.py | .py | 12,787 | 344 | from __future__ import absolute_import, division
import networkx as nx
import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion
from scipy.special import comb
from skimage.filters import rank
from skimage.morphology import dilation, disk, erosion, medial_axis
from sklearn.neighbors import radius_neighbors_graph
def bezier_curve(points, nb_points=1000):
    """ Given a list of points compute a bezier curve from it.

    # Arguments
        points: ndarray. Array of points with shape (N, 2) with N being the
            number of points and the second dimension representing the
            (x, y) coordinates.
        nb_points: Integer. Number of points to sample from the bezier curve.
            This value must be larger than the number of points given in
            `points`. Capped at 1000.

    # Returns
        ndarray: Array of shape (nb_points, 2) with the bezier curve of the
            given path of points, or the input points unchanged when there
            are more control points than requested samples.
    """
    nb_points = min(nb_points, 1000)
    # np.float was removed in NumPy 1.24+; the builtin float dtype is the
    # exact equivalent (float64).
    points = np.asarray(points, dtype=float)
    if points.ndim != 2 or points.shape[1] != 2:
        raise ValueError(
            '`points` should be two dimensional and have shape: (N, 2)')

    n_points = len(points)
    if n_points > nb_points:
        # More control points than requested samples: return them unchanged.
        return points

    t = np.linspace(0., 1., nb_points).reshape(1, -1)

    # Compute the Bernstein polynomial of n, i as a function of t
    i = np.arange(n_points).reshape(-1, 1)
    n = n_points - 1
    polynomial_array = comb(n, i) * (t**(n - i)) * (1 - t)**i

    bezier_curve_points = polynomial_array.T.dot(points)

    return bezier_curve_points
def bresenham(points):
    """ Apply Bresenham algorithm for a list points.

    More info: https://en.wikipedia.org/wiki/Bresenham's_line_algorithm

    # Arguments
        points: ndarray. Array of points with shape (N, 2) with N being the
            number of points and the second coordinate representing the
            (x, y) coordinates.

    # Returns
        ndarray: Array of integer points after having applied the bresenham
            algorithm to every consecutive pair (endpoints inclusive).
    """
    # np.int was removed in NumPy 1.24+; the builtin int maps to the
    # platform default integer dtype, matching the previous behavior.
    points = np.asarray(points, dtype=int)

    def line(x0, y0, x1, y1):
        """ Bresenham line algorithm: rasterize (x0,y0)-(x1,y1). """
        d_x = x1 - x0
        d_y = y1 - y0

        x_sign = 1 if d_x > 0 else -1
        y_sign = 1 if d_y > 0 else -1

        d_x = np.abs(d_x)
        d_y = np.abs(d_y)

        if d_x > d_y:
            xx, xy, yx, yy = x_sign, 0, 0, y_sign
        else:
            # Steep segment: iterate along y instead of x.
            d_x, d_y = d_y, d_x
            xx, xy, yx, yy = 0, y_sign, x_sign, 0

        D = 2 * d_y - d_x
        y = 0

        line = np.empty((d_x + 1, 2), dtype=points.dtype)
        for x in range(d_x + 1):
            line[x] = [x0 + x * xx + y * yx, y0 + x * xy + y * yy]
            if D >= 0:
                y += 1
                D -= 2 * d_x
            D += 2 * d_y

        return line

    nb_points = len(points)
    if nb_points < 2:
        # Nothing to rasterize with fewer than two points.
        return points

    new_points = []

    for i in range(nb_points - 1):
        p = points[i:i + 2].ravel().tolist()
        new_points.append(line(*p))

    new_points = np.concatenate(new_points, axis=0)

    return new_points
def scribbles2mask(scribbles,
                   output_resolution,
                   bezier_curve_sampling=False,
                   nb_points=1000,
                   compute_bresenham=True,
                   default_value=0):
    """ Convert the scribbles data into a mask.

    # Arguments
        scribbles: List of ndarray. Each entry is an (N, 2) float array of
            (x, y) points of one scribble path, in pixel coordinates.
            NOTE(review): each array is normalized *in place* (``p /= ...``),
            so the caller's arrays are mutated - confirm no caller reuses
            them after this call.
        output_resolution: Tuple. Output resolution (H, W).
        bezier_curve_sampling: Boolean. Whether to first resample each path
            with a bezier curve.
        nb_points: Integer. If `bezier_curve_sampling` is `True` set the number
            of points to sample from the bezier curve.
        compute_bresenham: Boolean. Whether to rasterize the path segments
            with the Bresenham algorithm so drawn lines are connected.
        default_value: Integer. Default value for the pixels which do not belong
            to any scribble.

    # Returns
        ndarray: (H, W) integer mask with scribble pixels set to 1 and all
            other pixels set to `default_value`.
    """
    if len(output_resolution) != 2:
        raise ValueError(
            'Invalid output resolution: {}'.format(output_resolution))
    for r in output_resolution:
        if r < 1:
            raise ValueError(
                'Invalid output resolution: {}'.format(output_resolution))

    # np.float / np.int were removed in NumPy 1.24+; the builtin dtypes are
    # the exact equivalents.
    size_array = np.asarray(output_resolution[::-1], dtype=float) - 1

    m = np.full(output_resolution, default_value, dtype=int)

    for p in scribbles:
        # Normalize (x, y) to [0, 1] (in place), then scale to pixel indices.
        p /= output_resolution[::-1]
        path = p.tolist()
        path = np.asarray(path, dtype=float)
        if bezier_curve_sampling:
            path = bezier_curve(path, nb_points=nb_points)
        path *= size_array
        path = path.astype(int)

        if compute_bresenham:
            path = bresenham(path)
        # path columns are (x, y); numpy indexing is (row=y, col=x).
        m[path[:, 1], path[:, 0]] = 1

    return m
class ScribblesRobot(object):
    """Robot that generates realistic scribbles simulating human interaction.
    # Attributes:
        kernel_size: Float. Fraction of the square root of the area used
            to compute the dilation and erosion before computing the
            skeleton of the error masks.
        max_kernel_radius: Float. Maximum kernel radius when applying
            dilation and erosion. Default 16 pixels.
        min_nb_nodes: Integer. Number of nodes necessary to keep a connected
            graph and convert it into a scribble.
        nb_points: Integer. Number of points to sample the bezier curve
            when converting the final paths into curves.
    Reference:
        [1] Sergi et al., "The 2018 DAVIS Challenge on Video Object Segmentation", arxiv 2018
        [2] Jordi et al., "The 2017 DAVIS Challenge on Video Object Segmentation", arxiv 2017
    """

    def __init__(self,
                 kernel_size=.15,
                 max_kernel_radius=16,
                 min_nb_nodes=4,
                 nb_points=1000):
        if kernel_size >= 1. or kernel_size < 0:
            raise ValueError('kernel_size must be a value between [0, 1).')
        self.kernel_size = kernel_size
        self.max_kernel_radius = max_kernel_radius
        self.min_nb_nodes = min_nb_nodes
        self.nb_points = nb_points

    def _generate_scribble_mask(self, mask):
        """ Generate the skeleton from a mask
        Given an error mask, the medial axis is computed to obtain the
        skeleton of the objects. In order to obtain smoother skeleton and
        remove small objects, an erosion and dilation operations are performed.
        The kernel size used is proportional the squared of the area.
        # Arguments
            mask: Numpy Array. Error mask
        Returns:
            skel: Numpy Array. Skeleton mask
        """
        mask = np.asarray(mask, dtype=np.uint8)
        side = np.sqrt(np.sum(mask > 0))
        mask_ = mask
        # kernel_size = int(self.kernel_size * side)
        kernel_radius = self.kernel_size * side * .5
        kernel_radius = min(kernel_radius, self.max_kernel_radius)
        # logging.verbose(
        #     'Erosion and dilation with kernel radius: {:.1f}'.format(
        #         kernel_radius), 2)
        compute = True
        # Shrink the kernel until the opened mask is non-empty (or radius <= 1).
        while kernel_radius > 1. and compute:
            kernel = disk(kernel_radius)
            mask_ = rank.minimum(mask.copy(), kernel)
            mask_ = rank.maximum(mask_, kernel)
            compute = False
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # documented replacement.
            if mask_.astype(bool).sum() == 0:
                compute = True
                prev_kernel_radius = kernel_radius
                kernel_radius *= .9
                # logging.verbose('Reducing kernel radius from {:.1f} '.format(
                #     prev_kernel_radius) +
                #                 'pixels to {:.1f}'.format(kernel_radius), 1)
        # Pad so the medial axis does not touch the image border, then crop back.
        mask_ = np.pad(
            mask_, ((1, 1), (1, 1)), mode='constant', constant_values=False)
        skel = medial_axis(mask_.astype(bool))
        skel = skel[1:-1, 1:-1]
        return skel

    def _mask2graph(self, skeleton_mask):
        """ Transforms a skeleton mask into a graph
        Args:
            skeleton_mask (ndarray): Skeleton mask
        Returns:
            tuple(nx.Graph, ndarray): Returns a tuple where the first element
                is a Graph and the second element is an array of xy coordinates
                indicating the coordinates for each Graph node.
                If an empty mask is given, None is returned.
        """
        mask = np.asarray(skeleton_mask, dtype=bool)
        if np.sum(mask) == 0:
            return None
        h, w = mask.shape
        x, y = np.arange(w), np.arange(h)
        X, Y = np.meshgrid(x, y)
        X, Y = X.ravel(), Y.ravel()
        M = mask.ravel()
        X, Y = X[M], Y[M]
        points = np.c_[X, Y]
        # Connect 8-neighbour skeleton pixels (distance <= sqrt(2)).
        G = radius_neighbors_graph(points, np.sqrt(2), mode='distance')
        # nx.from_scipy_sparse_matrix was removed in networkx 3.0; prefer the
        # new name when available, fall back to the old one.
        from_scipy = getattr(nx, 'from_scipy_sparse_array',
                             getattr(nx, 'from_scipy_sparse_matrix', None))
        T = from_scipy(G)
        return T, points

    def _acyclics_subgraphs(self, G):
        """ Divide a graph into connected components subgraphs
        Divide a graph into connected components subgraphs and remove its
        cycles removing the edge with higher weight inside the cycle. Also
        prune the graphs by number of nodes in case the graph has not enought
        nodes.
        Args:
            G (nx.Graph): Graph
        Returns:
            list(nx.Graph): Returns a list of graphs which are subgraphs of G
                with cycles removed.
        """
        if not isinstance(G, nx.Graph):
            raise TypeError('G must be a nx.Graph instance')
        S = []  # List of subgraphs of G
        # nx.connected_component_subgraphs was removed in networkx 2.4; build
        # the per-component subgraphs explicitly (equivalent behavior).
        components = (G.subgraph(c).copy() for c in nx.connected_components(G))
        for g in components:
            # Remove all cycles that we may find
            has_cycles = True
            while has_cycles:
                try:
                    cycle = nx.find_cycle(g)
                    weights = np.asarray([G[u][v]['weight'] for u, v in cycle])
                    idx = weights.argmax()
                    # Remove the edge with highest weight at cycle
                    g.remove_edge(*cycle[idx])
                except nx.NetworkXNoCycle:
                    has_cycles = False
            if len(g) < self.min_nb_nodes:
                # Prune small subgraphs
                # logging.verbose('Remove a small line with {} nodes'.format(
                #     len(g)), 1)
                continue
            S.append(g)
        return S

    def _longest_path_in_tree(self, G):
        """ Given a tree graph, compute the longest path and return it
        Given an undirected tree graph, compute the longest path and return it.
        The approach use two shortest path transversals (shortest path in a
        tree is the same as longest path). This could be improve but would
        require implement it:
        https://cs.stackexchange.com/questions/11263/longest-path-in-an-undirected-tree-with-only-one-traversal
        Args:
            G (nx.Graph): Graph which should be an undirected tree graph
        Returns:
            list(int): Returns a list of indexes of the nodes belonging to the
                longest path.
        """
        if not isinstance(G, nx.Graph):
            raise TypeError('G must be a nx.Graph instance')
        if not nx.is_tree(G):
            raise ValueError('Graph G must be a tree (graph without cycles)')
        # Compute the furthest node to the random node v
        v = list(G.nodes())[0]
        distance = nx.single_source_shortest_path_length(G, v)
        vp = max(distance.items(), key=lambda x: x[1])[0]
        # From this furthest point v' find again the longest path from it
        distance = nx.single_source_shortest_path(G, vp)
        longest_path = max(distance.values(), key=len)
        # Return the longest path
        return list(longest_path)

    def generate_scribbles(self, mask):
        """Given a binary mask, the robot will return a scribble in the region"""
        # generate scribbles
        skel_mask = self._generate_scribble_mask(mask)
        G, P = self._mask2graph(skel_mask)
        S = self._acyclics_subgraphs(G)
        longest_paths_idx = [self._longest_path_in_tree(s) for s in S]
        longest_paths = [P[idx] for idx in longest_paths_idx]
        scribbles_paths = [
            bezier_curve(p, self.nb_points) for p in longest_paths
        ]
        output_resolution = tuple([mask.shape[0], mask.shape[1]])
        scribble_mask = scribbles2mask(scribbles_paths, output_resolution)
        return scribble_mask
| Python |
3D | oopil/3D_medical_image_FSS | PANet/util/voc_classwise_filenames.py | .py | 2,540 | 72 | """
This snippet processes VOC segmentation data
and generates filename list according to the
class labels each image contains.
This snippet will create folders under
"ImageSets/Segmentaion/" with the same
names as the splits. Each folder has 20 txt files
each contains the filenames whose associated image
contains this class label.
"""
import os
import numpy as np
from PIL import Image
# Root of the VOC2012 release and the directory of augmented segmentation masks.
voc_dir = '../../data/Pascal/VOCdevkit/VOC2012/'
seg_dir = os.path.join(voc_dir, 'SegmentationClassAug')
split_dir = os.path.join(voc_dir, 'ImageSets', 'Segmentation')

def _read_split(name):
    # Read the newline-separated list of image ids for one data split.
    with open(os.path.join(split_dir, name + '.txt'), 'r') as f:
        return f.read().splitlines()

# Filenames of every available segmentation mask.
filenames = os.listdir(seg_dir)

# Image-id lists for each of the four data splits.
splits = ('train', 'val', 'trainval', 'trainaug')
filenames_dic = {split: _read_split(split) for split in splits}

# dic[split][label] -> list of image ids whose mask contains `label`.
dic = {split: {} for split in splits}
for split in dic:
    os.makedirs(os.path.join(split_dir, split), exist_ok=True)

# Bucket every mask's image id under each class label it contains,
# for every split the image belongs to.
for filename in filenames:
    mask = np.asarray(Image.open(os.path.join(seg_dir, filename)))
    label_set = set(np.unique(mask)) - {0, 255}  # drop background and void
    image_id = filename.replace('.png', '')
    for label in label_set:
        for split in dic:
            if image_id in filenames_dic[split]:
                dic[split].setdefault(label, []).append(image_id)

# Write one txt file per (split, class label) pair.
for split in dic:
    for label in dic[split].keys():
        imageset_path = os.path.join(split_dir, split,
                                     'class{}.txt'.format(label))
        with open(imageset_path, 'w+') as f:
            for item in dic[split][label]:
                f.write("{}\n".format(item))
| Python |
3D | oopil/3D_medical_image_FSS | PANet/util/utils.py | .py | 1,995 | 74 | """Util functions"""
import os
import random
from datetime import datetime

import numpy as np
import torch
def try_mkdir(path):
    """Best-effort directory creation: print success or failure, never raise.

    Args:
        path: directory path to create (parent must already exist).
    """
    try:
        # `os` was previously never imported, so this call always raised a
        # NameError that the bare `except` silently swallowed.
        os.mkdir(path)
        print(f"mkdir : {path}")
    except OSError:
        # Narrowed from a bare `except`: only filesystem errors (exists,
        # missing parent, permissions) are treated as "failed to create".
        print(f"failed to make a directory : {path}")
def date():
    """Return the current local timestamp formatted as 'YYYYMMDD_HHMMSS'.

    The previous `now.year + now.month + now.day` line summed integers and
    was immediately overwritten; it has been removed as dead code.
    """
    return datetime.now().strftime('%Y%m%d_%H%M%S')
def set_seed(seed):
    """
    Seed every random number generator used by the project.

    Covers Python's `random` module plus PyTorch on CPU and on every
    visible CUDA device, so repeated runs are reproducible.
    """
    for seeder in (random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
# Class-id splits for few-shot cross-validation.
# For each dataset, fold i (key 0..3) trains on all classes EXCEPT a held-out
# contiguous range; 'all' lists every class id of the dataset.
CLASS_LABELS = {
    'VOC': {
        'all': set(range(1, 21)),
        0: set(range(1, 21)) - set(range(1, 6)),     # fold 0 holds out classes 1-5
        1: set(range(1, 21)) - set(range(6, 11)),    # fold 1 holds out classes 6-10
        2: set(range(1, 21)) - set(range(11, 16)),   # fold 2 holds out classes 11-15
        3: set(range(1, 21)) - set(range(16, 21)),   # fold 3 holds out classes 16-20
    },
    'COCO': {
        'all': set(range(1, 81)),
        0: set(range(1, 81)) - set(range(1, 21)),    # fold 0 holds out classes 1-20
        1: set(range(1, 81)) - set(range(21, 41)),   # fold 1 holds out classes 21-40
        2: set(range(1, 81)) - set(range(41, 61)),   # fold 2 holds out classes 41-60
        3: set(range(1, 81)) - set(range(61, 81)),   # fold 3 holds out classes 61-80
    }
}
def get_bbox(fg_mask, inst_mask):
    """
    Get the ground truth bounding boxes

    Builds two box masks from a foreground mask and an instance-id mask:
    `fg_bbox` is 1 inside the bounding box of the largest instance, and
    `bg_bbox` is 1 everywhere EXCEPT the bounding boxes of all instances.

    NOTE(review): `inst_mask` is modified in place (pixels outside the
    foreground are zeroed). Indexing `inst_mask[0]` suggests masks are
    shaped (1, H, W) — TODO confirm against the caller.
    """
    fg_bbox = torch.zeros_like(fg_mask, device=fg_mask.device)
    bg_bbox = torch.ones_like(fg_mask, device=fg_mask.device)
    # Restrict instance ids to the foreground region (in-place mutation).
    inst_mask[fg_mask == 0] = 0
    # cls_id = id of the instance covering the largest area (ignoring id 0).
    area = torch.bincount(inst_mask.view(-1))
    cls_id = area[1:].argmax() + 1
    # All instance ids except the first unique value (presumably background 0
    # — verify when inst_mask can contain no zeros).
    cls_ids = np.unique(inst_mask)[1:]
    # Foreground box: tight bounding box of the largest instance.
    mask_idx = np.where(inst_mask[0] == cls_id)
    y_min = mask_idx[0].min()
    y_max = mask_idx[0].max()
    x_min = mask_idx[1].min()
    x_max = mask_idx[1].max()
    fg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 1
    # Background box mask: carve out the (clamped) bounding box of every instance.
    for i in cls_ids:
        mask_idx = np.where(inst_mask[0] == i)
        y_min = max(mask_idx[0].min(), 0)
        y_max = min(mask_idx[0].max(), fg_mask.shape[1] - 1)
        x_min = max(mask_idx[1].min(), 0)
        x_max = min(mask_idx[1].max(), fg_mask.shape[2] - 1)
        bg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 0
    return fg_bbox, bg_bbox
| Python |
3D | oopil/3D_medical_image_FSS | PANet/models/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | PANet/models/fewshot.py | .py | 8,036 | 194 | """
Fewshot Semantic Segmentation
"""
import pdb
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
from .vgg import Encoder
class FewShotSeg(nn.Module):
    """
    Fewshot Segmentation model
    Args:
        in_channels:
            number of input channels
        pretrained_path:
            path of the model for initialization
        cfg:
            model configurations
    """
    def __init__(self, in_channels=1, pretrained_path=None, cfg=None):
        super().__init__()
        self.pretrained_path = pretrained_path
        # Default config disables the prototype-alignment branch.
        self.config = cfg or {'align': False}
        # Encoder
        self.encoder = nn.Sequential(OrderedDict([
            ('backbone', Encoder(in_channels, self.pretrained_path)),]))

    def forward(self, supp_imgs, fore_mask, back_mask, qry_imgs):
        """
        Args:
            supp_imgs: support images
                way x shot x [B x 1 x H x W], list of lists of tensors
            fore_mask: foreground masks for support images
                way x shot x [B x H x W], list of lists of tensors
            back_mask: background masks for support images
                way x shot x [B x H x W], list of lists of tensors
            qry_imgs: query images
                N x [B x 1 x H x W], list of tensors

        Returns:
            output: (N*B) x (1 + Wa) x H x W segmentation scores
            align_loss: scalar prototype-alignment loss averaged over the batch
                (0 when the align branch is disabled or in eval mode)
        """
        n_ways = len(supp_imgs)
        n_shots = len(supp_imgs[0])
        n_queries = len(qry_imgs)
        batch_size = supp_imgs[0][0].shape[0]
        img_size = supp_imgs[0][0].shape[-2:]
        ###### Extract features ######
        # All support and query images are encoded in a single forward pass.
        imgs_concat = torch.cat([torch.cat(way, dim=0) for way in supp_imgs]
                                + [torch.cat(qry_imgs, dim=0),], dim=0)
        img_fts = self.encoder(imgs_concat)
        fts_size = img_fts.shape[-2:]
        supp_fts = img_fts[:n_ways * n_shots * batch_size].view(
            n_ways, n_shots, batch_size, -1, *fts_size)  # Wa x Sh x B x C x H' x W'
        qry_fts = img_fts[n_ways * n_shots * batch_size:].view(
            n_queries, batch_size, -1, *fts_size)   # N x B x C x H' x W'
        fore_mask = torch.stack([torch.stack(way, dim=0)
                                 for way in fore_mask], dim=0)  # Wa x Sh x B x H x W
        # back_mask = torch.stack([torch.stack(way, dim=0)
        #                          for way in back_mask], dim=0)  # Wa x Sh x B x H x W
        # NOTE(review): the back_mask argument is ignored here; the background
        # is recomputed as the complement of the foreground mask.
        back_mask = torch.ones_like(fore_mask) - fore_mask
        ###### Compute loss ######
        align_loss = 0
        outputs = []
        # Process each episode (batch element) independently.
        for epi in range(batch_size):
            ###### Extract prototype ######
            supp_fg_fts = [[self.getFeatures(supp_fts[way, shot, [epi]],
                                             fore_mask[way, shot, [epi]])
                            for shot in range(n_shots)] for way in range(n_ways)]
            supp_bg_fts = [[self.getFeatures(supp_fts[way, shot, [epi]],
                                             back_mask[way, shot, [epi]])
                            for shot in range(n_shots)] for way in range(n_ways)]
            ###### Obtain the prototypes######
            fg_prototypes, bg_prototype = self.getPrototype(supp_fg_fts, supp_bg_fts)
            ###### Compute the distance ######
            # Channel 0 is background, channels 1..Wa are the foreground ways.
            prototypes = [bg_prototype,] + fg_prototypes
            dist = [self.calDist(qry_fts[:, epi], prototype) for prototype in prototypes]
            pred = torch.stack(dist, dim=1)  # N x (1 + Wa) x H' x W'
            outputs.append(F.interpolate(pred, size=img_size, mode='bilinear'))
            ###### Prototype alignment loss ######
            if self.config['align'] and self.training:
                align_loss_epi = self.alignLoss(qry_fts[:, epi], pred, supp_fts[:, :, epi],
                                                fore_mask[:, :, epi], back_mask[:, :, epi])
                align_loss += align_loss_epi
        output = torch.stack(outputs, dim=1)  # N x B x (1 + Wa) x H x W
        output = output.view(-1, *output.shape[2:])
        return output, align_loss / batch_size

    def calDist(self, fts, prototype, scaler=20):
        """
        Calculate the distance between features and prototypes

        Args:
            fts: input features
                expect shape: N x C x H x W
            prototype: prototype of one semantic class
                expect shape: 1 x C
            scaler: multiplier applied to the cosine similarity so the
                scores have a useful range for softmax/cross-entropy
        """
        dist = F.cosine_similarity(fts, prototype[..., None, None], dim=1) * scaler
        return dist

    def getFeatures(self, fts, mask):
        """
        Extract foreground and background features via masked average pooling

        Args:
            fts: input features, expect shape: 1 x C x H' x W'
            mask: binary mask, expect shape: 1 x H x W
        """
        # Upsample features to the mask resolution before pooling.
        fts = F.interpolate(fts, size=mask.shape[-2:], mode='bilinear')
        # Masked average pool; epsilon guards against an empty mask.
        masked_fts = torch.sum(fts * mask[None, ...], dim=(2, 3)) \
            / (mask[None, ...].sum(dim=(2, 3)) + 1e-5)  # 1 x C
        return masked_fts

    def getPrototype(self, fg_fts, bg_fts):
        """
        Average the features to obtain the prototype

        Args:
            fg_fts: lists of list of foreground features for each way/shot
                expect shape: Wa x Sh x [1 x C]
            bg_fts: lists of list of background features for each way/shot
                expect shape: Wa x Sh x [1 x C]
        """
        n_ways, n_shots = len(fg_fts), len(fg_fts[0])
        # One foreground prototype per way; a single shared background prototype.
        fg_prototypes = [sum(way) / n_shots for way in fg_fts]
        bg_prototype = sum([sum(way) / n_shots for way in bg_fts]) / n_ways
        return fg_prototypes, bg_prototype

    def alignLoss(self, qry_fts, pred, supp_fts, fore_mask, back_mask):
        """
        Compute the loss for the prototype alignment branch

        Segments the support images with prototypes derived from the query
        prediction, and penalizes disagreement with the support ground truth.

        Args:
            qry_fts: embedding features for query images
                expect shape: N x C x H' x W'
            pred: predicted segmentation score
                expect shape: N x (1 + Wa) x H x W
            supp_fts: embedding features for support images
                expect shape: Wa x Sh x C x H' x W'
            fore_mask: foreground masks for support images
                expect shape: way x shot x H x W
            back_mask: background masks for support images
                expect shape: way x shot x H x W
        """
        n_ways, n_shots = len(fore_mask), len(fore_mask[0])
        # Mask and get query prototype
        pred_mask = pred.argmax(dim=1, keepdim=True)  # N x 1 x H' x W'
        binary_masks = [pred_mask == i for i in range(1 + n_ways)]
        # Ways that the query prediction never assigns are skipped below.
        skip_ways = [i for i in range(n_ways) if binary_masks[i + 1].sum() == 0]
        pred_mask = torch.stack(binary_masks, dim=1).float()  # N x (1 + Wa) x 1 x H' x W'
        qry_prototypes = torch.sum(qry_fts.unsqueeze(1) * pred_mask, dim=(0, 3, 4))
        qry_prototypes = qry_prototypes / (pred_mask.sum((0, 3, 4)) + 1e-5)  # (1 + Wa) x C
        # Compute the support loss
        loss = 0
        for way in range(n_ways):
            if way in skip_ways:
                continue
            # Get the query prototypes
            prototypes = [qry_prototypes[[0]], qry_prototypes[[way + 1]]]
            for shot in range(n_shots):
                img_fts = supp_fts[way, [shot]]
                supp_dist = [self.calDist(img_fts, prototype) for prototype in prototypes]
                supp_pred = torch.stack(supp_dist, dim=1)
                supp_pred = F.interpolate(supp_pred, size=fore_mask.shape[-2:],
                                          mode='bilinear')
                # Construct the support Ground-Truth segmentation
                # (255 marks pixels in neither mask; ignored by the loss).
                supp_label = torch.full_like(fore_mask[way, shot], 255,
                                             device=img_fts.device).long()
                supp_label[fore_mask[way, shot] == 1] = 1
                supp_label[back_mask[way, shot] == 1] = 0
                # Compute Loss
                loss = loss + F.cross_entropy(
                    supp_pred, supp_label[None, ...], ignore_index=255) / n_shots / n_ways
        return loss
| Python |
3D | oopil/3D_medical_image_FSS | PANet/models/vgg.py | .py | 2,350 | 75 | """
Encoder for few shot segmentation (VGG16)
"""
import torch
import torch.nn as nn
class Encoder(nn.Module):
    """
    Encoder for few shot segmentation

    A VGG16-style fully-convolutional feature extractor. The final stage uses
    dilated convolutions (dilation=2, no pooling) so the output stride stays
    at 8 instead of VGG16's 32.

    Args:
        in_channels:
            number of input channels
        pretrained_path:
            path of the model for initialization
    """
    def __init__(self, in_channels=3, pretrained_path=None):
        super().__init__()
        self.pretrained_path = pretrained_path
        self.features = nn.Sequential(
            self._make_layer(2, in_channels, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self._make_layer(2, 64, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self._make_layer(3, 128, 256),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self._make_layer(3, 256, 512),
            # nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            # Last stage: dilated convs, final ReLU omitted.
            self._make_layer(3, 512, 512, dilation=2, lastRelu=False),
        )
        self._init_weights()

    def forward(self, x):
        return self.features(x)

    def _make_layer(self, n_convs, in_channels, out_channels, dilation=1, lastRelu=True):
        """
        Make a (conv, relu) layer

        Args:
            n_convs:
                number of convolution layers
            in_channels:
                input channels
            out_channels:
                output channels
            dilation:
                dilation (and matching padding) of each 3x3 convolution
            lastRelu:
                when False, the ReLU after the final convolution is omitted
        """
        layer = []
        for i in range(n_convs):
            layer.append(nn.Conv2d(in_channels, out_channels, kernel_size=3,
                                   dilation=dilation, padding=dilation))
            if i != n_convs - 1 or lastRelu:
                layer.append(nn.ReLU(inplace=True))
            in_channels = out_channels
        return nn.Sequential(*layer)

    def _init_weights(self):
        # Kaiming init for every conv; optionally overwrite with pretrained weights.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        if self.pretrained_path is not None:
            dic = torch.load(self.pretrained_path, map_location='cpu')
            keys = list(dic.keys())
            new_dic = self.state_dict()
            new_keys = list(new_dic.keys())
            # NOTE(review): copies checkpoint entries 4..25 by POSITION into
            # this model's state dict — this assumes both key orders line up
            # (presumably skipping the first conv stage whose in_channels
            # differ). Verify against the actual checkpoint layout.
            for i in range(4, 26):
                new_dic[new_keys[i]] = dic[keys[i]]
            self.load_state_dict(new_dic)
| Python |
3D | oopil/3D_medical_image_FSS | PANet/test/summarize_test_results.py | .py | 1,434 | 52 | import re
import glob
import numpy as np
def summarize(files):
    """Collect the dice score from the first five result files.

    Each file is expected to store its dice score as the second-to-last
    whitespace-separated token on its second-to-last line.

    Args:
        files: list of result-file paths (at least 5 required).

    Returns:
        A list of five floats, or the tuple (0, 0) when fewer than five
        files are given (kept for backward compatibility: callers average
        the result, and np.mean((0, 0)) == 0 suppresses printing).
    """
    if len(files) < 5:
        print("There is no results for this set.")
        return 0, 0
    dices = []
    for file in files[:5]:
        # Context manager closes the handle (the old code leaked it), and the
        # unused `re.search` lookup has been removed.
        with open(file) as fd:
            lines = fd.readlines()
        result_line = lines[-2]
        line_parts = re.split(" ", result_line)
        # The dice value is the next-to-last whitespace-separated field.
        dices.append(float(line_parts[-2]))
    return dices
def main():
    """Print per-organ, per-shot dice summaries for each result directory."""
    result_dirs = ["runs/log/ctorg_5shot", "runs/log/decathlon_5shot"]
    for result_dir in result_dirs:
        print()
        print(result_dir)
        for shot in [1, 3, 5]:
            for organ in [1, 3, 6, 14]:
                matches = sorted(glob.glob(f"{result_dir}/*{shot}shot*_{organ}_*"))
                dices = summarize(matches)
                avg = float("{:.3f}".format(np.mean(dices)))
                std = float("{:.4f}".format(np.std(dices)))
                dice_str = ",".join(str(dice) for dice in dices)
                # A zero mean or zero deviation means no results were found.
                if avg * std != 0:
                    print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")

if __name__ == "__main__":
    main()
| Python |
3D | oopil/3D_medical_image_FSS | PANet/dataloaders_medical/decathlon.py | .py | 10,510 | 285 | import os
import re
import sys
import json
import math
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def prostate_img_process(img_arr, HE=False):
    """Intensity-process a prostate image: histogram-equalize when HE is set,
    otherwise min-max normalize."""
    if HE:
        return equalize_hist(img_arr) * 255.0
    return normalize(img_arr, type=0)
def totensor(arr):
    """Convert a numpy array into a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
## do random rotation and flip
k = random.sample([i for i in range(0, 4)], 1)[0]
s_imgs = np.rot90(s_imgs, k, (3, 4)).copy()
s_labels = np.rot90(s_labels, k, (3, 4)).copy()
q_imgs = np.rot90(q_imgs, k, (2, 3)).copy()
q_labels = np.rot90(q_labels, k, (2, 3)).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 3).copy()
s_labels = np.flip(s_labels, 3).copy()
q_imgs = np.flip(q_imgs, 2).copy()
q_labels = np.flip(q_labels, 2).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 4).copy()
s_labels = np.flip(s_labels, 4).copy()
q_imgs = np.flip(q_imgs, 3).copy()
q_labels = np.flip(q_labels, 3).copy()
return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset():
    """Episode sampler over per-subject directories of 2-D .npy slices.

    Each subject is a directory of numerically named slice files ("0.npy",
    "1.npy", ...). Training episodes pair a random query slice with
    position-matched support slices; testing walks every query slice once.
    """
    def __init__(self, img_paths, label_paths, config):
        """
        dataset constructor for training

        Args:
            img_paths: list of per-subject image directories
            label_paths: matching list of per-subject label directories
            config: dict with keys 'mode', 'n_iter', 'size', 'n_shot', 's_idx'
        """
        super().__init__()
        self.mode = config['mode']
        self.length = config['n_iter']
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        # Train/test mode is inferred from the subclass name prefix ("Test...").
        self.is_train = True
        if str(self.__class__).split(".")[-1][:4]=="Test":
            self.is_train = False
        ## load file names in advance
        self.img_lists = []
        self.slice_cnts = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            self.slice_cnts.append(len(fnames))
            # Sort numerically so "2.npy" precedes "10.npy".
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        if not self.is_train: # for testing
            # Test length = one item per query slice across all subjects.
            self.length = sum(self.slice_cnts)

    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load the selected support/query slices into a tensor sample dict.

        Returns a dict with:
            s_x: support images, shot x slices x 1 x size x size
            s_y: support labels, same layout
            q_x: query images, slices x 1 x size x size
            q_y: query labels, same layout
            s_fname / q_fname: source file paths for bookkeeping
        """
        seed = random.randrange(0,1000)
        # s_length = len(s_img_paths)
        s_imgs_all, s_labels_all = [],[]
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [],[]
            for i in range(len(s_img_paths)):
                img_path, label_path = s_img_paths[i], s_label_paths[i]
                img = self.img_load(img_path, seed)
                # `resize` here is cv2.resize (imported as `from cv2 import resize`).
                img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
                img = np.expand_dims(img, axis=0)
                imgs.append(img)
                label = np.load(label_path)
                # Nearest-neighbor keeps label values discrete.
                label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
                label = np.expand_dims(label, axis=0)
                labels.append(label)
            s_imgs = np.stack(imgs,axis=0)
            s_labels = np.stack(labels,axis=0)
            s_imgs_all.append(s_imgs)
            s_labels_all.append(s_labels)
        s_imgs = np.stack(s_imgs_all,axis=0)
        s_labels = np.stack(s_labels_all,axis=0)
        q_length = len(q_img_paths)
        imgs, labels = [],[]
        for i in range(len(q_img_paths)):
            img_path, label_path = q_img_paths[i], q_label_paths[i]
            img = self.img_load(img_path, seed)
            img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
            img = np.expand_dims(img, axis=0)
            imgs.append(img)
            label = np.load(label_path)
            label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
            label = np.expand_dims(label, axis=0)
            labels.append(label)
        q_imgs = np.stack(imgs,axis=0)
        q_labels = np.stack(labels,axis=0)
        # print(imgs.shape) [slice_num,1,256,256]?
        if self.is_train: ## random augmentation : flip, rotation
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x":totensor(s_imgs),
            "s_y":totensor(s_labels), #.long()
            "q_x":totensor(q_imgs),
            "q_y":totensor(q_labels), #.long()
            # "s_length":s_length,
            # "q_length":q_length,
            "s_fname":s_img_paths_all,
            "q_fname":q_img_paths,
        }
        return sample

    def handle_idx(self, s_n, q_idx, q_n):
        """
        choose slices for support and query volume

        Maps a query slice index to the support slice at the same relative
        depth position within its volume.
        :return: supp_idx, qry_idx
        """
        q_ratio = (q_idx)/(q_n-1)
        s_idx = round((s_n-1)*q_ratio)
        return s_idx

    def getitem_train(self):
        """Sample one random training episode (n_shot supports + 1 query)."""
        ## choose support and target
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot+1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        q_idx = random.randrange(0, len(q_fnames))
        is_flip = False
        # NOTE(review): reverse() mutates the cached filename lists in place,
        # so the flip persists into later samples until reversed again.
        if random.random() < 0.5:
            is_flip = True
            q_fnames.reverse()
        s_img_paths_all, s_label_paths_all = [],[]
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = self.img_lists[s_subj_idx]
            ## flip augmentation
            if is_flip:
                s_fnames.reverse()
            ## choose support and query slice
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx+1]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def getitme_test(self, idx):
        """Build the test episode for global slice index `idx`.

        Requires set_support_volume() to have been called first (it provides
        self.s_img_paths / self.s_label_paths / self.s_fnames_list).
        """
        q_subj_idx, q_idx = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        s_img_paths_all, s_label_paths_all = [],[]
        # NOTE(review): the loop variable s_idx is shadowed by handle_idx's
        # result below; harmless since it is re-bound each iteration.
        for s_idx in range(self.n_shot):
            s_subj_img_path = self.s_img_paths[s_idx]
            s_subj_label_path = self.s_label_paths[s_idx]
            s_fnames = self.s_fnames_list[s_idx]
            ## choose support and query slice
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx+1]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def get_len_train(self):
        # Number of random episodes per epoch (config['n_iter']).
        return self.length

    def get_len_test(self):
        # Total number of query slices across all subjects.
        return self.length

    def get_val_subj_idx(self, idx):
        # NOTE(review): relies on self.q_cnts / self.q_max_slice, which are
        # not set anywhere in this class — presumably set by a subclass.
        for subj_idx,cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx*self.q_max_slice
            else:
                idx -= cnt
        print("get_val_subj_idx function is not working.")
        assert False

    def get_test_subj_idx(self, idx):
        """Map a global slice index to (subject index, slice index)."""
        for subj_idx,cnt in enumerate(self.slice_cnts):
            if idx < cnt:
                return subj_idx, idx
            else:
                idx -= cnt
        print("get_test_subj_idx function is not working.")
        assert False

    def get_cnts(self):
        ## only for test loader
        return self.slice_cnts

    def img_load(self, img_path, seed=0):
        # `seed` is accepted for subclass hooks but unused here.
        img_arr = np.load(img_path)
        return img_arr
class BaseLoader(Base_dataset):
    """Shared constants for the decathlon loaders."""
    modal_i = [0]   # single imaging modality
    label_i = 1.0   # single label value per image
class TrainLoader(BaseLoader):
    """Loader yielding randomly sampled support/query episodes for training."""

    def __len__(self):
        return self.get_len_train()

    def __getitem__(self, idx):
        # Training episodes are sampled at random; `idx` is intentionally unused.
        return self.getitem_train()
class TestLoader(BaseLoader):
    """Loader that walks every query slice once, using fixed support volumes."""

    def __len__(self):
        return self.get_len_test()

    def __getitem__(self, idx):
        return self.getitme_test(idx)

    def set_support_volume(self, s_img_paths, s_label_paths):
        """Register the support image/label volumes used for every test query."""
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i, (img_path, label_path) in enumerate(zip(s_img_paths, s_label_paths)):
            # Sort slice files numerically so "2.npy" precedes "10.npy".
            s_fnames = sorted(int(fname.split(".")[0]) for fname in os.listdir(img_path))
            print(f'support img {i} path : {img_path} length : {len(s_fnames)}')
            self.s_img_paths.append(img_path)
            self.s_label_paths.append(label_path)
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
# Module is import-only; no CLI entry point is wired up.
if __name__ == "__main__":
    pass
    # main()
3D | oopil/3D_medical_image_FSS | PANet/dataloaders_medical/common.py | .py | 6,690 | 210 | """
Dataset classes for common uses
"""
import random
import SimpleITK as sitk
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import torch
import torchvision.transforms.functional as tr_F
from skimage.exposure import equalize_hist
import pdb
def crop_resize(slice):
    """Crop fixed borders off a 2-D slice and resize it to 240x240.

    The original implementation called this module's `resize(sample, size)`
    helper, which expects a sample dict and crashes when handed an ndarray.
    The resize is now done directly with nearest-neighbor index sampling.

    Args:
        slice: 2-D numpy array (H, W) with H > 60 and W > 100.

    Returns:
        2-D numpy array of shape (240, 240), same dtype as the input.
    """
    x_size, y_size = np.shape(slice)
    slice = slice[40:x_size - 20, 50:y_size - 50]
    # Nearest-neighbor resample onto a 240x240 grid.
    rows = np.linspace(0, slice.shape[0] - 1, 240).round().astype(int)
    cols = np.linspace(0, slice.shape[1] - 1, 240).round().astype(int)
    return slice[np.ix_(rows, cols)]
def fill_empty_space(arr):
    """Replace zero-valued (empty) pixels with the array's mean intensity.

    Note: modifies `arr` in place and also returns it.
    """
    mean_value = np.mean(arr)
    arr[arr == 0] = mean_value
    return arr
def prostate_sample(img_arr, label_arr, isize):
    """Build a resized, tensor-normalized sample dict from an image/label pair."""
    image = Image.fromarray(img_arr.astype(np.uint8))
    mask = Image.fromarray(label_arr.astype(np.uint8))
    # inst/scribble reuse the label image, matching the PANet sample format.
    sample = {
        'image': image,
        'label': mask,
        'inst': mask,
        'scribble': mask,
    }
    sample = resize(sample, (isize, isize))
    return to_tensor_normalize(sample)
def prostate_mask(sample, isize):
    """Derive binary foreground/background masks from a sample's label tensor."""
    label = sample['label']
    ones = torch.ones_like(label)
    # Foreground = pixels labeled 1; background is its complement.
    fg_mask = torch.where(label == 1, ones, torch.zeros_like(label))
    bg_mask = ones - fg_mask
    return {
        'fg_mask': fg_mask.expand((1, isize, isize)),
        'bg_mask': bg_mask.expand((1, isize, isize)),
    }
def get_support_sample(ipath, lpath, modal_index, mask_n, is_HE, shift=0):
    """Build a support sample from a 3-D volume and its label volume.

    Picks the slice with the largest (binarized) mask area, optionally offset
    by `shift`, writes intermediate PNGs to the working directory, and returns
    the tensor-normalized sample.

    NOTE(review): writes/reads fixed files "tmp_img.png"/"tmp_label.png" in the
    CWD — not safe for concurrent callers.
    """
    arr = read_npy(ipath, modal_index, is_HE)
    # pdb.set_trace() ## for debugging
    # arr = fill_empty_space(arr)
    arr_mask = read_sitk(lpath)
    ## for 2-way(binary) segmentation
    arr_mask = (arr_mask>0)*1.0
    # arr_mask = (arr_mask == mask_n) * 1.0
    # Select the axial slice with the largest foreground area.
    cnt = np.sum(arr_mask, axis=(1, 2))
    maxarg = np.argmax(cnt)
    slice = arr[maxarg+shift, :, :]
    slice = crop_resize(slice)
    # slice = normalize(slice)
    slice = convert3ch(slice)
    save_img(slice, "tmp_img.png")
    # Scale the binary mask to 0/255 for PNG storage.
    slice_mask = arr_mask[maxarg+shift, :, :]*255.0
    slice_mask = crop_resize(slice_mask)
    # slice_mask = convert3ch(slice_mask)
    save_img(slice_mask, "tmp_label.png")
    sample = read_sample("tmp_img.png", "tmp_label.png")
    # sample = transforms(sample)
    sample = to_tensor_normalize(sample)
    return sample
    ## for 5 way segmentation
    # arr_mask = (arr_mask == mask_n)*1.0
    # if mask_n == 2:
    #     arr_mask = (arr_mask > 0)*1.0
    # elif mask_n == 1:
    #     arr_mask = (arr_mask == 1)*1.0
    # elif mask_n == 4:
    #     arr_mask = (arr_mask == 4)*1.0 + (arr_mask == 1)*1.0
    # else:
    #     raise("invalid mask_n")
def getMask(sample, class_id=1, class_ids=[0, 1]):
    """Build foreground/background and brain-region masks (1 x 240 x 240).

    fg = pixels equal to `class_id`; bg = everything else;
    brain_fg = non-empty, non-target pixels; brain_bg = the empty region.
    """
    label = sample['label']
    empty = sample['empty']
    ones = torch.ones_like(label)
    fg_mask = torch.where(label == class_id, ones, torch.zeros_like(label))
    # bg_mask = torch.ones_like(label) - fg_mask - empty
    bg_mask = ones - fg_mask
    # brain_fg_mask = torch.ones_like(label) - empty
    brain_fg_mask = ones - empty - fg_mask
    brain_bg_mask = empty
    shape = (1, 240, 240)
    return {'fg_mask': fg_mask.expand(shape),
            'bg_mask': bg_mask.expand(shape),
            'brain_fg_mask': brain_fg_mask.expand(shape),
            'brain_bg_mask': brain_bg_mask.expand(shape)}
def read_npy(path, modal_index, is_HE):
    """Load one modality from a .npy volume; optionally histogram-equalize,
    then min-max normalize."""
    arr = np.load(path)[modal_index]
    if is_HE:
        arr = equalize_hist(arr)
    return normalize(arr, type=0)
def convert3ch(slice, axis=2):
    """Replicate a single-channel slice three times along `axis` (gray -> RGB)."""
    expanded = np.expand_dims(slice, axis=axis)
    return np.concatenate([expanded] * 3, axis=axis)
def normalize(arr, type=0):
    """Rescale an array onto the [0, 255] range.

    Args:
        arr: input numpy array. No longer modified in place (the previous
            `arr -= mini` mutated the caller's data for type 0).
        type: 0 -> min-max scaling to [0, 1] before the 255 multiply;
              1 -> z-score standardization (mean 0, std 1) before the multiply.

    Returns:
        The normalized array times 255.0.

    Raises:
        ValueError: for an unknown `type` (previously an UnboundLocalError).
    """
    if type == 0:  # min and max
        shifted = arr - np.amin(arr)
        maxi = np.amax(shifted)
        arr_norm = shifted / maxi
    elif type == 1:  # stddev and mean
        arr_norm = (arr - np.mean(arr)) / np.std(arr)
    else:
        raise ValueError(f"unknown normalization type: {type}")
    return arr_norm * 255.0
def map_distribution(arr, tg_mean=0, tg_std=1, tg_min=0, tg_max=255):
    """Map `arr` onto a target distribution using its nonzero-voxel statistics,
    then clip to [tg_min, tg_max]. Prints the mapped statistics as a side effect."""
    nonzero = arr[np.nonzero(arr)]
    # Standardize with the mean/std of the nonzero voxels only.
    z = (arr - np.mean(nonzero)) / np.std(nonzero)
    mapped = z * tg_std + tg_mean
    print(np.mean(mapped), np.std(mapped), np.amin(mapped), np.amax(mapped))
    return np.clip(mapped, tg_min, tg_max)
def to_tensor_normalize(sample):
    """Convert the sample's PIL image/label into normalized tensors, in place.

    Adds sample['empty']: a long mask of pixels whose first channel is exactly
    zero BEFORE normalization (background outside the scan).
    NOTE(review): the expand() call hard-codes 240x240 inputs -- confirm.
    """
    img, label = sample['image'], sample['label']
    inst, scribble = sample['inst'], sample['scribble']
    img = tr_F.to_tensor(img)
    # Zero pixels in channel 0 mark the empty region (checked pre-normalization).
    empty = (img[0] == 0.0) * 1.0
    # ImageNet normalization constants.
    img = tr_F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    label = torch.Tensor(np.array(label)).long()
    sample['empty'] = empty.long()
    sample['image'] = img.expand((1, 3, 240, 240))
    sample['label'] = label / 255.0
    sample['inst'] = inst
    sample['scribble'] = scribble
    return sample
def read_sample(img_path, label_path):
    """Load one image/label pair as PIL images.

    'inst' and 'scribble' are separate handles onto the same label file;
    'original' keeps an unnormalized copy of the input image.
    """
    return {
        'image': Image.open(img_path),
        'label': Image.open(label_path),
        'inst': Image.open(label_path),      # separate handle, same file
        'scribble': Image.open(label_path),  # separate handle, same file
        # Save the original image (without normalization)
        'original': Image.open(img_path),
    }
def read_sitk(path):
    """Read a SimpleITK-readable volume and return it as a float32 ndarray."""
    volume = sitk.ReadImage(path)
    return np.asarray(sitk.GetArrayFromImage(volume), dtype=np.float32)
def save_sitk(arr, itk_ref, opath):
    """Write `arr` to `opath`, copying spacing/origin/direction from `itk_ref`."""
    out_img = sitk.GetImageFromArray(arr)
    out_img.CopyInformation(itk_ref)
    sitk.WriteImage(out_img, opath)
def save_img(arr, path):
    """Save a numeric array to `path` as an 8-bit image."""
    Image.fromarray(arr.astype(np.uint8)).save(path)
def load_img(path):
    # Load an image from disk and run it through to_tensor_normalize.
    # NOTE(review): read_PIL returns a bare ndarray, but to_tensor_normalize
    # indexes its argument with string keys ('image', 'label', ...), so this
    # call would fail at runtime. Looks dead or broken -- confirm intended use
    # before relying on it.
    arr = read_PIL(path)
    return to_tensor_normalize(arr)
def load_seg(path):
    """Load a segmentation image as a tensor with a leading channel axis."""
    seg = np.expand_dims(read_PIL(path), axis=0)
    return tr_F.to_tensor(seg)
def read_PIL(path):
    """Open an image file and return its pixels as a float32 numpy array."""
    return np.array(Image.open(path), dtype=np.float32)
def resize(sample, size):
    """Resize the sample's image (default interpolation) and label
    (nearest-neighbor, to keep class ids intact) in place."""
    sample['image'] = tr_F.resize(sample['image'], size)
    sample['label'] = tr_F.resize(sample['label'], size, interpolation=Image.NEAREST)
    return sample
# === PANet/dataloaders_medical/__init__.py (empty) ===
# === PANet/dataloaders_medical/dataset_CT_ORG.py ===
import os
import re
import sys
import json
import math
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def totensor(arr):
    """Convert a numpy array to a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
## do random rotation and flip
k = random.sample([i for i in range(0, 4)], 1)[0]
s_imgs = np.rot90(s_imgs, k, (3, 4)).copy()
s_labels = np.rot90(s_labels, k, (3, 4)).copy()
q_imgs = np.rot90(q_imgs, k, (2, 3)).copy()
q_labels = np.rot90(q_labels, k, (2, 3)).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 3).copy()
s_labels = np.flip(s_labels, 3).copy()
q_imgs = np.flip(q_imgs, 2).copy()
q_labels = np.flip(q_labels, 2).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 4).copy()
s_labels = np.flip(s_labels, 4).copy()
q_imgs = np.flip(q_imgs, 3).copy()
q_labels = np.flip(q_labels, 3).copy()
return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset_ctorg():
    """Few-shot segmentation dataset over pre-sliced CT-ORG volumes.

    Every subject is a directory of per-slice ``<index>.npy`` files. An
    episode pairs ``n_shot`` support slices with one query slice taken at the
    same relative depth within their respective volumes.
    """
    def __init__(self, img_paths, label_paths, config):
        """
        dataset constructor for training

        Args:
            img_paths: list of per-subject image-slice directories.
            label_paths: parallel list of per-subject label-slice directories.
            config: dict providing 'mode', 'n_iter', 'size', 'n_shot', 's_idx'.
        """
        super().__init__()
        self.mode = config['mode']
        self.length = config['n_iter']
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        # Subclasses whose class name starts with "Test" run in evaluation mode.
        self.is_train = True
        if str(self.__class__).split(".")[-1][:4] == "Test":
            self.is_train = False
        # Cache each subject's slice file names, sorted by numeric slice index.
        self.img_lists = []
        self.slice_cnts = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            self.slice_cnts.append(len(fnames))
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        if not self.is_train:  # testing iterates over every query slice once
            self.length = sum(self.slice_cnts)

    def _load_pair(self, img_path, label_path, seed):
        """Load one (image, label) slice pair resized to (size, size), each
        with a leading channel axis. `seed` is forwarded to img_load."""
        img = self.img_load(img_path, seed)
        img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
        img = np.expand_dims(img, axis=0)
        label = np.load(label_path)
        label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
        label = np.expand_dims(label, axis=0)
        return img, label

    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load support/query slices from the given paths into one episode.

        Returns a dict with tensors 's_x'/'s_y' (support images/labels,
        [n_shot, n_slice, 1, size, size]) and 'q_x'/'q_y' (query images/labels,
        [n_slice, 1, size, size]), plus the source paths under 's_fname' and
        'q_fname'. Training episodes get random flip/rotation augmentation.
        """
        seed = random.randrange(0, 1000)  # shared seed keeps slices consistent
        s_imgs_all, s_labels_all = [], []
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [], []
            for i in range(len(s_img_paths)):
                img, label = self._load_pair(s_img_paths[i], s_label_paths[i], seed)
                imgs.append(img)
                labels.append(label)
            s_imgs_all.append(np.stack(imgs, axis=0))
            s_labels_all.append(np.stack(labels, axis=0))
        s_imgs = np.stack(s_imgs_all, axis=0)
        s_labels = np.stack(s_labels_all, axis=0)
        imgs, labels = [], []
        for i in range(len(q_img_paths)):
            img, label = self._load_pair(q_img_paths[i], q_label_paths[i], seed)
            imgs.append(img)
            labels.append(label)
        q_imgs = np.stack(imgs, axis=0)
        q_labels = np.stack(labels, axis=0)
        if self.is_train:
            ## random augmentation : flip, rotation (same transform for all)
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x": totensor(s_imgs),
            "s_y": totensor(s_labels),
            "q_x": totensor(q_imgs),
            "q_y": totensor(q_labels),
            "s_fname": s_img_paths_all,
            "q_fname": q_img_paths,
        }
        return sample

    def handle_idx(self, s_n, q_idx, q_n):
        """Map query slice index `q_idx` (of `q_n` slices) to the support
        slice index (of `s_n` slices) at the same relative depth."""
        if q_n <= 1:
            # A single-slice query has no depth ratio (the original divided
            # by zero here); fall back to the middle support slice.
            return (s_n - 1) // 2
        q_ratio = q_idx / (q_n - 1)
        return round((s_n - 1) * q_ratio)

    def getitem_train(self):
        """Sample one random training episode with distinct support/query
        subjects; applies a synchronized depth-flip augmentation."""
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot + 1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        # Work on a copy: the flip augmentation below must not mutate the
        # cached list (the previous code reversed self.img_lists entries in
        # place, silently corrupting slice order for later episodes).
        q_fnames = list(self.img_lists[q_subj_idx])
        q_idx = random.randrange(0, len(q_fnames))
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            q_fnames.reverse()
        s_img_paths_all, s_label_paths_all = [], []
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = list(self.img_lists[s_subj_idx])  # copy, see above
            ## flip support volumes together with the query
            if is_flip:
                s_fnames.reverse()
            ## choose the support slice at the same relative depth
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_all.append([f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected])
            s_label_paths_all.append([f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected])
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def getitme_test(self, idx):
        """Build the deterministic test episode for global slice index `idx`.

        Requires set_support_volume() to have been called first. (Method name
        typo kept as-is -- TestLoader_CTORG.__getitem__ calls it.)
        """
        q_subj_idx, q_idx = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        s_img_paths_all, s_label_paths_all = [], []
        for shot_i in range(self.n_shot):  # was `s_idx`, shadowed below
            s_subj_img_path = self.s_img_paths[shot_i]
            s_subj_label_path = self.s_label_paths[shot_i]
            s_fnames = self.s_fnames_list[shot_i]
            ## choose the support slice at the same relative depth
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_all.append([f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected])
            s_label_paths_all.append([f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected])
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def get_len_train(self):
        """Number of episodes per training epoch (config 'n_iter')."""
        return self.length

    def get_len_test(self):
        """Total number of query slices across all test subjects."""
        return self.length

    def get_val_subj_idx(self, idx):
        # NOTE(review): relies on self.q_cnts / self.q_max_slice, which are
        # not set anywhere in this class -- confirm this method is still used.
        for subj_idx, cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx * self.q_max_slice
            idx -= cnt
        # Raise instead of `assert False` so the failure survives `python -O`.
        raise IndexError("get_val_subj_idx: index out of range")

    def get_test_subj_idx(self, idx):
        """Translate a flat slice index into (subject index, slice index)."""
        for subj_idx, cnt in enumerate(self.slice_cnts):
            if idx < cnt:
                return subj_idx, idx
            idx -= cnt
        raise IndexError("get_test_subj_idx: index out of range")

    def get_cnts(self):
        """Per-subject slice counts (used by test loaders only)."""
        return self.slice_cnts

    def img_load(self, img_path, seed=0):
        # +0.25 presumably shifts CT intensities for this dataset -- TODO confirm.
        return np.load(img_path) + 0.25

    def set_support_volume(self, s_img_paths, s_label_paths):
        """Register the fixed support volumes used for validation/testing."""
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
class BaseLoader_CTORG(Base_dataset_ctorg):
    # CT-ORG: single-modality CT with one foreground label per task.
    modal_i = [0] # there is only one modality
    label_i = 1.0 # there is only one label for each image
class TrainLoader_CTORG(BaseLoader_CTORG):
    # Randomly sampled training episodes; `idx` is ignored by design.
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class TestLoader_CTORG(BaseLoader_CTORG):
    # Deterministic iteration over every query slice of every test subject.
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
if __name__ == "__main__":
    pass
    # main()
# === PANet/dataloaders_medical/dataset_decathlon.py ===
import os
import re
import sys
import json
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def totensor(arr):
    """Convert a numpy array to a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
## do random rotation and flip
k = random.sample([i for i in range(0, 4)], 1)[0]
s_imgs = np.rot90(s_imgs, k, (3, 4)).copy()
s_labels = np.rot90(s_labels, k, (3, 4)).copy()
q_imgs = np.rot90(q_imgs, k, (2, 3)).copy()
q_labels = np.rot90(q_labels, k, (2, 3)).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 3).copy()
s_labels = np.flip(s_labels, 3).copy()
q_imgs = np.flip(q_imgs, 2).copy()
q_labels = np.flip(q_labels, 2).copy()
if random.random() < 0.5:
s_imgs = np.flip(s_imgs, 4).copy()
s_labels = np.flip(s_labels, 4).copy()
q_imgs = np.flip(q_imgs, 3).copy()
q_labels = np.flip(q_labels, 3).copy()
return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset():
    """Few-shot segmentation dataset over pre-sliced Decathlon volumes.

    Every subject is a directory of per-slice ``<index>.npy`` files. An
    episode pairs ``n_shot`` support slices with one query slice taken at the
    same relative depth within their respective volumes.
    """
    def __init__(self, img_paths, label_paths, config):
        """
        dataset constructor for training

        Args:
            img_paths: list of per-subject image-slice directories.
            label_paths: parallel list of per-subject label-slice directories.
            config: dict providing 'mode', 'n_iter', 'size', 'n_shot', 's_idx'.
        """
        super().__init__()
        self.mode = config['mode']
        self.length = config['n_iter']
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        self.is_train = True
        # Subclasses named "<Organ>_test..." (suffix after the last '_')
        # run in evaluation mode.
        crit = str(self.__class__).split("_")[-1][:4]
        print(f"training word : {crit}")
        if crit == "test":
            self.is_train = False
        # Cache each subject's slice file names, sorted by numeric slice index.
        self.img_lists = []
        self.slice_cnts = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            self.slice_cnts.append(len(fnames))
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        if not self.is_train:  # testing iterates over every query slice once
            self.length = sum(self.slice_cnts)

    def _load_pair(self, img_path, label_path, seed):
        """Load one (image, label) slice pair resized to (size, size), each
        with a leading channel axis. `seed` is forwarded to img_load."""
        img = self.img_load(img_path, seed)
        img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
        img = np.expand_dims(img, axis=0)
        label = np.load(label_path)
        label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
        label = np.expand_dims(label, axis=0)
        return img, label

    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load support/query slices from the given paths into one episode.

        Returns a dict with tensors 's_x'/'s_y' (support images/labels,
        [n_shot, n_slice, 1, size, size]) and 'q_x'/'q_y' (query images/labels,
        [n_slice, 1, size, size]), plus the source paths under 's_fname' and
        'q_fname'. Training episodes get random flip/rotation augmentation.
        """
        seed = random.randrange(0, 1000)  # shared seed keeps modality choice in sync
        s_imgs_all, s_labels_all = [], []
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [], []
            for i in range(len(s_img_paths)):
                img, label = self._load_pair(s_img_paths[i], s_label_paths[i], seed)
                imgs.append(img)
                labels.append(label)
            s_imgs_all.append(np.stack(imgs, axis=0))
            s_labels_all.append(np.stack(labels, axis=0))
        s_imgs = np.stack(s_imgs_all, axis=0)
        s_labels = np.stack(s_labels_all, axis=0)
        imgs, labels = [], []
        for i in range(len(q_img_paths)):
            img, label = self._load_pair(q_img_paths[i], q_label_paths[i], seed)
            imgs.append(img)
            labels.append(label)
        q_imgs = np.stack(imgs, axis=0)
        q_labels = np.stack(labels, axis=0)
        if self.is_train:
            ## random augmentation : flip, rotation (same transform for all)
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x": totensor(s_imgs),
            "s_y": totensor(s_labels),
            "q_x": totensor(q_imgs),
            "q_y": totensor(q_labels),
            "s_fname": s_img_paths_all,
            "q_fname": q_img_paths,
        }
        return sample

    def handle_idx(self, s_n, q_idx, q_n):
        """Map query slice index `q_idx` (of `q_n` slices) to the support
        slice index (of `s_n` slices) at the same relative depth."""
        if q_n <= 1:
            # A single-slice query has no depth ratio (the original divided
            # by zero here); fall back to the middle support slice.
            return (s_n - 1) // 2
        q_ratio = q_idx / (q_n - 1)
        return round((s_n - 1) * q_ratio)

    def getitem_train(self):
        """Sample one random training episode with distinct support/query
        subjects; applies a synchronized depth-flip augmentation."""
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot + 1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        # Work on a copy: the flip augmentation below must not mutate the
        # cached list (the previous code reversed self.img_lists entries in
        # place, silently corrupting slice order for later episodes).
        q_fnames = list(self.img_lists[q_subj_idx])
        q_idx = random.randrange(0, len(q_fnames))
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            q_fnames.reverse()
        s_img_paths_all, s_label_paths_all = [], []
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = list(self.img_lists[s_subj_idx])  # copy, see above
            ## flip support volumes together with the query
            if is_flip:
                s_fnames.reverse()
            ## choose the support slice at the same relative depth
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_all.append([f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected])
            s_label_paths_all.append([f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected])
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def getitme_test(self, idx):
        """Build the deterministic test episode for global slice index `idx`.

        Requires set_support_volume() to have been called first. (Method name
        typo kept as-is -- the *_test subclasses call it from __getitem__.)
        """
        q_subj_idx, q_idx = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        s_img_paths_all, s_label_paths_all = [], []
        for shot_i in range(self.n_shot):  # was `s_idx`, shadowed below
            s_subj_img_path = self.s_img_paths[shot_i]
            s_subj_label_path = self.s_label_paths[shot_i]
            s_fnames = self.s_fnames_list[shot_i]
            ## choose the support slice at the same relative depth
            s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = s_fnames[s_idx:s_idx + 1]
            ## define path, load data, and return
            s_img_paths_all.append([f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected])
            s_label_paths_all.append([f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected])
        q_fnames_selected = q_fnames[q_idx:q_idx + 1]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)

    def get_len_train(self):
        """Number of episodes per training epoch (config 'n_iter')."""
        return self.length

    def get_len_test(self):
        """Total number of query slices across all test subjects."""
        return self.length

    def get_val_subj_idx(self, idx):
        # NOTE(review): relies on self.q_cnts / self.q_max_slice, which are
        # not set anywhere in this class -- confirm this method is still used.
        for subj_idx, cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx * self.q_max_slice
            idx -= cnt
        # Raise instead of `assert False` so the failure survives `python -O`.
        raise IndexError("get_val_subj_idx: index out of range")

    def get_test_subj_idx(self, idx):
        """Translate a flat slice index into (subject index, slice index)."""
        for subj_idx, cnt in enumerate(self.slice_cnts):
            if idx < cnt:
                return subj_idx, idx
            idx -= cnt
        raise IndexError("get_test_subj_idx: index out of range")

    def get_cnts(self):
        """Per-subject slice counts (used by test loaders only)."""
        return self.slice_cnts

    def img_load(self, img_path, seed=0):
        """Default slice loader; subclasses override for multi-modal data."""
        return np.load(img_path)

    def set_support_volume(self, s_img_paths, s_label_paths):
        """Register the fixed support volumes used for validation/testing."""
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
# Per-task loader families for the Medical Segmentation Decathlon.
# Each *_Base sets modality/label constants; *_train samples random episodes
# (idx ignored) and *_test iterates every query slice deterministically.
class Spleen_Base(Base_dataset):
    modal_i = 0
    label_i = 1.0
class Spleen_train(Spleen_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Spleen_test(Spleen_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Liver_Base(Base_dataset):
    modal_i = 0 # only 1 modality
    label_i = 1.0 # use both 1 : cancer / 2 : liver
class Liver_train(Liver_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Liver_test(Liver_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Tumor_Base(Base_dataset):
    modal_i = [0, 1, 2, 3] # 4 modalities
    label_i = 3.0 # 1 : edema / 2 : non enhancing tumor / 3 : enhancing tumour
    # BraTS slices store 4 MR modalities; pick one per episode, keyed by the
    # shared `seed` so support and query use the same modality.
    def img_load(self, img_path, seed=0):
        modal_idx = seed%len(self.modal_i)
        img_arr = np.load(img_path)
        return img_arr[modal_idx] # synchronize with query img and other support img
class Tumor_train(Tumor_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Tumor_test(Tumor_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Prostate_Base(Base_dataset):
    modality_n = 2
    modal_i = 0
    label_i = 2.0
    # Prostate volumes have 2 modalities; always use modality modal_i.
    def img_load(self, img_path, seed=0):
        img_arr = np.load(img_path)
        return img_arr[self.modal_i]
class Prostate_train(Prostate_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Prostate_test(Prostate_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Hippo_Base(Base_dataset):
    modal_i = 0
    label_i = 1.0 # use both 1.0 and 2.0
class Hippo_train(Hippo_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Hippo_test(Hippo_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Lung_Base(Base_dataset):
    modal_i = 0 # only 1 modality
    label_i = 1.0 # use both 1 : cancer
class Lung_train(Lung_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Lung_test(Lung_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class HepaticVessel_Base(Base_dataset):
    modality_n = 1
    # modal_i = 0
    label_i = 1.0 # 1 for vessel, 2 for tumour
    # use only vessel
class HepaticVessel_train(HepaticVessel_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class HepaticVessel_test(HepaticVessel_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Heart_Base(Base_dataset):
    modality_n = 1
    # modal_i = 0
    label_i = 1.0 # 1 for left atrium
class Heart_train(Heart_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Heart_test(Heart_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Pancreas_Base(Base_dataset):
    modality_n = 1 # only 1 modality
    # modal_i = 0
    label_i = 1.0 # 1 for pancreas, 2 for cancer
    # use all of them
class Pancreas_train(Pancreas_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Pancreas_test(Pancreas_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Colon_Base(Base_dataset):
    modality_n = 1 # only 1 modality
    # modal_i = 0
    label_i = 1.0 # 1 for colon cancer primaries
    # use 1.0
class Colon_train(Colon_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        return self.getitem_train()
class Colon_test(Colon_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
if __name__ == "__main__":
pass
    # main()
# === PANet/dataloaders_medical/prostate.py ===
import sys
import glob
import json
import re
from glob import glob
from util.utils import *
from dataloaders_medical.decathlon import *
from dataloaders_medical.dataset_decathlon import *
from dataloaders_medical.dataset_CT_ORG import *
import numpy as np
class MetaSliceData_train():
    """Meta-dataset that serves each item from a randomly chosen sub-dataset.

    Wraps several episode datasets (one per organ task) behind a single
    dataset interface of fixed virtual length `iter_n`.
    """
    def __init__(self, datasets, iter_n=100):
        super().__init__()
        self.datasets = datasets        # candidate episode datasets
        self.dataset_n = len(datasets)  # kept for external inspection
        self.iter_n = iter_n            # virtual epoch length

    def __len__(self):
        return self.iter_n

    def __getitem__(self, idx):
        # Pick one dataset uniformly at random, then delegate item lookup.
        chosen = random.sample(self.datasets, 1)[0]
        return chosen.__getitem__(idx)
def metadata():
    """Static description of the MICCAI 2015 Abdomen dataset layout.

    Returns a dict with the raw/pre-sliced data directories, the organ task
    ids (1..13), and the organ name for each label index. (Organ-name typos
    are preserved verbatim -- other code may match on these strings.)
    """
    base = "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData"
    organs = [
        "background",
        "spleen",                       # 1
        "right kidney",                 # 2
        "left kidney",                  # 3
        "gallbladder",                  # 4
        "esophagus",                    # 5
        "liver",                        # 6
        "stomach",                      # 7
        "aorta",                        # 8
        "inferior vana cava",           # 9
        "portal vein & splenic vein",   # 10
        "pancreas",                     # 11
        "right adrenal gland",          # 12
        "left adrenal gland",           # 13
        "bladder",                      # 14
        "uturus",                       # 15
        "rectum",                       # 16
        "small bowel",                  # 17
    ]
    # training : [1,2,3,5,6,7,8,9,14,15] / testing : [1,3,6,14]
    return {
        "src_dir": f"{base}/Training",
        "trg_dir": f"{base}/Training_2d",           # 144 setting
        "trg_dir2": f"{base}/Training_2d_2",        # 144 setting
        "trg_dir3": f"{base}/Training_2d_denoise",
        "Tasks": list(range(1, 14)),
        "Organs": organs,
    }
def meta_data(_config):
    # Build (meta-train, validation, test) datasets for the Abdomen few-shot
    # setup. Training episodes come from every organ task except the target
    # (and the mirrored kidney, removed to avoid left/right leakage); the
    # target organ supplies validation/testing. With _config['add_target'] > 0
    # a few target-organ volumes are also added to training.
    def path_collect(idx, option='train'):
        # Gather per-subject slice-directory paths for organ task `idx`.
        # img_paths = glob(f"{meta['trg_dir2']}/{idx}/{option}/img/*")
        # label_paths = glob(f"{meta['trg_dir2']}/{idx}/{option}/label/*")
        img_paths = glob(f"{_config['data_src']}/{idx}/{option}/img/*")
        label_paths = glob(f"{_config['data_src']}/{idx}/{option}/label/*")
        return img_paths, label_paths
    def spliter(idx):
        # Train/valid/test path split for one organ task.
        tr_imgs, tr_labels = path_collect(idx, 'train')
        val_imgs, val_labels = path_collect(idx, 'valid')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels
    target_task = _config['target']
    meta = metadata()
    print(meta['trg_dir'])
    # tasks = meta['Tasks']
    tasks = [1,2,3,5,6,7,8,9,14,15]
    # tasks_remove = [4, 10, 12, 13] # 7 11
    # tasks_remove = [4, 5, 8, 9, 10, 11, 12, 13]
    # tasks_remove = [4, 5, 8, 9, 10, 11, 12, 13, 16, 17]
    ## we sholdn't use both left and right kidneys
    # for task in tasks_remove:
    #     tasks.remove(task)
    kidneys = [2,3]
    if target_task in kidneys:
        kidneys.remove(target_task)
        other_task = kidneys[0]
    # NOTE(review): when the target is not a kidney, `other_task` is never
    # bound and the bare except below swallows the resulting NameError.
    try:
        tasks.remove(other_task)
    except:
        pass
    print(f"tasks : {tasks}")
    datasets = {}
    # TrainLoader/TestLoader come from the star imports at the top of this
    # file (presumably dataloaders_medical.decathlon) -- confirm.
    for task in tasks:
        tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels = spliter(task)
        datasets[task] = [TrainLoader(tr_imgs, tr_labels, _config), TestLoader(val_imgs, val_labels, _config), TestLoader(ts_imgs, ts_labels, _config)]
    tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels = spliter(target_task)
    if _config['add_target']:
        # Semi-supervised variant: keep n_add_target target volumes in training.
        n_add_target = _config['add_target']
        datasets[target_task] = [TrainLoader(tr_imgs[:n_add_target], tr_labels[:n_add_target], _config), TestLoader(val_imgs, val_labels, _config), TestLoader(ts_imgs, ts_labels, _config)]
        val_dataset = datasets[target_task][1]
        ts_dataset = datasets[target_task][2]
        tr_datasets = [dataset[0] for dataset in datasets.values()]
    else:
        # Standard few-shot: the target task is excluded from training.
        val_dataset = datasets[target_task][1]
        ts_dataset = datasets[target_task][2]
        datasets.pop(target_task) #dictionary pop(key)
        tr_datasets = [dataset[0] for dataset in datasets.values()]
    print(f"training tasks : {datasets.keys()}")
    print(f"target tasks : {target_task}")
    ## set the support volume for testing
    if _config["internal_test"]:
        pass
    else:
        # External evaluation: replace the test set (and its support volumes)
        # with data from Decathlon or CT-ORG.
        # _, _, ts_dataset = external_trainset(_config,target_task)
        tr_imgs, tr_labels, ts_dataset = external_testset(_config, target_task)
    # Fixed support volumes: n_shot training volumes starting at s_idx.
    val_dataset.set_support_volume(tr_imgs[_config['s_idx']:_config['s_idx'] + _config['n_shot']],
                                   tr_labels[_config['s_idx']:_config['s_idx'] + _config['n_shot']])
    ts_dataset.set_support_volume(tr_imgs[_config['s_idx']:_config['s_idx'] + _config['n_shot']],
                                  tr_labels[_config['s_idx']:_config['s_idx'] + _config['n_shot']])
    meta_tr_dataset = MetaSliceData_train(tr_datasets, iter_n=_config['n_iter'])
    return meta_tr_dataset, val_dataset, ts_dataset
def external_testset(_config, target_task):
    # Map an Abdomen organ task onto the matching organ of an external dataset
    # (Decathlon or CT-ORG) and return (train imgs, train labels, test dataset).
    # The train paths are used upstream to pick fixed support volumes.
    def decathlon_spliter(idx):
        # Train/test slice-directory paths for Decathlon task `idx` (1-based).
        def path_collect(idx, option='train'):
            tasks = ["Task01_BrainTumour",
                     "Task02_Heart",
                     "Task03_Liver",
                     "Task04_Hippocampus",
                     "Task05_Prostate",
                     "Task06_Lung",
                     "Task07_Pancreas",
                     "Task08_HepaticVessel",
                     "Task09_Spleen",
                     "Task10_Colon",
                     "Task11_Davis"
                     ]
            src_path = '/user/home2/soopil/Datasets/Decathlon_2d'
            img_paths = glob(f"{src_path}/{tasks[idx - 1]}/{option}/img/*")
            label_paths = glob(f"{src_path}/{tasks[idx - 1]}/{option}/label/*")
            return img_paths, label_paths
        tr_imgs, tr_labels = path_collect(idx, 'train')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, ts_imgs, ts_labels
    def CT_ORG_spliter(idx):
        # Train/test slice-directory paths for CT-ORG label index `idx`.
        def path_collect(idx, option='train'):
            # NOTE(review): the trailing comma makes `Organs` a 1-tuple; it is
            # unused here and kept only as in-code documentation of label ids.
            Organs = ["background",
                      "Liver",  # 1
                      "Bladder",  # 2
                      "Lung",  # 3
                      "Kidney",  # 4
                      "Bone",  # 5
                      "Brain",  # 6
                      ],
            src_path = "/user/home2/soopil/Datasets/CT_ORG/Training_2d_align"
            img_paths = glob(f"{src_path}/{idx}/{option}/img/*")
            label_paths = glob(f"{src_path}/{idx}/{option}/label/*")
            return img_paths, label_paths
        tr_imgs, tr_labels = path_collect(idx, 'train')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, ts_imgs, ts_labels
    external = _config["external_test"]
    print(f"external testset : {external}")
    if external == "decathlon":
        # Abdomen task -> Decathlon task: 1 (spleen) -> 9, 6 (liver) -> 3.
        if target_task == 1: # spleen
            target_idx_decath = 9
            tr_imgs, tr_labels, ts_imgs, ts_labels = decathlon_spliter(target_idx_decath)
            ts_dataset = Spleen_test(ts_imgs, ts_labels, _config)
        elif target_task == 6:
            target_idx_decath = 3
            tr_imgs, tr_labels, ts_imgs, ts_labels = decathlon_spliter(target_idx_decath)
            ts_dataset = Liver_test(ts_imgs, ts_labels, _config)
        else:
            print("There isn't according organ in Decathlon dataset.")
            assert False
        print(f"target index in external dataset : {target_idx_decath}")
    elif external == "CT_ORG":
        # Abdomen task -> CT-ORG label: 3 (kidney) -> 4, 6 (liver) -> 1,
        # 14 (bladder) -> 2.
        if target_task == 3: # kidney
            target_idx_ctorg = 4
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        elif target_task == 6: # liver
            target_idx_ctorg = 1
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        elif target_task == 14: # bladder
            target_idx_ctorg = 2
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        else:
            print("There isn't according organ in CT_ORG dataset.")
            assert False
        print(f"target index in external dataset : {target_idx_ctorg}")
    else:
        print("configuration of external dataset is wrong")
        assert False
    return tr_imgs, tr_labels, ts_dataset
if __name__=="__main__":
    pass
# === BiGRU_fewshot/train.py ===
"""Training Script"""
import os
import shutil
import numpy as np
import pdb
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from nn_common_modules import losses
if __name__ == '__main__':
from util.utils import set_seed, CLASS_LABELS, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.common import *
from dataloaders_medical.prostate import *
from model import MedicalFSS
else:
from .util.utils import set_seed, CLASS_LABELS, date
from .config import ex
from tensorboardX import SummaryWriter
from .dataloaders_medical.common import *
from .dataloaders_medical.prostate import *
from .model import MedicalFSS
def overlay_color(img, mask, label, scale=50):
    """Overlay prediction mask and ground-truth label onto a grayscale image.

    :param img: [1, H, W] image tensor
    :param mask: [1, H, W] prediction mask
    :param label: [1, H, W] ground-truth label
    :return: single-element list holding a [3, H, W] overlay tensor
    """
    # NOTE(review): the `scale` argument is immediately overwritten with the
    # image mean, so callers cannot influence it -- confirm this is intended.
    scale = np.mean(img.cpu().numpy())
    mask_2d = mask[0]
    label_2d = label[0]
    blank = torch.zeros_like(mask_2d)
    mask_3ch = torch.stack([mask_2d, blank, blank], dim=0)
    # NOTE(review): channel 0 of the label overlay carries the *mask* plane,
    # reproducing the original's in-place list reuse -- confirm intended.
    label_3ch = torch.stack([mask_2d, label_2d, blank], dim=0)
    img_3ch = torch.cat([img, img, img], dim=0)
    masked = img_3ch + mask_3ch.float() * scale + label_3ch.float() * scale
    return [masked]
@ex.capture
def get_info(_run):
    # Print the sacred run id and experiment name for this run.
    # `_run` is injected by sacred's @ex.capture decorator.
    print(_run._id)
    print(_run.experiment_info["name"])
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: trains MedicalFSS and saves a rolling checkpoint
    under the observer's snapshots directory every epoch."""
    # Mirror the experiment's source files into the observer directory, then
    # drop sacred's default "_sources" copy.
    if _run.observers:
        os.makedirs(f'{_run.observers[0].dir}/snapshots', exist_ok=True)
        for source_file, _ in _run.experiment_info['sources']:
            os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                        exist_ok=True)
            _run.observers[0].save_file(source_file, f'source/{source_file}')
        shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    print(f"experiment : {_run.experiment_info['name']} , ex_ID : {_run._id}")
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    device = torch.device(f"cuda:{_config['gpu_id']}")
    model = MedicalFSS(_config,device).to(device)
    _log.info('###### Load data ######')
    make_data = meta_data
    # NOTE(review): val_dataset and ts_dataset are built but never used here.
    tr_dataset, val_dataset, ts_dataset = make_data(_config)
    trainloader = DataLoader(
        dataset=tr_dataset,
        batch_size=_config['batch_size'],
        shuffle=True,
        num_workers=_config['n_work'],
        pin_memory=False, #True load data while training gpu
        drop_last=True
    )
    _log.info('###### Set optimizer ######')
    print(_config['optim'])
    optimizer = torch.optim.Adam(list(model.parameters()),
                                 _config['optim']['lr'])
    # NOTE(review): scheduler.step() is never called, so the LR milestones
    # configured in _config['lr_milestones'] have no effect.
    scheduler = MultiStepLR(optimizer, milestones=_config['lr_milestones'], gamma=0.1)
    criterion_ce = nn.CrossEntropyLoss()
    criterion = losses.DiceLoss()
    if _config['record']: ## tensorboard visualization
        _log.info('###### define tensorboard writer #####')
        _log.info(f'##### board/train_{_config["board"]}_{date()}')
        writer = SummaryWriter(f'board/train_{_config["board"]}_{date()}')
    iter_n_train = len(trainloader)
    _log.info('###### Training ######')
    q_slice_n = _config['q_slice']
    blank = torch.zeros([1, 256, 256]).to(device)  # empty mask for support overlays
    iter_print = _config['iter_print']
    for i_epoch in range(_config['n_steps']):
        loss_epoch = 0
        ## training stage
        for i_iter, sample_train in enumerate(trainloader):
            preds = []
            loss_per_video = 0.0
            optimizer.zero_grad()
            s_x = sample_train['s_x'].to(device) # [B, Support, slice_num, 1, 256, 256]
            s_y = sample_train['s_y'].to(device) # [B, Support, slice_num, 1, 256, 256]
            q_x = sample_train['q_x'].to(device) #[B, slice_num, 1, 256, 256]
            q_y = sample_train['q_y'].type(torch.LongTensor).to(device) #[B, slice_num, 1, 256, 256]
            preds = model(s_x,s_y,q_x)  # list of q_slice_n logit maps
            for frame_id in range(q_slice_n):
                q_yi = q_y[:, frame_id, :, :, :] # [B, 1, 256, 256]
                q_yi2 = q_yi.squeeze(1) # [B, 256, 256]
                yhati = preds[frame_id]
                # Per-slice Dice + cross-entropy on the softmaxed logits.
                loss = criterion(F.softmax(yhati, dim=1), q_yi2)+criterion_ce(F.softmax(yhati, dim=1), q_yi2)
                loss_per_video += loss
                # NOTE(review): this appends onto the same list returned by
                # model(), duplicating its entries; only the first q_slice_n
                # items are ever indexed, so results are unaffected, but the
                # append looks unintended.
                preds.append(yhati)
            loss_per_video.backward()
            optimizer.step()
            loss_epoch += loss_per_video
            if iter_print:
                print(f"train, iter:{i_iter}/{iter_n_train}, iter_loss:{loss_per_video}", end='\r')
            if _config['record'] and i_iter == 0:
                # Log one query + support overlay grid per epoch.
                batch_i = 0
                frames = []
                for frame_id in range(0, q_slice_n):
                    frames += overlay_color(q_x[batch_i, frame_id], preds[frame_id][batch_i].round(), q_y[batch_i, frame_id], scale=_config['scale'])
                for frame_id in range(0, q_slice_n):
                    frames += overlay_color(s_x[batch_i, 0, frame_id], blank, s_y[batch_i, 0, frame_id], scale=_config['scale'])
                visual = make_grid(frames, normalize=True, nrow=5)
                writer.add_image("train/visual", visual, i_epoch)
            # NOTE(review): this block duplicates the one above and logs the
            # same *training* batch under "valid/visual" — no validation data
            # is visualized.
            if _config['record'] and i_iter == 0:
                batch_i = 0
                frames = []
                for frame_id in range(0, q_slice_n):
                    frames += overlay_color(q_x[batch_i, frame_id], preds[frame_id][batch_i].round(), q_y[batch_i, frame_id], scale=_config['scale'])
                for frame_id in range(0, q_slice_n):
                    frames += overlay_color(s_x[batch_i, 0, frame_id], blank, s_y[batch_i, 0, frame_id], scale=_config['scale'])
                visual = make_grid(frames, normalize=True, nrow=5)
                writer.add_image("valid/visual", visual, i_epoch)
        print(f"train - epoch:{i_epoch}/{_config['n_steps']}, epoch_loss:{loss_epoch}", end='\n')
        save_fname = f'{_run.observers[0].dir}/snapshots/last.pth'
        _run.log_scalar("training.loss", float(loss_epoch), i_epoch)
        if _config['record']:
            writer.add_scalar('loss/train_loss', loss_epoch, i_epoch)
        # Overwrite a single rolling checkpoint each epoch.
        torch.save({
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, save_fname
        )
    # NOTE(review): writer only exists when _config['record'] is True; with the
    # default record=False this line raises NameError.
    writer.close()
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/model.py | .py | 3,724 | 78 | import pdb
import numpy as np
import torch
import torch.nn as nn
from models.encoder import SupportEncoder, QueryEncoder
from models.convgru import ConvBGRU
from models.decoder import Decoder
class MedicalFSS(nn.Module):
    """Few-shot medical image segmentation network.

    Per query slice: VGG-based support/query encoders -> per-shot
    bidirectional ConvGRU over the slice sequence -> shared decoder with
    query skip connections. forward() returns a list of q_slice logit maps.
    """
    def __init__(self, config, device):
        super(MedicalFSS, self).__init__()
        self.config=config
        resize_dim = self.config['input_size']
        # Spatial size of the encoder output after n_pool 2x poolings.
        self.encoded_h = int(resize_dim[0] / 2 ** self.config['n_pool'])
        self.encoded_w = int(resize_dim[1] / 2 ** self.config['n_pool'])
        self.s_encoder = SupportEncoder(self.config['path']['init_path'], device) # .to(device)
        self.q_encoder = QueryEncoder(self.config['path']['init_path'], device) # .to(device)
        self.ConvBiGRU = ConvBGRU(in_channels=512,
                                  hidden_channels=256,
                                  kernel_size=(3, 3),
                                  num_layers=self.config['n_layer'],
                                  device=device).to(device)
        self.decoder = Decoder(input_res=(self.encoded_h, self.encoded_w), output_res=resize_dim).to(device)
        self.q_slice_n = self.config['q_slice']
        self.ch = 256 # number of channels of embedding vector
        self.n_shot = self.config['n_shot']
        # Index order used to feed the slice sequence backwards to the BiGRU.
        self.reversed_idx = list(reversed(range(self.q_slice_n)))
        self.is_attention=self.config['is_attention']
        if self.is_attention:
            # NOTE(review): these modules are defined but never used in
            # forward(), and self.attention_score is never assigned anywhere.
            self.avgpool3d = nn.AvgPool3d((self.ch*2, self.encoded_w, self.encoded_h))
            self.softmax = nn.Softmax(dim=1)
    def forward(self, s_x, s_y, q_x):
        # s_x: [B, Shot, Slice, 1, 256, 256]; s_y likewise; q_x: [B, Slice, 1, 256, 256]
        s_x_encode, q_x_encode, q_ft_lists = [], [], []
        # Encode each slice of support (image+mask) and query independently.
        for frame_id in range(self.q_slice_n):
            s_xi = s_x[:, :, frame_id, :, :, :] # [B, Support, 1, 256, 256]
            s_yi = s_y[:, :, frame_id, :, :, :]
            q_xi = q_x[:, frame_id, :, :, :]
            # Fold the shot dimension into the batch for the support encoder.
            s_x_merge = s_xi.view(s_xi.size(0) * s_xi.size(1), 1, 256, 256)
            s_y_merge = s_yi.view(s_yi.size(0) * s_yi.size(1), 1, 256, 256)
            s_xi_encode_merge, s_ft_list = self.s_encoder(s_x_merge, s_y_merge) # [B*S, ch, w, h]
            s_xi_encode = s_xi_encode_merge.view(s_xi.size(0), s_xi.size(1), self.ch, self.encoded_w, self.encoded_h) # [B, S, ch, w, h]
            q_xi_encode, q_ft_list = self.q_encoder(q_xi)
            s_x_encode.append(s_xi_encode) # [B,256(c),256,256]
            q_x_encode.append(q_xi_encode) # [B,256(c),256,256]
            q_ft_lists.append(q_ft_list)   # skip features for the decoder
        s_xi_encode_frames = torch.stack(s_x_encode, dim=2) #[B, shot, slice, ch, w, h]
        gru_outputs = []
        # Run a bidirectional ConvGRU over the slice axis once per shot.
        for shot_id in range(self.n_shot):
            s_x_encode_batch = s_xi_encode_frames[:,shot_id, ...]
            # s_x_encode_batch = torch.stack(s_x_encode[:,shot_id,...], dim=1)
            q_x_encode_batch = torch.stack(q_x_encode, dim=1) #[B, slice, ch, w, h]
            # Concatenate support and query embeddings channel-wise (512 total).
            x_encode_batch = torch.cat((s_x_encode_batch, q_x_encode_batch), dim=2)
            x_fwd = x_encode_batch
            x_rev = x_encode_batch[:, self.reversed_idx, ...]
            h_encode_gru = self.ConvBiGRU(x_fwd, x_rev)
            gru_outputs.append(h_encode_gru) #[B, slice, ch, w, h]
        gru_output = torch.stack(gru_outputs,dim=1) #[B, shot, slice, ch, w, h]
        # gru_out = torch.sum(gru_output,dim=1) #[B, slice, ch, w, h]
        gru_out = torch.mean(gru_output,dim=1) #[B, slice, ch, w, h]  # average over shots
        out = []
        # Decode each slice's hidden state with its query skip features.
        for frame_id in range(self.q_slice_n):
            hi = gru_out[:, frame_id, :, :, :]
            q_ft_list = q_ft_lists[frame_id]
            yhati = self.decoder(hi, q_ft_list) # [B, 1, 256, 256]
            out.append(yhati)
        return out
    def get_attention_score(self):
        # NOTE(review): self.attention_score is never set in this class, so
        # calling this raises AttributeError — confirm intended usage.
        return self.attention_score
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/test.py | .py | 11,927 | 301 | """Evaluation Script"""
import os
import shutil
import pdb
import tqdm
import numpy as np
import torch
import torch.optim
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
from torchvision.utils import make_grid
from math import isnan
# from util.metric import Metric
from util.utils import set_seed, CLASS_LABELS, get_bbox, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.prostate import *
from model import MedicalFSS
from nn_common_modules import losses
import torch.nn.functional as F
import SimpleITK as sitk
def overlay_color(img, mask, label, scale=50):
    """
    Blend a prediction mask (red channel) and a label (green channel) onto a
    grayscale slice; returns a single-element list with the RGB overlay.
    :param img: [1, 256, 256]
    :param mask: [1, 256, 256]
    :param label: [1, 256, 256]
    :return: [masked] where masked is [3, 256, 256]
    """
    # pdb.set_trace()
    # The scale argument is ignored: overlay weight is the slice's mean intensity.
    scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    zeros = torch.zeros_like(mask)
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    mask = torch.stack(zeros,dim=0)
    # NOTE(review): zeros[0] still holds the mask here, so the label overlay's
    # red channel also carries the mask — confirm this is intended.
    zeros[1] = label
    label = torch.stack(zeros,dim=0)
    img_3ch = torch.cat([img,img,img],dim=0)
    masked = img_3ch+mask.float()*scale+label.float()*scale
    return [masked]
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: evaluates a trained MedicalFSS checkpoint on the
    test split, optionally fine-tuning on support data first, and reports a
    per-subject Dice score (with optional TensorBoard/NIfTI output)."""
    # Archive the experiment's sources under the observer directory.
    for source_file, _ in _run.experiment_info['sources']:
        os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                    exist_ok=True)
        _run.observers[0].save_file(source_file, f'source/{source_file}')
    shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.cuda.set_device(device=_config['gpu_id'])
    torch.set_num_threads(1)
    device = torch.device(f"cuda:{_config['gpu_id']}")
    _log.info('###### Load data ######')
    data_name = _config['dataset']
    make_data = meta_data
    q_slice_n = _config['q_slice']
    iter_print = _config['iter_print']
    if _config['record']:
        _log.info('###### define tensorboard writer #####')
        board_name = f'board/test_{_config["board"]}_{date()}'
        writer = SummaryWriter(board_name)
    if _config["n_update"]:
        # Fine-tune the loaded model on the target organ's support data before
        # testing; one shot is held out (n_shot-1) during this phase.
        _log.info('###### fine tuning with support data of target organ #####')
        _config["n_shot"] = _config["n_shot"]-1
        _log.info('###### Create model ######')
        model = MedicalFSS(_config, device).to(device)
        checkpoint = torch.load(_config['snapshot'], map_location='cpu')
        print("checkpoint keys : ", checkpoint.keys())
        # initializer.load_state_dict(checkpoint['initializer'])
        model.load_state_dict(checkpoint['model'])
        # optimizer.load_state_dict(checkpoint['optimizer'])
        tr_dataset, val_dataset, ts_dataset = make_data(_config, is_finetuning=True)
        trainloader = DataLoader(
            dataset=tr_dataset,
            batch_size=1,
            shuffle=False,
            pin_memory=False,
            drop_last=False
        )
        optimizer = torch.optim.Adam(list(model.parameters()),_config['optim']['lr'])
        # optimizer = torch.optim.SGD(list(model.parameters()),1e-5)
        # criterion = nn.BCELoss()
        criterion = losses.DiceLoss()
        criterion_ce = nn.CrossEntropyLoss()
        for i_iter, sample_train in enumerate(trainloader):
            preds = []
            loss_per_video = 0.0
            optimizer.zero_grad()
            s_x = sample_train['s_x'].to(device) # [B, Support, slice_num, 1, 256, 256]
            s_y = sample_train['s_y'].to(device) # [B, Support, slice_num, 1, 256, 256]
            q_x = sample_train['q_x'].to(device) # [B, slice_num, 1, 256, 256]
            q_y = sample_train['q_y'].type(torch.LongTensor).to(device) #[B, slice_num, 1, 256, 256]
            preds = model(s_x, s_y, q_x)
            for frame_id in range(q_slice_n):
                q_yi = q_y[:, frame_id, :, :, :] # [B, 1, 256, 256]
                q_yi2 = q_yi.squeeze(1) # [B, 256, 256]
                yhati = preds[frame_id]
                # pdb.set_trace()
                # loss = criterion(F.softmax(yhati, dim=1), q_yi2)
                loss = criterion(F.softmax(yhati, dim=1), q_yi2)+criterion_ce(F.softmax(yhati, dim=1), q_yi2)
                loss_per_video += loss
                # NOTE(review): appends back onto the list returned by model();
                # harmless here (only the first q_slice_n items are indexed)
                # but looks unintended.
                preds.append(yhati)
            loss_per_video.backward()
            optimizer.step()
            if iter_print:
                print(f"train, iter:{i_iter}/{_config['n_update']}, iter_loss:{loss_per_video}", end='\r')
        # Restore the original shot count for the test phase.
        _config["n_shot"] = _config["n_shot"]+1
    else:
        _log.info('###### Create model ######')
        model = MedicalFSS(_config, device).to(device)
        checkpoint = torch.load(_config['snapshot'], map_location='cpu')
        print("checkpoint keys : ", checkpoint.keys())
        # initializer.load_state_dict(checkpoint['initializer'])
        model.load_state_dict(checkpoint['model'])
        model.n_shot = _config["n_shot"]
    tr_dataset, val_dataset, ts_dataset = make_data(_config)
    testloader = DataLoader(
        dataset=ts_dataset,
        batch_size=1,
        shuffle=False,
        pin_memory=False,
        drop_last=False
    )
    _log.info('###### Testing begins ######')
    # metric = Metric(max_label=max_label, n_runs=_config['n_runs'])
    img_cnt = 0
    # length = len(all_samples)
    length = len(testloader)
    blank = torch.zeros([1, 256, 256]).to(device)
    reversed_idx = list(reversed(range(q_slice_n)))
    ch = 256 # number of channels of embedding
    img_lists = []
    pred_lists = []
    label_lists = []
    # saves[subject] collects per-window results for later slice re-assembly.
    saves = {}
    n_test = len(ts_dataset.q_cnts)
    for subj_idx in range(n_test):
        saves[subj_idx] = []
    with torch.no_grad():
        batch_idx = 0 # use only 1 batch size for testing
        for i, sample_test in enumerate(testloader): # even for upward, down for downward
            subj_idx, idx = ts_dataset.get_test_subj_idx(i)
            img_list, pred_list, label_list, preds = [],[],[],[]
            s_x = sample_test['s_x'].to(device) # [B, slice_num, 1, 256, 256]
            s_y = sample_test['s_y'].to(device) # [B, slice_num, 1, 256, 256]
            q_x = sample_test['q_x'].to(device) # [B, slice_num, 1, 256, 256]
            q_y = sample_test['q_y'].to(device) # [B, slice_num, 1, 256, 256]
            fnames = sample_test['q_fname']
            preds = model(s_x, s_y, q_x)
            for frame_id in range(q_slice_n):
                q_xi = q_x[:, frame_id, :, :, :]
                q_yi = q_y[:, frame_id, :, :, :]
                yhati = preds[frame_id]
                # NOTE(review): same self-append pattern as in training —
                # argmax is also recomputed on the next line; redundant.
                preds.append(yhati.argmax(dim=1))
                img_list.append(q_xi[batch_idx].cpu().numpy())
                pred_list.append(yhati.argmax(dim=1).cpu().numpy())
                label_list.append(q_yi[batch_idx].cpu().numpy())
            saves[subj_idx].append([subj_idx, idx, img_list, pred_list, label_list, fnames])
            if iter_print:
                print(f"test, iter:{i}/{length} - {subj_idx}/{idx} \t\t", end='\r')
            img_lists.append(img_list)
            pred_lists.append(pred_list)
            label_lists.append(label_list)
            # if _config['record']:
            #     frames = []
            #     for frame_id in range(0, q_x.size(1)):
            #         frames += overlay_color(q_x[batch_idx, frame_id], preds[frame_id-1][batch_idx].round(), q_y[batch_idx, frame_id], scale=_config['scale'])
            #     visual = make_grid(frames, normalize=True, nrow=5)
            #     writer.add_image(f"test/{subj_idx}/{idx}_query_image", visual, i)
    # Re-assemble a full volume per subject: take the window's center slice,
    # except at the volume's first/last window where the boundary slices are
    # taken too.
    center_idx = (q_slice_n//2)+1 -1 # 5->2 index
    dice_similarities = []
    for subj_idx in range(n_test):
        imgs, preds, labels = [], [], []
        save_subj = saves[subj_idx]
        for i in range(len(save_subj)):
            subj_idx, idx, img_list, pred_list, label_list, fnames = save_subj[i]
            # if idx==(q_slice_n//2):
            if idx==0:
                for j in range((q_slice_n//2)+1):# 5//2 + 1 = 3
                    imgs.append(img_list[idx+j])
                    preds.append(pred_list[idx+j])
                    labels.append(label_list[idx+j])
            elif idx==(len(save_subj)-1):
                # pdb.set_trace()
                for j in range((q_slice_n//2)+1):# 5//2 + 1 = 3
                    imgs.append(img_list[center_idx+j])
                    preds.append(pred_list[center_idx+j])
                    labels.append(label_list[center_idx+j])
            else:
                imgs.append(img_list[center_idx])
                preds.append(pred_list[center_idx])
                labels.append(label_list[center_idx])
        # pdb.set_trace()
        img_arr = np.concatenate(imgs, axis=0)
        pred_arr = np.concatenate(preds, axis=0)
        label_arr = np.concatenate(labels, axis=0)
        # Volume-level Dice over the re-assembled prediction.
        dice = np.sum([label_arr * pred_arr]) * 2.0 / (np.sum(pred_arr) + np.sum(label_arr))
        ## IoU
        # NOTE(review): IoU is computed but never reported or stored.
        union = np.clip(pred_arr + label_arr, 0, 1)
        IoU = np.sum([label_arr * pred_arr]) / np.sum(union)
        dice_similarities.append(dice)
        print(f"{len(imgs)} slice -> computing dice scores. {subj_idx}/{n_test}. {ts_dataset.q_cnts[subj_idx] }/{len(save_subj)} => {len(imgs)}", end='\r')
        if _config['record']:
            frames = []
            for frame_id in range(0, len(imgs)):
                frames += overlay_color(torch.tensor(imgs[frame_id]), torch.tensor(preds[frame_id]), torch.tensor(labels[frame_id]), scale=_config['scale'])
            print(len(frames))
            visual = make_grid(frames, normalize=True, nrow=5)
            writer.add_image(f"test/{subj_idx}", visual, i)
            writer.add_scalar(f'dice_score/{i}', dice)
        if _config['save_sample']:
            ## only for internal test (BCV - MICCAI2015)
            # Export input / prediction / ground truth as NIfTI volumes with
            # the original voxel spacing.
            sup_idx = _config['s_idx']
            target = _config['target']
            save_name = _config['save_name']
            dirs = ["gt", "pred", "input"]
            save_dir = f"../sample/bigru_organ{target}_sup{sup_idx}_{save_name}"
            for dir in dirs:
                try:
                    os.makedirs(os.path.join(save_dir,dir))
                except:
                    pass
            subj_name = fnames[0][0].split("/")[-2]
            if target == 14:
                src_dir = "/user/home2/soopil/Datasets/MICCAI2015challenge/Cervix/RawData/Training/img"
                orig_fname = f"{src_dir}/{subj_name}-Image.nii.gz"
                pass
            else:
                src_dir = "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training/img"
                orig_fname = f"{src_dir}/img{subj_name}.nii.gz"
            itk = sitk.ReadImage(orig_fname)
            orig_spacing = itk.GetSpacing()
            label_arr = label_arr*2.0  # scale labels for easier visual contrast
            # label_arr = np.concatenate([np.zeros([1,256,256]), label_arr,np.zeros([1,256,256])])
            # pred_arr = np.concatenate([np.zeros([1,256,256]), pred_arr,np.zeros([1,256,256])])
            # img_arr = np.concatenate([np.zeros([1,256,256]), img_arr,np.zeros([1,256,256])])
            itk = sitk.GetImageFromArray(label_arr)
            itk.SetSpacing(orig_spacing)
            sitk.WriteImage(itk,f"{save_dir}/gt/{subj_idx}.nii.gz")
            itk = sitk.GetImageFromArray(pred_arr.astype(float))
            itk.SetSpacing(orig_spacing)
            sitk.WriteImage(itk,f"{save_dir}/pred/{subj_idx}.nii.gz")
            itk = sitk.GetImageFromArray(img_arr.astype(float))
            itk.SetSpacing(orig_spacing)
            sitk.WriteImage(itk,f"{save_dir}/input/{subj_idx}.nii.gz")
    print(f"test result \n n : {len(dice_similarities)}, mean dice score : \
    {np.mean(dice_similarities)} \n dice similarities : {dice_similarities}")
    if _config['record']:
        writer.add_scalar(f'dice_score/mean', np.mean(dice_similarities))
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/config.py | .py | 3,802 | 145 | """Experiment Configuration"""
import os
import re
import glob
import itertools
import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
sacred.SETTINGS.CAPTURE_MODE = 'no'
ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds
source_folders = ['.', './dataloaders', './models', './util', './dataloaders_medical']
sources_to_save = list(itertools.chain.from_iterable(
[glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
ex.add_source_file(source_file)
# "Organs" : ["background",
# "spleen",
# "right kidney",
# "left kidney",
# "gallbladder",
# "esophagus",
# "liver",
# "stomach",
# "aorta",
# "inferior vana cava",
# "portal vein & splenic vein",
# "pancreas",
# "right adrenal gland",
# "left adrenal gland",
# ],
@ex.config
def cfg():
    """Default sacred configuration. Every local name defined below becomes a
    config entry, so names must not be changed without updating all readers."""
    iter_print=True
    # size = 320
    size = 256
    input_size = (size, size) # 419? 480!
    seed = 1234
    cuda_visable = '0, 1, 2, 3, 4, 5, 6, 7'
    gpu_id = 0
    mode = 'test' # 'train' or 'test'
    record = False  # enable TensorBoard logging
    scale = 1.0
    n_layer = 1  # number of ConvBiGRU layers
    q_slice = 5  # slices per query window
    n_shot = 3   # support examples per episode
    s_idx = 0
    n_pool = 3 # 3 - number of pooling
    target = 1   # target organ label index
    is_attention=False
    add_target = 0
    is_super=False
    external = "None" # "decathlon" # "CT_ORG"
    is_fast_test = False
    n_fast_test = 5
    if external == "None":
        internal = True
    else:
        internal = False
    if mode == 'train':
        lr_milestones = [50*i for i in range(1,3)]
        n_iter = 300
        dataset = 'prostate' # 'VOC' or 'COCO'
        n_steps = 300
        n_work = 1
        batch_size = 3
        print_interval = 500
        validation_interval = 500
        save_pred_every = 10000
        val_cnt = 100
        model = {
            'align': False,
            # 'align': True,
        }
        optim = {
            'lr': 1e-4,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
    elif mode == 'test':
        save_sample = False  # export NIfTI volumes of inputs/preds/labels
        save_name = ""
        is_test = True
        dataset = 'prostate' # 'VOC' or 'COCO'
        notrain = False
        # snapshot = './runs/PANet_VOC_sets_0_1way_1shot_[train]/1/snapshots/30000.pth'
        snapshot = '/user/home2/soopil/tmp/PANet/runs/PANet_VOC_sets_0_3way_5shot_[train]/2/snapshots/50000.pth'
        n_iter = 1
        n_runs = 1
        n_update = 0  # >0 enables fine-tuning on support data before testing
        n_steps = 1000
        batch_size = 1
        # for fine tunning
        optim = {
            'lr': 5e-5,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
        # Set model config from the snapshot string
        model = {}
        for key in ['align',]:
            model[key] = key in snapshot
    else:
        raise ValueError('Wrong configuration for "mode" !')
    exp_str = '_'.join([
        mode,
    ])
    path = {
        'log_dir': './runs',
        # 'init_path': None,
        'init_path': './../../pretrained_model/vgg16-397923af.pth',
    }
    ### configuration for Medical Image Test
    modal_index = 0 #["flair","t1","t1ce","t2"]
    mask_index = 1 #[1, 2, 4]
    board=""
@ex.config_hook
def add_observer(config, command_name, logger):
    """Config hook: attach a FileStorageObserver whose root directory is
    <log_dir>/<experiment path>_<exp_str>, then return the config unchanged."""
    run_dir = os.path.join(config['path']['log_dir'], f'{ex.path}_{config["exp_str"]}')
    ex.observers.append(FileStorageObserver.create(run_dir))
    return config
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/util/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/util/utils.py | .py | 2,004 | 74 | """Util functions"""
import random
import torch
import numpy as np
import os
from datetime import datetime
def try_mkdir(path):
    """Create a single directory, reporting success or failure instead of raising."""
    try:
        os.mkdir(path)
    except:
        print(f"failed to make a directory : {path}")
    else:
        print(f"mkdir : {path}")
def date():
    """Return the current local time as a 'YYYYMMDD_HHMMSS' string.

    Used to build unique TensorBoard run/board names.
    """
    # The previous version first computed now.year + now.month + now.day —
    # a meaningless integer sum that was immediately overwritten; removed.
    return datetime.now().strftime('%Y%m%d_%H%M%S')
def set_seed(seed):
    """
    Seed Python's and PyTorch's (CPU + all CUDA devices) RNGs for reproducibility.
    """
    for seeder in (random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
# Training-class splits for 4-fold few-shot evaluation. For fold k, one fifth
# of the class ids (VOC: 5 of 20, COCO: 20 of 80) is held out as novel test
# classes; the stored set lists the remaining ids available for training.
CLASS_LABELS = {
    'VOC': {
        'all': set(range(1, 21)),
        0: set(range(1, 21)) - set(range(1, 6)),
        1: set(range(1, 21)) - set(range(6, 11)),
        2: set(range(1, 21)) - set(range(11, 16)),
        3: set(range(1, 21)) - set(range(16, 21)),
    },
    'COCO': {
        'all': set(range(1, 81)),
        0: set(range(1, 81)) - set(range(1, 21)),
        1: set(range(1, 81)) - set(range(21, 41)),
        2: set(range(1, 81)) - set(range(41, 61)),
        3: set(range(1, 81)) - set(range(61, 81)),
    }
}
def get_bbox(fg_mask, inst_mask):
    """
    Get the ground truth bounding boxes.

    Returns (fg_bbox, bg_bbox): fg_bbox is 1 inside the bounding box of the
    largest (by pixel count) non-background instance and 0 elsewhere; bg_bbox
    is 0 inside the bounding boxes of *all* instances and 1 elsewhere.
    NOTE(review): mixes torch (bincount) and numpy (np.where / np.unique) —
    this appears to assume CPU tensors shaped [1, H, W]; confirm with callers.
    """
    fg_bbox = torch.zeros_like(fg_mask, device=fg_mask.device)
    bg_bbox = torch.ones_like(fg_mask, device=fg_mask.device)
    # NOTE: mutates the caller's inst_mask (background pixels are zeroed).
    inst_mask[fg_mask == 0] = 0
    area = torch.bincount(inst_mask.view(-1))
    cls_id = area[1:].argmax() + 1  # id of the largest non-background instance
    cls_ids = np.unique(inst_mask)[1:]
    mask_idx = np.where(inst_mask[0] == cls_id)
    y_min = mask_idx[0].min()
    y_max = mask_idx[0].max()
    x_min = mask_idx[1].min()
    x_max = mask_idx[1].max()
    fg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 1
    # Carve every instance's box out of the background mask.
    for i in cls_ids:
        mask_idx = np.where(inst_mask[0] == i)
        y_min = max(mask_idx[0].min(), 0)
        y_max = min(mask_idx[0].max(), fg_mask.shape[1] - 1)
        x_min = max(mask_idx[1].min(), 0)
        x_max = min(mask_idx[1].max(), fg_mask.shape[2] - 1)
        bg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 0
    return fg_bbox, bg_bbox
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/decoder.py | .py | 4,109 | 101 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class Decoder(nn.Module):
    """U-Net-style decoder: refines a 512-channel hidden state with query skip
    features (concatenated at each stage) up to a 2-channel logit map.

    NOTE(review): the constructor arguments (input_channels, input_res, ...)
    are accepted but never used — all channel sizes are hard-coded below.
    """
    def __init__(self, input_channels=512, input_res=(8, 14), init_channels=512, shrink_per_block=2, output_channels=1, output_res=(256, 448)):
        super(Decoder, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=1, affine=True),
            nn.ReLU()
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=1, affine=True),
            nn.ReLU()
        )
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # Each double_conv block consumes the previous output concatenated
        # with one encoder skip feature map.
        self.double_conv1 = nn.Sequential(
            nn.Conv2d(512 + 512*1, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=1, affine=True),
            nn.ReLU(),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.BatchNorm2d(512, momentum=1, affine=True),
            nn.ReLU()
        ) # 14 x 14
        self.double_conv2 = nn.Sequential(
            nn.Conv2d(512 + 512*1, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256, momentum=1, affine=True),
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256, momentum=1, affine=True),
            nn.ReLU()
        ) # 28 x 28
        self.double_conv3 = nn.Sequential(
            nn.Conv2d(256 + 256*1, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128, momentum=1, affine=True),
            nn.ReLU(),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128, momentum=1, affine=True),
            nn.ReLU()
        ) # 56 x 56
        self.double_conv4 = nn.Sequential(
            nn.Conv2d(128 + 128*1, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU()
        ) # 112 x 112
        self.double_conv5 = nn.Sequential(
            nn.Conv2d(64 + 64 * 1, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64, momentum=1, affine=True),
            nn.ReLU(),
            nn.Conv2d(64, 2, kernel_size=1, padding=0), # 1 for bce and 2 for cross entropy loss
            # nn.Conv2d(64, 1, kernel_size=1, padding=0), # 1 for bce and 2 for cross entropy loss
            # nn.Softmax2d()
            # nn.Sigmoid()
        ) # 256 x 256
        # x = F.interpolate(x, orig_size, mode="bilinear")
        self._init_weights()
    def mask_process(self, mask):
        # NOTE(review): this method has no return statement, so the
        # interpolated result is discarded; it appears to be dead code.
        # x = F.interpolate(x, orig_size, mode="bilinear")
        mask = F.interpolate(mask, [16,16], mode="bilinear")
    def forward(self, hidden, ft_list):
        # hidden: [B, 512, h, w] GRU state; ft_list: encoder skip features,
        # consumed deepest-first (ft_list[-1] ... ft_list[-5]).
        out = self.layer1(hidden)
        out = self.layer2(out)
        # out = self.upsample(out) # block 1
        out = torch.cat((out, ft_list[-1]), dim=1)
        out = self.double_conv1(out)
        # out = self.upsample(out) # block 2
        out = torch.cat((out, ft_list[-2]), dim=1)
        out = self.double_conv2(out)
        out = self.upsample(out) # block 3
        out = torch.cat((out, ft_list[-3]), dim=1)
        out = self.double_conv3(out)
        out = self.upsample(out) # block 4
        out = torch.cat((out, ft_list[-4]), dim=1)
        out = self.double_conv4(out)
        out = self.upsample(out) # block 5
        out = torch.cat((out, ft_list[-5]), dim=1)
        out = self.double_conv5(out)
        # out = F.sigmoid(out)
        # out = torch.squeeze(out)
        return out
    def _init_weights(self):
        # He initialization for all conv layers (model is ReLU-based).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # torch.nn.init.normal_(m.weight)
                torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/encoder.py | .py | 1,483 | 43 | import pdb
import torch
import torch.nn as nn
import torchvision
from .vgg import Encoder_vgg
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class SupportEncoder(nn.Module):
    """VGG-based encoder for support (image, mask) pairs — 2-channel input.

    forward returns (embedding, skip_features): the 256-channel 1x1-conv
    projection of the final feature map, plus the activations collected after
    every other backbone layer (used as decoder skip connections).
    """
    def __init__(self, pretrained_path, device):
        super(SupportEncoder, self).__init__()
        backbone = Encoder_vgg(in_channels=2, pretrained_path=pretrained_path).features.to(device)
        self.encoder_list = list(backbone)
        self.conv1x1 = conv_unit(in_ch=512, out_ch=256, kernel_size=1, activation='relu').to(device)

    def forward(self, x, y):
        # Stack the image and its mask along the channel axis.
        feats = []
        h = torch.cat((x, y), dim=1)
        for layer_idx, layer in enumerate(self.encoder_list):
            h = layer(h)
            if layer_idx % 2 == 0:
                feats.append(h)
        return self.conv1x1(h), feats[:]
class QueryEncoder(nn.Module):
    """VGG-based encoder for single-channel query slices.

    forward returns (embedding, skip_features): the 256-channel 1x1-conv
    projection of the final feature map, plus the activations collected after
    every other backbone layer (used as decoder skip connections).
    """
    def __init__(self, pretrained_path, device):
        super(QueryEncoder, self).__init__()
        backbone = Encoder_vgg(in_channels=1, pretrained_path=pretrained_path).features.to(device)
        self.encoder_list = list(backbone)
        self.conv1x1 = conv_unit(in_ch=512, out_ch=256, kernel_size=1, activation='relu').to(device)

    def forward(self, x):
        feats = []
        h = x
        for layer_idx, layer in enumerate(self.encoder_list):
            h = layer(h)
            if layer_idx % 2 == 0:
                feats.append(h)
        return self.conv1x1(h), feats[:]
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/nnutils.py | .py | 1,670 | 51 | import torch
import torch.nn as nn
def conv_unit(in_ch, out_ch, kernel_size, stride = 1, padding = 0, activation = 'relu', batch_norm = True):
    """Build a Conv2d -> [BatchNorm2d] -> [ReLU | Sigmoid] stack as nn.Sequential.

    activation values other than 'relu'/'sigmoid' yield no activation layer.
    """
    modules = [nn.Conv2d(in_channels=in_ch, out_channels=out_ch,
                         kernel_size=kernel_size, stride=stride, padding=padding)]
    if batch_norm:
        modules.append(nn.BatchNorm2d(num_features=out_ch))
    activations = {'relu': nn.ReLU, 'sigmoid': nn.Sigmoid}
    if activation in activations:
        modules.append(activations[activation]())
    return nn.Sequential(*modules)
# class VOSBaseArch(nn.Module):
# def __init__(self, initializer, encoder, convlstmcell, decoder, cost_fn, optimizer):
# super(VOSBaseArch, self).__init__()
# self.initializer = initializer
# self.encoder = encoder
# self.convlstmcell = convlstmcell
# self.decoder = decoder
# self.cost_fn = cost_fn
# self.optimizer = optimizer
# def forward(self, x, y, t):
# yhat_list = []
# loss_list = []
# loss_per_video = 0.0
# print(x[:, 0, :, :, :].size(), y[:, 0, :, :, :].size())
# ci, hi = initializer(x[:, 0, :, :, :] + y[:, 0, :, :, :])
# for frame_id in range(1, x.size(1)):
# xi = x[:, frame_id, :, :, :]
# yi = y[:, frame_id, :, :, :]
# xi = encoder(xi)
# ci, hi = convlstmcell(xi, ci, hi)
# yhati = decoder(hi)
# yhat_list.append(yhati)
# loss = cost_fn(yhati, yi)
# loss_per_video += loss
# loss_list.append(loss.item())
# return yhat_list, loss_list, loss_per_video | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/vgg.py | .py | 4,435 | 119 | """
Encoder for few shot segmentation (VGG16)
"""
import torch
import torch.nn as nn
import pdb
class Encoder_vgg(nn.Module):
    """
    Encoder for few shot segmentation (VGG16-style backbone).

    Args:
        in_channels:
            number of input channels (2 for support image+mask, 1 for query)
        pretrained_path:
            path of a torchvision vgg16 state_dict used for initialization
            (see _init_weights for the partial key copy)
    """
    def __init__(self, in_channels=2, pretrained_path=None):
        super().__init__()
        self.pretrained_path = pretrained_path
        ## basic model
        # NOTE(review): 'features' and 'features1' below are built but
        # discarded; only 'features2' is actually assigned to self.features.
        features = nn.Sequential(
            self._make_layer(2, in_channels, 64),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self._make_layer(2, 64, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self._make_layer(3, 128, 256),
            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
            self._make_layer(3, 256, 512),
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            self._make_layer(3, 512, 512, dilation=2, lastRelu=False),
        )
        ## vgg16 model
        features1 = nn.Sequential( ## 5 pooling and 1 dilation
            self._make_layer(2, in_channels, 64),
            nn.MaxPool2d(kernel_size=2, stride=2),
            self._make_layer(2, 64, 128),
            nn.MaxPool2d(kernel_size=2, stride=2),
            self._make_layer(3, 128, 256),
            nn.MaxPool2d(kernel_size=2, stride=2),
            self._make_layer(3, 256, 512),
            nn.MaxPool2d(kernel_size=2, stride=2), ## no pooing
            self._make_layer(3, 512, 512, dilation=2), #, lastRelu=False # dilation 2
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        features2 = nn.Sequential( ## 4 pooling and 1 dilation
            self._make_layer(2, in_channels, 64),
            nn.MaxPool2d(kernel_size = 2, stride = 2),
            self._make_layer(2, 64, 128),
            nn.MaxPool2d(kernel_size = 2, stride = 2),
            self._make_layer(3, 128, 256),
            nn.MaxPool2d(kernel_size = 2, stride = 2),
            self._make_layer(3, 256, 512),
            # nn.MaxPool2d(kernel_size = 2, stride = 2),
            # self._make_layer(3, 512, 512, dilation=1, lastRelu=False), #, lastRelu=False # dilation 2
            # nn.MaxPool2d(kernel_size=2, stride=2),
            nn.MaxPool2d(kernel_size = 1, stride = 1), # 1 for no pooling
            self._make_layer(3, 512, 512, dilation=2, lastRelu=False), #, lastRelu=False # dilation 2
        )
        # self.features = features1
        self.features = features2
        self._init_weights()
    def forward(self, x):
        return self.features(x)
    def _make_layer(self, n_convs, in_channels, out_channels, dilation=1, lastRelu=True):
        """
        Make a (conv, relu) layer

        Args:
            n_convs:
                number of convolution layers
            in_channels:
                input channels
            out_channels:
                output channels
            dilation:
                conv dilation (padding is set equal to keep spatial size)
            lastRelu:
                whether the final conv is followed by a ReLU
        """
        layer = []
        for i in range(n_convs):
            layer.append(nn.Conv2d(in_channels, out_channels, kernel_size=3,
                                   dilation=dilation, padding=dilation))
            ## add Batch normalization
            # layer.append(nn.BatchNorm2d(out_channels, momentum=1, affine=False))
            if i != n_convs - 1 or lastRelu:
                layer.append(nn.ReLU(inplace=True))
            in_channels = out_channels
        return nn.Sequential(*layer)
    def _init_weights(self):
        # He init for all convs, then (optionally) overwrite a subset of the
        # weights with pretrained vgg16 parameters.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu')
        if self.pretrained_path is not None:
            # print("load pretrained model.")
            dic = torch.load(self.pretrained_path, map_location='cpu')
            keys = list(dic.keys())
            new_dic = self.state_dict()
            new_keys = list(new_dic.keys())
            ## remove variables for Batch normalization
            # print(new_keys)
            length = len(new_keys)
            for i in range(len(new_keys)):
                idx = length - 1 - i
                key = new_keys[idx]
                if "bias" in key or "weight" in key:
                    pass
                else:
                    new_keys.remove(key)
            # Copy pretrained parameters positionally for keys 4..25 — this
            # skips the first two conv layers (keys 0-3), which keep their
            # random init because in_channels (1 or 2) differs from vgg16's 3.
            for i in range(4,26): #26
                new_dic[new_keys[i]] = dic[keys[i]]
            self.load_state_dict(new_dic)
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/convgru.py | .py | 12,588 | 275 | import pdb
import torch
import torch.nn as nn
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class ConvGRUCell(nn.Module):
    """
    A single convolutional GRU cell.

    All three gates are 2-D convolutions over the channel-concatenated
    (input, hidden) tensors; hidden state shape is [B, hidden_channels, H, W].
    """

    def __init__(self, in_channels, hidden_channels, kernel_size, bias, device='cuda:0'):
        super(ConvGRUCell, self).__init__()
        self.device = device          # stored only; forward() never reads it
        self.input_dim = in_channels
        self.hidden_dim = hidden_channels
        self.kernel_size = kernel_size
        # 'same' padding for odd kernel sizes.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias

        def make_gate():
            # One conv gate over the concatenated (input, hidden) channels.
            return nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                             out_channels=self.hidden_dim,
                             kernel_size=self.kernel_size,
                             padding=self.padding,
                             bias=self.bias)

        # Creation order (update, reset, out) is kept so state_dict keys and
        # seeded initialization match existing checkpoints.
        self.update_gate = make_gate()
        self.reset_gate = make_gate()
        self.out_gate = make_gate()

    def forward(self, input_tensor, cur_state):
        """One GRU step; returns the new hidden state (same shape as cur_state)."""
        h_prev = cur_state
        stacked = torch.cat([input_tensor, h_prev], dim=1)
        z = torch.sigmoid(self.update_gate(stacked))   # update gate
        r = torch.sigmoid(self.reset_gate(stacked))    # reset gate
        candidate = torch.tanh(
            self.out_gate(torch.cat([input_tensor, r * h_prev], dim=1)))
        return (1 - z) * h_prev + z * candidate

    def init_hidden(self, b, h, w):
        # Zero initial state; pinned to CPU here (presumably moved to the GPU
        # by the caller) — kept as-is for behavioral parity.
        return torch.zeros(b, self.hidden_dim, h, w, device='cpu')
class ConvGRU(nn.Module):
    """
    Multi-layer convolutional GRU over a (b, t, c, h, w) sequence.

    Each layer is a ConvGRUCell; layer i consumes the full output sequence
    of layer i-1.
    """
    def __init__(self, in_channels, hidden_channels, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False, device='cuda:0'):
        super(ConvGRU, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_channels = self._extend_for_multilayer(hidden_channels, num_layers)
        if not len(kernel_size) == len(hidden_channels) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.device=device
        self.input_dim = in_channels
        self.hidden_dim = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]
            # NOTE(review): the cells are created with their default `device`
            # argument; this still works because the cell's forward() never
            # uses it and the hidden state is moved in forward() below —
            # TODO confirm this is intended.
            cell_list.append(ConvGRUCell(in_channels=cur_input_dim,
                                         hidden_channels=self.hidden_dim[i],
                                         kernel_size=self.kernel_size[i],
                                         bias=self.bias))
        self.cell_list = nn.ModuleList(cell_list)
    def forward(self, input_tensor, hidden_state=None):
        """
        Run the stacked ConvGRU over a whole sequence.

        Parameters
        ----------
        input_tensor:
            5-D Tensor of shape (b, t, c, h, w); the (t, b, ...) variant is
            currently disabled (see the commented-out permute below)
        hidden_state:
            must be None — stateful operation is not implemented

        Returns
        -------
        last_state_list, layer_output
            per-layer stacked outputs of shape (b, t, hidden, h, w) and the
            final hidden states; only the last layer's entries unless
            return_all_layers is True
        """
        # if not self.batch_first:
        #     # (t, b, c, h, w) -> (b, t, c, h, w)
        #     input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
        # Implement stateful ConvGRU
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            b, _, _, h, w = input_tensor.shape
            hidden_state = self._init_hidden(b, h, w)
        layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            # `h` is reused here: from now on it holds the hidden state, not the height
            h = hidden_state[layer_idx].to(self.device)
            output_inner = []
            for t in range(seq_len):
                h = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                              cur_state=h)
                output_inner.append(h)
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output # use encoded features as input again
            layer_output_list.append(layer_output)
            last_state_list.append(h)
        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        return layer_output_list, last_state_list
    def _init_hidden(self, b, h, w):
        # one zero-initialized hidden state per layer
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(b, h, w))
        return init_states
    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        # kernel_size must be a tuple, or a per-layer list of tuples
        if not (isinstance(kernel_size, tuple) or
                    (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')
    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        # broadcast a single setting to one entry per layer
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
class ConvBGRU(nn.Module):
    """
    Bidirectional convolutional GRU: one ConvGRU consumes the sequence in
    forward order, a second consumes it reversed, and their per-step hidden
    states are concatenated along the channel axis.

    Each direction is given hidden_channels // 2 channels so the
    concatenated output has `hidden_channels` channels in total.
    """
    # Constructor
    def __init__(self, in_channels, hidden_channels,
                 kernel_size, num_layers, bias=True, batch_first=False, device='cuda:0'):
        super(ConvBGRU, self).__init__()
        # Bug fix: the constructor used to overwrite both channel arguments
        # with a hard-coded 512, silently ignoring the caller's configuration.
        self.device=device
        self.forward_net = ConvGRU(in_channels, hidden_channels//2, kernel_size,
                                   num_layers, batch_first=batch_first, bias=bias, device=device).to(device)
        self.reverse_net = ConvGRU(in_channels, hidden_channels//2, kernel_size,
                                   num_layers, batch_first=batch_first, bias=bias, device=device).to(device)
    def forward(self, xforward, xreverse):
        """
        Args:
            xforward, xreverse: (B, T, C, H, W) tensors; `xreverse` holds the
                same sequence in reverse temporal order.
        Returns:
            (B, T, hidden_channels, H, W) tensor of concatenated per-step
            states, aligned to the forward temporal order.
        """
        y_out_fwd, _ = self.forward_net(xforward)
        y_out_rev, _ = self.reverse_net(xreverse)
        y_out_fwd = y_out_fwd[-1] # outputs of last CGRU layer = B, T, C, H, W
        y_out_rev = y_out_rev[-1] # outputs of last CGRU layer = B, T, C, H, W
        # flip the reverse branch back so both branches are time-aligned
        reversed_idx = list(reversed(range(y_out_rev.shape[1])))
        y_out_rev = y_out_rev[:, reversed_idx, ...] # reverse temporal outputs.
        ycat = torch.cat((y_out_fwd, y_out_rev), dim=2)
        return ycat
class SupportConvLSTMCell(nn.Module):
    """
    Peephole ConvLSTM cell for the support branch.

    forward(x, c_prev, h_prev) performs one LSTM step and returns the new
    (cell_state, hidden_state); the cell state is halved after each update
    to keep its magnitude bounded over long sequences.
    """
    def __init__(self, channels = 512, height = 8, width = 8, device = 'cuda:0'):
        super(SupportConvLSTMCell, self).__init__()
        ## batch normalization in LSTM cell
        is_bn = False #False
        # Convolutions for gate computations
        self.Wxi = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whi = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxf = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whf = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxc = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whc = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxo = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Who = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        # Peephole weights used in the Hadamard products of the gates.
        # Bug fix: these were plain tensors built with
        # `torch.randn(...).to(device)` — the `.to()` call returns a non-leaf
        # copy that the optimizer never updates and state_dict() never saves.
        # Registering them as nn.Parameter makes them trainable/checkpointable.
        self.Wci = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wcf = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wco = nn.Parameter(torch.empty(channels, height, width, device=device))
        nn.init.kaiming_uniform_(self.Wci)
        nn.init.kaiming_uniform_(self.Wcf)
        nn.init.kaiming_uniform_(self.Wco)
        # Since paper uses ReLU instead of the standard TanH function
        # self.gate_activation = nn.ReLU()
        self.gate_activation = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, c_prev, h_prev):
        """One LSTM step; returns (new_cell_state, new_hidden_state)."""
        i = self.sigmoid(self.Wxi(x) + self.Whi(h_prev) + (self.Wci * c_prev))
        f = self.sigmoid(self.Wxf(x) + self.Whf(h_prev) + (self.Wcf * c_prev))
        c = self.gate_activation(self.Wxc(x) + self.Whc(h_prev))
        # c = (f * c_prev) + (i * c)
        c = ((f * c_prev) + (i * c))/2 ## scale c_state not to overflow
        o = self.sigmoid(self.Wxo(x) + self.Who(h_prev) + (self.Wco * c))
        h = o * self.gate_activation(c)
        return c, h
class QueryConvLSTMCell(nn.Module):
    """
    Peephole ConvLSTM cell for the query branch.

    Unlike SupportConvLSTMCell, the hidden-state convolutions consume the
    concatenation of the query's own previous hidden state and the support
    branch's hidden state (2 * channels input channels).
    """
    def __init__(self, channels = 512, height = 8, width = 8, device = 'cuda:0'):
        super(QueryConvLSTMCell, self).__init__()
        ## batch normalization in LSTM cell
        is_bn = False #False
        # Convolutions for gate computations
        x_ch = channels
        h_ch = 2*channels  # [h_prev, h_supp] concatenated along channels
        self.Wxi = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whi = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxf = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whf = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxc = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxo = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whc = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Who = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        # Peephole weights used in the Hadamard products of the gates.
        # Bug fix: these were plain tensors built with
        # `torch.randn(...).to(device)` — the `.to()` call returns a non-leaf
        # copy that the optimizer never updates and state_dict() never saves.
        # Registering them as nn.Parameter makes them trainable/checkpointable.
        self.Wci = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wcf = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wco = nn.Parameter(torch.empty(channels, height, width, device=device))
        nn.init.kaiming_uniform_(self.Wci)
        nn.init.kaiming_uniform_(self.Wcf)
        nn.init.kaiming_uniform_(self.Wco)
        # Since paper uses ReLU instead of the standard TanH function
        # self.gate_activation = nn.ReLU()
        self.gate_activation = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, c_prev, h_prev, h_supp):
        """One LSTM step conditioned on the support hidden state h_supp."""
        h = torch.cat((h_prev, h_supp),dim=1)
        i = self.sigmoid(self.Wxi(x) + self.Whi(h) + (self.Wci * c_prev))
        f = self.sigmoid(self.Wxf(x) + self.Whf(h) + (self.Wcf * c_prev))
        c = self.gate_activation(self.Wxc(x) + self.Whc(h))
        c = (f * c_prev) + (i * c)
        # c = ((f * c_prev) + (i * c))/2 ## scale c_state not to overflow
        o = self.sigmoid(self.Wxo(x) + self.Who(h) + (self.Wco * c))
        h = o * self.gate_activation(c)
        return c, h
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/models/convlstm.py | .py | 5,696 | 97 | import pdb
import torch
import torch.nn as nn
import torchvision
import numpy as np
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class SupportConvLSTMCell(nn.Module):
    """
    Peephole ConvLSTM cell for the support branch.

    forward(x, c_prev, h_prev) performs one LSTM step and returns the new
    (cell_state, hidden_state); the cell state is halved after each update
    to keep its magnitude bounded over long sequences.
    """
    def __init__(self, channels = 512, height = 8, width = 8, device = 'cuda:0'):
        super(SupportConvLSTMCell, self).__init__()
        ## batch normalization in LSTM cell
        is_bn = False #False
        # Convolutions for gate computations
        self.Wxi = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whi = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxf = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whf = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxc = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whc = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxo = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Who = conv_unit(in_ch = channels, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        # Peephole weights used in the Hadamard products of the gates.
        # Bug fix: these were plain tensors built with
        # `torch.randn(...).to(device)` — the `.to()` call returns a non-leaf
        # copy that the optimizer never updates and state_dict() never saves.
        # Registering them as nn.Parameter makes them trainable/checkpointable.
        self.Wci = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wcf = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wco = nn.Parameter(torch.empty(channels, height, width, device=device))
        nn.init.kaiming_uniform_(self.Wci)
        nn.init.kaiming_uniform_(self.Wcf)
        nn.init.kaiming_uniform_(self.Wco)
        # Since paper uses ReLU instead of the standard TanH function
        # self.gate_activation = nn.ReLU()
        self.gate_activation = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, c_prev, h_prev):
        """One LSTM step; returns (new_cell_state, new_hidden_state)."""
        i = self.sigmoid(self.Wxi(x) + self.Whi(h_prev) + (self.Wci * c_prev))
        f = self.sigmoid(self.Wxf(x) + self.Whf(h_prev) + (self.Wcf * c_prev))
        c = self.gate_activation(self.Wxc(x) + self.Whc(h_prev))
        # c = (f * c_prev) + (i * c)
        c = ((f * c_prev) + (i * c))/2 ## scale c_state not to overflow
        o = self.sigmoid(self.Wxo(x) + self.Who(h_prev) + (self.Wco * c))
        h = o * self.gate_activation(c)
        return c, h
class QueryConvLSTMCell(nn.Module):
    """
    Peephole ConvLSTM cell for the query branch.

    Unlike SupportConvLSTMCell, the hidden-state convolutions consume the
    concatenation of the query's own previous hidden state and the support
    branch's hidden state (2 * channels input channels).
    """
    def __init__(self, channels = 512, height = 8, width = 8, device = 'cuda:0'):
        super(QueryConvLSTMCell, self).__init__()
        ## batch normalization in LSTM cell
        is_bn = False #False
        # Convolutions for gate computations
        x_ch = channels
        h_ch = 2*channels  # [h_prev, h_supp] concatenated along channels
        self.Wxi = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whi = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxf = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whf = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxc = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Wxo = conv_unit(in_ch = x_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Whc = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        self.Who = conv_unit(in_ch = h_ch, out_ch = channels, kernel_size = 3, padding = 1, activation = None, batch_norm = is_bn).to(device)
        # Peephole weights used in the Hadamard products of the gates.
        # Bug fix: these were plain tensors built with
        # `torch.randn(...).to(device)` — the `.to()` call returns a non-leaf
        # copy that the optimizer never updates and state_dict() never saves.
        # Registering them as nn.Parameter makes them trainable/checkpointable.
        self.Wci = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wcf = nn.Parameter(torch.empty(channels, height, width, device=device))
        self.Wco = nn.Parameter(torch.empty(channels, height, width, device=device))
        nn.init.kaiming_uniform_(self.Wci)
        nn.init.kaiming_uniform_(self.Wcf)
        nn.init.kaiming_uniform_(self.Wco)
        # Since paper uses ReLU instead of the standard TanH function
        # self.gate_activation = nn.ReLU()
        self.gate_activation = nn.Tanh()
        self.sigmoid = nn.Sigmoid()
    def forward(self, x, c_prev, h_prev, h_supp):
        """One LSTM step conditioned on the support hidden state h_supp."""
        h = torch.cat((h_prev, h_supp),dim=1)
        i = self.sigmoid(self.Wxi(x) + self.Whi(h) + (self.Wci * c_prev))
        f = self.sigmoid(self.Wxf(x) + self.Whf(h) + (self.Wcf * c_prev))
        c = self.gate_activation(self.Wxc(x) + self.Whc(h))
        c = (f * c_prev) + (i * c)
        # c = ((f * c_prev) + (i * c))/2 ## scale c_state not to overflow
        o = self.sigmoid(self.Wxo(x) + self.Who(h) + (self.Wco * c))
        h = o * self.gate_activation(c)
        return c, h
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/test/summarize_test_results.py | .py | 13,672 | 403 | import re
import glob
import numpy as np
def access_dice_score(path):
    """
    Read a result log and return the dice score as a string.

    The score is taken from the second-to-last line of the file; that line
    is split on single spaces and the second-to-last token is returned.
    """
    # Bug fix: the file handle was never closed (resource leak) and an
    # unused `re.search` result was computed; use a context manager and
    # drop the dead code.
    with open(path) as fd:
        lines = fd.readlines()
    result_line = lines[-2]
    line_parts = re.split(" ", result_line)
    dice = line_parts[-2]
    return dice
def summarize(files):
    """
    Return the dice scores (floats) parsed from the first five result files.

    Returns the sentinel pair (0, 0) when fewer than five files are given;
    callers rely on np.mean/np.std of that tuple being 0 to skip printing.
    """
    if len(files) < 5:
        return 0, 0
    return [float(access_dice_score(f)) for f in files[:5]]
def print_dir_5shot(dir):
    """Print 5-shot dice summaries (mean/std over 5 runs) for each organ under `dir`."""
    # used original data
    print(dir)
    shot = 5
    for organ in [1, 3, 6, 14]:
        matched = sorted(glob.glob(f"{dir}/*_{organ}_*{shot}shot*"))
        dices = summarize(matched)
        avg = float("{:.3f}".format(np.mean(dices)))
        std = float("{:.4f}".format(np.std(dices)))
        dice_str = ",".join(str(d) for d in dices)
        # a zero mean or std means there were no usable results for this setting
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
def main():
    """
    Print dice-score summaries (per organ / per shot) for a series of
    experiment result directories.

    NOTE(review): this function is a scratchpad of copy-pasted report
    stanzas. The `assert False` below aborts execution early, so every
    stanza after it is dead code unless asserts are disabled (python -O).
    """
    # bcv_dir_dice = "runs/log/bcv_dice"
    bcv_dir_dice_ce = "runs/log/bcv_dice_ce"
    ctorg_dir = "runs/log/ctorg"
    decath_dir = "runs/log/decathlon"
    # dirs = [bcv_dir_dice_ce, decath_dir]
    dirs = [bcv_dir_dice_ce, ctorg_dir, decath_dir, "runs/log/bcv_dice"]
    for dir in dirs:
        print()
        print(dir)
        for shot in [1,3,5]:
            for organ in [1,3,6,14]:
                files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
                files.sort()
                dices = summarize(files)
                avg, std = np.mean(dices), np.std(dices)
                avg = float("{:.3f}".format(avg))
                std = float("{:.4f}".format(std))
                dices = [str(dice) for dice in dices]
                dice_str = ",".join(dices)
                # print(dir, organ, shot)
                # a zero mean or std means no usable results for this setting
                if avg*std!=0:
                    print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    dir = "runs/log/bcv_dice_ce_bladder"
    print(dir)
    shot=5
    organ=14
    files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
    files.sort()
    for i, file in enumerate(files):
        dice = float(access_dice_score(file))
        print(i, dice)
    # dices = summarize(files)
    # print(dices)
    # avg, std = np.mean(dices), np.std(dices)
    # avg = float("{:.3f}".format(avg))
    # std = float("{:.4f}".format(std))
    # dices = [str(dice) for dice in dices]
    # dice_str = ",".join(dices)
    # # print(dir, organ, shot)
    # if avg * std != 0:
    #     print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    # NOTE(review): everything below this assert is unreachable (see docstring).
    assert False
    dir = "runs/log/bcv_super"
    print(dir)
    for shot in [1, 3, 5]:
        for organ in [1, 3, 6, 14]:
            files = glob.glob(f"{dir}/*_super{organ}_*{shot}shot*")
            files.sort()
            dices = summarize(files)
            avg, std = np.mean(dices), np.std(dices)
            avg = float("{:.3f}".format(avg))
            std = float("{:.4f}".format(std))
            dices = [str(dice) for dice in dices]
            dice_str = ",".join(dices)
            # print(dir, organ, shot)
            if avg * std != 0:
                print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/bcv_dice_ce_7slice"
    print(dir)
    shot=1
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/bcv_dice_ce_9slice"
    print(dir)
    shot=1
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/bcv_dice_ce_11slice"
    print(dir)
    shot=1
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/bcv_dice_ce_5shot_7slice"
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    # bcv_finetuning_dir = "runs/log/bcv_finetuning_supp0"
    # print(bcv_finetuning_dir)
    # for organ in [1, 3, 6, 14]:
    # for update in [0, 5, 10, 15, 20, 25, 30, 40, 50, 100, 150, 200, 250, 300]:
    # files = glob.glob(f"{bcv_finetuning_dir}/*_{organ}_*{update}update*")
    # files.sort()
    # dices = access_dice_score(files[0])
    # print(f"organ:{organ}, {shot}shot, {update}update,{dices}")
    # print()
    # bcv_finetuning_dir = "runs/log/bcv_finetuning_supp0_v2"
    # print(bcv_finetuning_dir)
    # for organ in [1, 3, 6, 14]:
    # for update in [0, 200, 400, 600, 1000]:
    # files = glob.glob(f"{bcv_finetuning_dir}/*_{organ}_*{update}update*")
    # files.sort()
    # dices = access_dice_score(files[0])
    # print(f"organ:{organ}, {shot}shot, {update}update,{dices}")
    # print()
    # ctorg_finetuning_dir = "runs/log/ctorg_finetuning_supp30_b2"
    # print(ctorg_finetuning_dir)
    # for organ in [3, 6, 14]:
    # for update in [0, 30, 60, 90, 120, 150]:
    # files = glob.glob(f"{ctorg_finetuning_dir}/*_{organ}_*{update}update*")
    # files.sort()
    # dices = access_dice_score(files[0])
    # print(f"organ:{organ}, {shot}shot, {update}update,{dices}")
    # print()
    decathlon_finetuning_dir = "runs/log/decathlon_finetuning_supp0"
    print(decathlon_finetuning_dir)
    for organ in [1, 6]:
        for update in [0, 10, 20, 30, 40, 50]:
            files = glob.glob(f"{decathlon_finetuning_dir}/*_{organ}_*{update}update*")
            files.sort()
            dices = access_dice_score(files[0])
            # NOTE(review): `shot` here is the stale value left over from the
            # stanza above (5) — presumably intended, TODO confirm.
            print(f"organ:{organ}, {shot}shot, {update}update,{dices}")
        print()
    dir = "runs/log/bcv_100update"
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    dir = "runs/log/decathlon_100update"
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    dir = "runs/log/ctorg_0update_v1_fast"
    # used original data
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    dir = "runs/log/ctorg_10update_v1_fast"
    # used original data
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/ctorg_20update_v1_fast"
    # used original data
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/ctorg_10update_v1_fast_ce"
    # used original data
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    print()
    dir = "runs/log/ctorg_50update_v1_fast"
    # used original data
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    dir = "runs/log/ctorg_50update_v1"
    # used original data
    print(dir)
    shot=5
    for organ in [1, 3, 6, 14]:
        files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
        files.sort()
        dices = summarize(files)
        avg, std = np.mean(dices), np.std(dices)
        avg = float("{:.3f}".format(avg))
        std = float("{:.4f}".format(std))
        dices = [str(dice) for dice in dices]
        dice_str = ",".join(dices)
        # print(dir, organ, shot)
        if avg * std != 0:
            print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    # print_dir_5shot("runs/log/ctorg_0update_v1")
    # print_dir_5shot("runs/log/ctorg_5update_v1")
    # print_dir_5shot("runs/log/ctorg_10update_v1")
    # print_dir_5shot("runs/log/ctorg_15update_v1")
    # print_dir_5shot("runs/log/ctorg_20update_v1")
    # print_dir_5shot("runs/log/ctorg_40update_v1")
    # print_dir_5shot("runs/log/ctorg_0update_v2_fast")
    # print_dir_5shot("runs/log/ctorg_10update_v2_fast")
    # print_dir_5shot("runs/log/ctorg_40update_v2_fast")
    # dir = "runs/log/ctorg_finetuning_supp0_fast_v3_2"
    # print(dir)
    # for organ in [3, 6, 14]:
    # for update in [0, 5, 10,20,40,60,100,200]:
    # files = glob.glob(f"{dir}/*_{organ}_*{update}update*")
    # files.sort()
    # dices = access_dice_score(files[0])
    # print(f"organ:{organ}, {shot}shot, {update}update,{dices}")
    # print()
    # dir = "runs/log/ctorg_kidney"
    # organ=3
    # print(dir)
    # for shot in [3, 5]:
    # files = glob.glob(f"{dir}/*_{organ}_*{shot}shot*")
    # files.sort()
    #
    # dices = []
    # for file in files[5:5+5]:
    # dices.append(float(access_dice_score(file)))
    #
    # avg, std = np.mean(dices), np.std(dices)
    # avg = float("{:.3f}".format(avg))
    # std = float("{:.4f}".format(std))
    # dices = [str(dice) for dice in dices]
    # dice_str = ",".join(dices)
    #
    # if avg * std != 0:
    #     print(f"organ:{organ},{shot}shot,{dice_str},{avg},{std}")
    # print()
if __name__ == "__main__":
    main()
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/dataloaders_medical/common.py | .py | 6,690 | 210 | """
Dataset classes for common uses
"""
import random
import SimpleITK as sitk
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
import torch
import torchvision.transforms.functional as tr_F
from skimage.exposure import equalize_hist
import pdb
def crop_resize(slice):
    """
    Crop a fixed border off a 2-D slice and resize the result to 240x240.

    Bug fix: this used to call the module-level `resize(sample, size)`
    helper, which expects a {'image', 'label', ...} sample dict and fails on
    a bare ndarray (the `cv2.resize` this pattern relies on elsewhere is not
    imported in this module). PIL — already imported here — is used instead.
    """
    x_size, y_size = np.shape(slice)
    slice = slice[40:x_size - 20, 50:y_size - 50]
    # PIL cannot handle float64 arrays, so work in float32 for float input
    if np.issubdtype(slice.dtype, np.floating):
        slice = np.asarray(slice, dtype=np.float32)
    im = Image.fromarray(slice)
    slice = np.asarray(im.resize((240, 240)))
    return slice
def fill_empty_space(arr):
    """Replace zero-valued entries with the array mean (mean includes the zeros). Mutates `arr` in place."""
    mean_val = np.mean(arr)
    arr[arr == 0] = mean_val
    return arr
def prostate_sample(img_arr, label_arr, isize):
    """Wrap an (image, label) ndarray pair into a resized, normalized tensor sample of size isize x isize."""
    label_img = Image.fromarray(label_arr.astype(np.uint8))
    sample = {
        'image': Image.fromarray(img_arr.astype(np.uint8)),
        'label': label_img,
        # the downstream pipeline expects inst/scribble entries; reuse the label
        'inst': label_img,
        'scribble': label_img,
    }
    sample = resize(sample, (isize, isize))
    return to_tensor_normalize(sample)
def prostate_mask(sample, isize):
    """Build foreground/background masks of shape (1, isize, isize) from sample['label'] (class id 1 = FG)."""
    label = sample['label']
    fg = (label == 1).to(label.dtype)
    bg = torch.ones_like(label) - fg
    return {
        'fg_mask': fg.expand((1, isize, isize)),
        'bg_mask': bg.expand((1, isize, isize)),
    }
def get_support_sample(ipath, lpath, modal_index, mask_n, is_HE, shift=0):
    """
    Build a support sample from a 3-D volume: pick the slice with the
    largest (binary) mask area, crop/resize it, round-trip it through
    temporary PNG files and return the normalized tensor sample.

    NOTE(review): `mask_n` is currently unused because the binary
    (`arr_mask > 0`) variant is active — see the commented alternatives.
    The tmp_img.png/tmp_label.png round-trip is not safe for concurrent
    callers sharing a working directory.
    """
    arr = read_npy(ipath, modal_index, is_HE)
    # pdb.set_trace() ## for debugging
    # arr = fill_empty_space(arr)
    arr_mask = read_sitk(lpath)
    ## for 2-way(binary) segmentation
    arr_mask = (arr_mask>0)*1.0
    # arr_mask = (arr_mask == mask_n) * 1.0
    # choose the axial slice with the largest mask area (plus optional shift)
    cnt = np.sum(arr_mask, axis=(1, 2))
    maxarg = np.argmax(cnt)
    slice = arr[maxarg+shift, :, :]
    slice = crop_resize(slice)
    # slice = normalize(slice)
    slice = convert3ch(slice)
    save_img(slice, "tmp_img.png")
    slice_mask = arr_mask[maxarg+shift, :, :]*255.0
    slice_mask = crop_resize(slice_mask)
    # slice_mask = convert3ch(slice_mask)
    save_img(slice_mask, "tmp_label.png")
    sample = read_sample("tmp_img.png", "tmp_label.png")
    # sample = transforms(sample)
    sample = to_tensor_normalize(sample)
    return sample
    ## for 5 way segmentation
    # arr_mask = (arr_mask == mask_n)*1.0
    # if mask_n == 2:
    #     arr_mask = (arr_mask > 0)*1.0
    # elif mask_n == 1:
    #     arr_mask = (arr_mask == 1)*1.0
    # elif mask_n == 4:
    #     arr_mask = (arr_mask == 4)*1.0 + (arr_mask == 1)*1.0
    # else:
    #     raise("invalid mask_n")
def getMask(sample, class_id=1, class_ids=[0, 1]):
    """
    Build (1, 240, 240) masks from sample['label'] and sample['empty'].

    fg: label == class_id; bg: its complement (empty space included);
    brain_fg: tissue that is neither foreground nor empty;
    brain_bg: the empty region itself.
    """
    label = sample['label']
    empty = sample['empty']
    ones = torch.ones_like(label)
    fg = torch.where(label == class_id, ones, torch.zeros_like(label))
    bg = ones - fg
    brain_fg = ones - empty - fg
    brain_bg = empty
    full = (1, 240, 240)
    return {
        'fg_mask': fg.expand(full),
        'bg_mask': bg.expand(full),
        'brain_fg_mask': brain_fg.expand(full),
        'brain_bg_mask': brain_bg.expand(full),
    }
def read_npy(path, modal_index, is_HE):
    """Load one modality from a .npy volume; optionally histogram-equalize, then min-max scale to [0, 255]."""
    volume = np.load(path)[modal_index]
    if is_HE:
        volume = equalize_hist(volume)
    return normalize(volume, type=0)
def convert3ch(slice, axis=2):
    """Replicate a 2-D slice three times along `axis` to fake an RGB image."""
    return np.stack([slice, slice, slice], axis=axis)
def normalize(arr, type=0):
    """
    Rescale an array to roughly the [0, 255] range.

    type=0: min/max scaling to exactly [0, 255].
    type=1: z-score (zero mean, unit std) multiplied by 255.

    Bug fixes: the type=0 branch used in-place `-=`, silently mutating the
    caller's array; an unknown `type` used to raise UnboundLocalError
    instead of a meaningful error.
    """
    if type == 0:  # min and max
        arr = arr - np.amin(arr)
        maxi = np.amax(arr)
        arr_norm = arr / maxi
    elif type == 1:  # stddev and mean
        arr_norm = (arr - np.mean(arr)) / np.std(arr)
    else:
        raise ValueError(f"unknown normalization type: {type}")
    return arr_norm * 255.0
def map_distribution(arr, tg_mean=0, tg_std=1, tg_min=0, tg_max=255):
    """Shift/scale arr so its nonzero entries match the target mean/std, then clip to [tg_min, tg_max]."""
    nonzero = arr[np.nonzero(arr)]
    # statistics come from nonzero voxels only (zeros are background)
    mean = np.mean(nonzero)
    std = np.std(nonzero)
    standardized = (arr - mean) / std
    mapped = standardized * tg_std + tg_mean
    print(np.mean(mapped), np.std(mapped), np.amin(mapped), np.amax(mapped))
    return np.clip(mapped, tg_min, tg_max)
def to_tensor_normalize(sample):
    """
    Convert a PIL-image sample dict to normalized tensors, in place.

    'image' becomes a (1, 3, 240, 240) ImageNet-normalized tensor,
    'empty' a long mask of pixels that were exactly zero before
    normalization, and 'label' the annotation scaled to [0, 1].
    'inst' and 'scribble' are passed through unchanged.
    """
    img = tr_F.to_tensor(sample['image'])
    # zero pixels in the first channel mark empty space
    empty = (img[0] == 0.0) * 1.0
    img = tr_F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    label = torch.Tensor(np.array(sample['label'])).long()
    sample['empty'] = empty.long()
    sample['image'] = img.expand((1, 3, 240, 240))
    sample['label'] = label / 255.0
    return sample
def read_sample(img_path, label_path):
    """Load an image/label file pair into the sample-dict layout used by the pipeline."""
    sample = {
        'image': Image.open(img_path),
        'label': Image.open(label_path),
        'inst': Image.open(label_path),
        'scribble': Image.open(label_path),
        # keep a second, untouched copy of the raw image
        'original': Image.open(img_path),
    }
    return sample
def read_sitk(path):
    """Read a medical image with SimpleITK and return its voxels as a float32 ndarray."""
    image = sitk.ReadImage(path)
    voxels = sitk.GetArrayFromImage(image)
    return np.array(voxels, dtype=np.float32)
def save_sitk(arr, itk_ref, opath):
    """Write `arr` to `opath`, copying spacing/origin/direction metadata from `itk_ref`."""
    out_img = sitk.GetImageFromArray(arr)
    out_img.CopyInformation(itk_ref)
    sitk.WriteImage(out_img, opath)
def save_img(arr, path):
    """Save a numpy array to ``path`` as an 8-bit image."""
    Image.fromarray(arr.astype(np.uint8)).save(path)
def load_img(path):
    """Read an image file and run it through tensor conversion/normalization.

    NOTE(review): read_PIL returns an ndarray while to_tensor_normalize
    indexes dict keys — confirm callers rely on this path at all.
    """
    return to_tensor_normalize(read_PIL(path))
def load_seg(path):
    """Read a segmentation image and return it as a tensor with a leading channel axis."""
    arr = read_PIL(path)[np.newaxis, ...]  # add channel dimension
    return tr_F.to_tensor(arr)
def read_PIL(path):
    """Open an image file with PIL and return its pixels as a float32 ndarray."""
    return np.array(Image.open(path), dtype=np.float32)
def resize(sample, size):
    """Resize a sample's image and label to ``size`` in place.

    The label uses nearest-neighbour interpolation so class ids are not blended.
    """
    sample['image'] = tr_F.resize(sample['image'], size)
    sample['label'] = tr_F.resize(sample['label'], size, interpolation=Image.NEAREST)
    return sample
| Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/dataloaders_medical/dataset.py | .py | 11,971 | 321 | import os
import re
import sys
import json
import math
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def prostate_img_process(img_arr, HE=False):
    """Intensity-preprocess a prostate slice.

    With HE=True, histogram equalization is applied and rescaled to 0-255;
    otherwise the `normalize` helper (type 0) is used.
    """
    if HE:
        return equalize_hist(img_arr) * 255.0
    return normalize(img_arr, type=0)
def totensor(arr):
    """Wrap a numpy array as a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
    """Apply one shared random rotation plus optional flips to all four stacks.

    Support arrays are 5-D and query arrays 4-D, so the spatial axes of the
    query tensors sit one position earlier than those of the support tensors.
    Images and labels always receive identical transforms.
    """
    # Shared 0-3 quarter-turn count keeps support and query aligned.
    turns = random.sample(range(4), 1)[0]
    s_imgs = np.rot90(s_imgs, turns, (3, 4)).copy()
    s_labels = np.rot90(s_labels, turns, (3, 4)).copy()
    q_imgs = np.rot90(q_imgs, turns, (2, 3)).copy()
    q_labels = np.rot90(q_labels, turns, (2, 3)).copy()
    # Flip along the first spatial axis with probability 0.5.
    if random.random() < 0.5:
        s_imgs, s_labels = np.flip(s_imgs, 3).copy(), np.flip(s_labels, 3).copy()
        q_imgs, q_labels = np.flip(q_imgs, 2).copy(), np.flip(q_labels, 2).copy()
    # Flip along the second spatial axis with probability 0.5.
    if random.random() < 0.5:
        s_imgs, s_labels = np.flip(s_imgs, 4).copy(), np.flip(s_labels, 4).copy()
        q_imgs, q_labels = np.flip(q_imgs, 3).copy(), np.flip(q_labels, 3).copy()
    return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset():
    """Few-shot segmentation dataset over per-subject directories of 2-D .npy slices.

    Each subject is a directory of files named "<slice_index>.npy".  A sample
    pairs `n_shot` support slice stacks with one query stack of `q_slice`
    consecutive slices from a different subject.  Train vs. test behaviour is
    chosen by the subclass name (names starting with "Test" run in test mode).
    """
    def __init__(self, img_paths, label_paths, config):
        """
        Dataset constructor.

        Args:
            img_paths: per-subject image slice directories.
            label_paths: per-subject label slice directories, parallel to img_paths.
            config: dict with keys 'mode', 'size', 'q_slice', 'n_shot', 's_idx'.
        """
        super().__init__()
        self.mode = config['mode']
        self.length = len(img_paths)
        print(f"# of data : {self.length}")
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.q_slice = config["q_slice"]
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        self.is_train = True
        # Sniff the subclass name: "Test*" subclasses switch to test mode.
        if str(self.__class__).split(".")[-1][:4]=="Test":
            self.is_train = False
        ## load file names in advance: numerically sorted "<n>.npy" per subject
        self.img_lists = []
        self.slice_cnts = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            self.slice_cnts.append(len(fnames))
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        ## remove ids if its slice number is less than max_slice number
        if self.is_train:
            remove_ids = []
            for i, img_list in enumerate(self.img_lists):
                if len(img_list) < self.q_slice:
                    remove_ids.append(i)
            print(self.is_train, self.__class__, self.valid_img_n," # of remove ids : ", len(remove_ids))
            # Pop from the end so earlier indices stay valid; note this
            # mutates the caller-provided img_paths/label_paths lists.
            for id in reversed(remove_ids):
                self.img_paths.pop(id)
                self.label_paths.pop(id)
                self.img_lists.pop(id)
                self.valid_img_n -= 1
        ## count the test counts for validation
        else:
            # One query window per starting slice: len - q_slice + 1 windows.
            self.q_cnts = []
            for img_list in self.img_lists:
                self.q_cnts.append(len(img_list)-self.q_slice+1)
            self.length = sum(self.q_cnts)
    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load, resize and stack support/query slices and return the tensor sample.

        Returns a dict with "s_x"/"s_y" support tensors, "q_x"/"q_y" query
        tensors, and the source file paths under "s_fname"/"q_fname".
        Random flip/rotation augmentation is applied only in train mode.
        """
        # One shared seed so modality choice (see Tumor-style img_load
        # overrides) is synchronized across support and query slices.
        seed = random.randrange(0,1000)
        # s_length = len(s_img_paths)
        s_imgs_all, s_labels_all = [],[]
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [],[]
            for i in range(len(s_img_paths)):
                img_path, label_path = s_img_paths[i], s_label_paths[i]
                img = self.img_load(img_path, seed)
                img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
                img = np.expand_dims(img, axis=0)
                imgs.append(img)
                label = np.load(label_path)
                # Nearest-neighbour keeps label ids unblended.
                label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
                label = np.expand_dims(label, axis=0)
                labels.append(label)
            s_imgs = np.stack(imgs,axis=0)
            s_labels = np.stack(labels,axis=0)
            s_imgs_all.append(s_imgs)
            s_labels_all.append(s_labels)
        s_imgs = np.stack(s_imgs_all,axis=0)
        s_labels = np.stack(s_labels_all,axis=0)
        q_length = len(q_img_paths)
        imgs, labels = [],[]
        for i in range(len(q_img_paths)):
            img_path, label_path = q_img_paths[i], q_label_paths[i]
            img = self.img_load(img_path, seed)
            img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
            img = np.expand_dims(img, axis=0)
            imgs.append(img)
            label = np.load(label_path)
            label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
            label = np.expand_dims(label, axis=0)
            labels.append(label)
        q_imgs = np.stack(imgs,axis=0)
        q_labels = np.stack(labels,axis=0)
        # print(imgs.shape) [slice_num,1,256,256]?
        if self.is_train: ## random augmentation : flip, rotation
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x":totensor(s_imgs),
            "s_y":totensor(s_labels), #.long()
            "q_x":totensor(q_imgs),
            "q_y":totensor(q_labels), #.long()
            # "s_length":s_length,
            # "q_length":q_length,
            "s_fname":s_img_paths_all,
            "q_fname":q_img_paths,
        }
        return sample
    def handle_idx(self, q_idxs, q_n, s_n):
        """
        Map query slice indices onto support slice indices by relative depth.

        Each query index at fraction q_idx/(q_n-1) of its volume is mapped to
        the support slice at the same fraction of the support volume.
        :return: supp_idxs
        """
        s_idxs = []
        for q_idx in q_idxs:
            q_ratio = (q_idx)/(q_n-1)
            s_idx = round((s_n-1)*q_ratio)
            s_idxs.append(s_idx)
        return s_idxs
    def random_flip_z(self, q, s):
        # With p=0.5, reverse BOTH slice lists in place (z-axis flip).
        if random.random() < 0.5:
            q.reverse()
            s.reverse()
        return q,s
    def getitem_train(self):
        """Draw a random episode: n_shot support subjects plus one query subject."""
        ## choose support and target
        idx_space = [i for i in range(self.valid_img_n)]
        # print(idx_space, self.n_shot)
        subj_idxs = random.sample(idx_space, self.n_shot+1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        # NOTE(review): randrange(0, len-q_slice) raises ValueError when a
        # subject has exactly q_slice slices (the __init__ filter keeps such
        # subjects) — confirm whether len-q_slice+1 was intended.
        q_idx_start = random.randrange(0,len(q_fnames)-self.q_slice)
        q_idxs = [n for n in range(q_idx_start, q_idx_start+self.q_slice)]
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            # NOTE(review): reverse() mutates the cached list in
            # self.img_lists, so the flip persists across episodes.
            q_fnames.reverse() # ??? when to reverse
        s_img_paths_all, s_label_paths_all = [],[]
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = self.img_lists[s_subj_idx]
            ## flip augmentation (same in-place mutation caveat as above)
            if is_flip:
                s_fnames.reverse()
            ## choose support and query slice
            s_idxs = self.handle_idx(q_idxs, len(q_fnames), len(s_fnames))
            # s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = [s_fnames[idx] for idx in s_idxs]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = [q_fnames[idx] for idx in q_idxs]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def getitme_test(self, idx):
        """Deterministic test episode for flat index `idx`.

        Uses the fixed support volumes registered via set_support_volume.
        (Name keeps the original "getitme" spelling for API compatibility.)
        """
        q_subj_idx, q_idx_start = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        q_idxs = [n for n in range(q_idx_start, q_idx_start+self.q_slice)]
        s_img_paths_all, s_label_paths_all = [],[]
        for s_idx in range(self.n_shot):
            s_subj_img_path = self.s_img_paths[s_idx]
            s_subj_label_path = self.s_label_paths[s_idx]
            s_fnames = self.s_fnames_list[s_idx]
            ## choose support and query slice
            s_idxs = self.handle_idx(q_idxs, len(q_fnames), len(s_fnames))
            s_fnames_selected = [s_fnames[idx] for idx in s_idxs]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = [q_fnames[idx] for idx in q_idxs]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def get_len_train(self):
        # Train length is the subject count set in __init__.
        return self.length
    def get_len_test(self):
        # Test length is the total number of query windows over all subjects.
        return self.length
    def get_val_subj_idx(self, idx):
        """Map a flat validation index to (subject index, start slice)."""
        for subj_idx,cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx*self.q_slice
            else:
                idx -= cnt
        print("get_val_subj_idx function is not working.")
        assert False
    def get_test_subj_idx(self, idx):
        """Map a flat test index to (subject index, query window start)."""
        # for subj_idx,cnt in enumerate(self.slice_cnts):
        for subj_idx,cnt in enumerate(self.q_cnts):
            if idx < cnt:
                return subj_idx, idx
            else:
                idx -= cnt
        print("get_test_subj_idx function is not working.")
        assert False
    def get_cnts(self):
        ## only for test loader: per-subject query window counts
        return self.q_cnts
        # return self.slice_cnts
    def img_load(self, img_path, seed=0):
        """Load one slice; `seed` is unused here but lets subclasses pick a modality."""
        img_arr = np.load(img_path)
        return img_arr
class BaseLoader(Base_dataset):
    """Single-modality, single-label specialization of Base_dataset."""
    modal_i = [0] # there is only one modality
    label_i = 1.0 # there is only one label for each image
class TrainLoader(BaseLoader):
    """Training loader: every __getitem__ samples a fresh random episode."""
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # idx is ignored; episodes are drawn randomly each call.
        return self.getitem_train()
class TestLoader(BaseLoader):
    """Test loader: deterministic episodes against fixed support volumes.

    set_support_volume must be called before iterating.
    """
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
    def set_support_volume(self, s_img_paths, s_label_paths):
        ## set support img path and label path for validation and testing
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            # Numerically sort "<n>.npy" slice files of each support volume.
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
if __name__ == "__main__":
pass
# main() | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/dataloaders_medical/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/dataloaders_medical/dataset_CT_ORG.py | .py | 11,500 | 297 | import os
import re
import sys
import json
import math
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def totensor(arr):
    """Wrap a numpy array as a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
    """Apply one shared random rotation plus optional flips to all four stacks.

    Support arrays are 5-D and query arrays 4-D, so the query spatial axes
    sit one position earlier.  Images and labels get identical transforms.
    """
    # Shared 0-3 quarter-turn count keeps support and query aligned.
    turns = random.sample(range(4), 1)[0]
    s_imgs = np.rot90(s_imgs, turns, (3, 4)).copy()
    s_labels = np.rot90(s_labels, turns, (3, 4)).copy()
    q_imgs = np.rot90(q_imgs, turns, (2, 3)).copy()
    q_labels = np.rot90(q_labels, turns, (2, 3)).copy()
    # Flip along the first spatial axis with probability 0.5.
    if random.random() < 0.5:
        s_imgs, s_labels = np.flip(s_imgs, 3).copy(), np.flip(s_labels, 3).copy()
        q_imgs, q_labels = np.flip(q_imgs, 2).copy(), np.flip(q_labels, 2).copy()
    # Flip along the second spatial axis with probability 0.5.
    if random.random() < 0.5:
        s_imgs, s_labels = np.flip(s_imgs, 4).copy(), np.flip(s_labels, 4).copy()
        q_imgs, q_labels = np.flip(q_imgs, 3).copy(), np.flip(q_labels, 3).copy()
    return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset_ctorg():
    """CT-ORG variant of the few-shot slice dataset.

    Same episode construction as the other Base_dataset classes, but
    img_load shifts intensities by +0.25 and test mode counts
    len - q_slice query windows per subject.
    """
    def __init__(self, img_paths, label_paths, config):
        """
        Dataset constructor.

        Args:
            img_paths: per-subject image slice directories.
            label_paths: per-subject label slice directories, parallel to img_paths.
            config: dict with keys 'mode', 'size', 'q_slice', 'n_shot', 's_idx'.
        """
        super().__init__()
        self.mode = config['mode']
        self.length = len(img_paths)
        print(f"# of data : {self.length}")
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.q_slice = config["q_slice"]
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        self.is_train = True
        # Subclass-name sniffing: "Test*" subclasses run in test mode.
        train_criterion=str(self.__class__).split(".")[-1][:4]
        print(f"train_criterion : {train_criterion}")
        if train_criterion=="Test":
            self.is_train = False
        ## load file names in advance: numerically sorted "<n>.npy" per subject
        self.img_lists = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        ## remove ids if its slice number is less than max_slice number
        if self.is_train:
            remove_ids = []
            for i, img_list in enumerate(self.img_lists):
                if len(img_list) < self.q_slice:
                    remove_ids.append(i)
            print(self.is_train, self.__class__, self.valid_img_n," # of remove ids : ", len(remove_ids))
            # Pop from the end so earlier indices stay valid.
            for id in reversed(remove_ids):
                self.img_paths.pop(id)
                self.label_paths.pop(id)
                self.img_lists.pop(id)
                self.valid_img_n -= 1
        ## count the test counts for validation
        else:
            self.q_cnts = []
            for img_list in self.img_lists:
                self.q_cnts.append(len(img_list)-self.q_slice)
            self.length = sum(self.q_cnts)
    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load, resize and stack support/query slices into the episode dict."""
        # Shared seed keeps subclass modality selection synchronized.
        seed = random.randrange(0,1000)
        # s_length = len(s_img_paths)
        s_imgs_all, s_labels_all = [],[]
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [],[]
            for i in range(len(s_img_paths)):
                img_path, label_path = s_img_paths[i], s_label_paths[i]
                img = self.img_load(img_path, seed)
                img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
                img = np.expand_dims(img, axis=0)
                imgs.append(img)
                label = np.load(label_path)
                # Nearest-neighbour keeps label ids unblended.
                label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
                label = np.expand_dims(label, axis=0)
                labels.append(label)
            s_imgs = np.stack(imgs,axis=0)
            s_labels = np.stack(labels,axis=0)
            s_imgs_all.append(s_imgs)
            s_labels_all.append(s_labels)
        s_imgs = np.stack(s_imgs_all,axis=0)
        s_labels = np.stack(s_labels_all,axis=0)
        q_length = len(q_img_paths)
        imgs, labels = [],[]
        for i in range(len(q_img_paths)):
            img_path, label_path = q_img_paths[i], q_label_paths[i]
            img = self.img_load(img_path, seed)
            img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
            img = np.expand_dims(img, axis=0)
            imgs.append(img)
            label = np.load(label_path)
            label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
            label = np.expand_dims(label, axis=0)
            labels.append(label)
        q_imgs = np.stack(imgs,axis=0)
        q_labels = np.stack(labels,axis=0)
        # print(imgs.shape) [slice_num,1,256,256]?
        if self.is_train: ## random augmentation : flip, rotation
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x":totensor(s_imgs),
            "s_y":totensor(s_labels), #.long()
            "q_x":totensor(q_imgs),
            "q_y":totensor(q_labels), #.long()
            # "s_length":s_length,
            # "q_length":q_length,
            "s_fname":s_img_paths_all,
            "q_fname":q_img_paths,
        }
        return sample
    def handle_idx(self, q_idxs, q_n, s_n):
        """
        Map query slice indices to support indices by relative depth fraction.
        :return: supp_idxs
        """
        s_idxs = []
        for q_idx in q_idxs:
            q_ratio = (q_idx)/(q_n-1)
            s_idx = round((s_n-1)*q_ratio)
            s_idxs.append(s_idx)
        return s_idxs
    def getitem_train(self):
        """Draw a random episode: n_shot support subjects plus one query subject."""
        ## choose support and target
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot+1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        # NOTE(review): raises ValueError when len(q_fnames) == q_slice; the
        # __init__ filter keeps such subjects — confirm len-q_slice+1 intent.
        q_idx_start = random.randrange(0,len(q_fnames)-self.q_slice)
        q_idxs = [n for n in range(q_idx_start, q_idx_start+self.q_slice)]
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            # NOTE(review): reverse() mutates the cached self.img_lists entry.
            q_fnames.reverse() # ??? when to reverse
        s_img_paths_all, s_label_paths_all = [],[]
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = self.img_lists[s_subj_idx]
            ## flip augmentation
            if is_flip:
                s_fnames.reverse()
            ## choose support and query slice
            s_idxs = self.handle_idx(q_idxs, len(q_fnames), len(s_fnames))
            # s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = [s_fnames[idx] for idx in s_idxs]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = [q_fnames[idx] for idx in q_idxs]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def getitme_test(self, idx):
        """Deterministic test episode (name keeps the original spelling)."""
        q_subj_idx, q_idx_start = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        q_idxs = [n for n in range(q_idx_start, q_idx_start+self.q_slice)]
        s_img_paths_all, s_label_paths_all = [],[]
        for s_idx in range(self.n_shot):
            s_subj_img_path = self.s_img_paths[s_idx]
            s_subj_label_path = self.s_label_paths[s_idx]
            s_fnames = self.s_fnames_list[s_idx]
            ## choose support and query slice
            s_idxs = self.handle_idx(q_idxs, len(q_fnames), len(s_fnames))
            s_fnames_selected = [s_fnames[idx] for idx in s_idxs]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = [q_fnames[idx] for idx in q_idxs]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def get_len_train(self):
        # Train length is the subject count set in __init__.
        return self.length
    def get_len_test(self):
        # Test length is the total query-window count over all subjects.
        return self.length
    def get_test_subj_idx(self, idx):
        """Map a flat test index to (subject index, query window start)."""
        # for subj_idx,cnt in enumerate(self.slice_cnts):
        for subj_idx,cnt in enumerate(self.q_cnts):
            if idx < cnt:
                # print(subj_idx, idx, self.q_cnts[subj_idx], len(self.img_lists[subj_idx]))
                return subj_idx, idx
            else:
                idx -= cnt
        print("get_test_subj_idx function is not working.")
        assert False
    def get_cnts(self):
        ## only for test loader: per-subject query window counts
        return self.q_cnts
        # return self.slice_cnts
    def img_load(self, img_path, seed=0):
        """Load one slice with a fixed +0.25 intensity shift (CT-ORG specific)."""
        img_arr = np.load(img_path)+0.25
        # print(img_arr)
        return img_arr
    def set_support_volume(self, s_img_paths, s_label_paths):
        ## set support img path and label path for validation and testing
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            # Numerically sort "<n>.npy" slice files of each support volume.
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
class BaseLoader_CTORG(Base_dataset_ctorg):
    """Single-modality, single-label specialization for CT-ORG."""
    modal_i = [0] # there is only one modality
    label_i = 1.0 # there is only one label for each image
class TrainLoader_CTORG(BaseLoader_CTORG):
    """CT-ORG training loader: each __getitem__ draws a fresh random episode."""
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # idx is ignored; episodes are drawn randomly each call.
        return self.getitem_train()
class TestLoader_CTORG(BaseLoader_CTORG):
    """CT-ORG test loader: deterministic episodes over fixed support volumes."""
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
if __name__ == "__main__":
pass
# main() | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/dataloaders_medical/dataset_decathlon.py | .py | 16,033 | 483 | import os
import re
import sys
import json
import random
import numpy as np
sys.path.append("/home/soopil/Desktop/github/python_utils")
# sys.path.append("../dataloaders_medical")
from dataloaders_medical.common import *
# from common import *
import cv2
from cv2 import resize
def totensor(arr):
    """Wrap a numpy array as a float32 torch tensor."""
    return torch.from_numpy(arr).float()
def random_augment(s_imgs, s_labels, q_imgs, q_labels):
    """Apply one shared random rotation plus optional flips to all four stacks.

    Support arrays are 5-D and query arrays 4-D, so the query spatial axes
    sit one position earlier.  Images and labels get identical transforms.
    """
    # Shared 0-3 quarter-turn count keeps support and query aligned.
    turns = random.sample(range(4), 1)[0]
    s_imgs = np.rot90(s_imgs, turns, (3, 4)).copy()
    s_labels = np.rot90(s_labels, turns, (3, 4)).copy()
    q_imgs = np.rot90(q_imgs, turns, (2, 3)).copy()
    q_labels = np.rot90(q_labels, turns, (2, 3)).copy()
    # Flip along the first spatial axis with probability 0.5.
    if random.random() < 0.5:
        s_imgs, s_labels = np.flip(s_imgs, 3).copy(), np.flip(s_labels, 3).copy()
        q_imgs, q_labels = np.flip(q_imgs, 2).copy(), np.flip(q_labels, 2).copy()
    # Flip along the second spatial axis with probability 0.5.
    if random.random() < 0.5:
        s_imgs, s_labels = np.flip(s_imgs, 4).copy(), np.flip(s_labels, 4).copy()
        q_imgs, q_labels = np.flip(q_imgs, 3).copy(), np.flip(q_labels, 3).copy()
    return s_imgs, s_labels, q_imgs, q_labels
class Base_dataset():
    """Medical-Decathlon variant of the few-shot slice dataset.

    Same episode construction as the other Base_dataset classes; test mode
    is detected by the subclass name suffix after the last "_" starting
    with "test" (e.g. Spleen_test), and test mode counts len - q_slice
    query windows per subject.
    """
    def __init__(self, img_paths, label_paths, config):
        """
        Dataset constructor.

        Args:
            img_paths: per-subject image slice directories.
            label_paths: per-subject label slice directories, parallel to img_paths.
            config: dict with keys 'mode', 'size', 'q_slice', 'n_shot', 's_idx'.
        """
        super().__init__()
        self.mode = config['mode']
        self.length = len(img_paths)
        print(f"# of data : {self.length}")
        self.valid_img_n = len(img_paths)
        self.size = config['size']
        self.img_paths = img_paths
        self.label_paths = label_paths
        self.q_slice = config["q_slice"]
        self.n_shot = config["n_shot"]
        self.s_idx = config["s_idx"]
        self.is_train = True
        # Subclass-name sniffing: "*_test" subclasses run in test mode.
        if str(self.__class__).split("_")[-1][:4]=="test":
            self.is_train = False
        ## load file names in advance: numerically sorted "<n>.npy" per subject
        self.img_lists = []
        for img_path in self.img_paths:
            fnames = os.listdir(img_path)
            fnames = [int(e.split(".")[0]) for e in fnames]
            fnames.sort()
            fnames = [f"{e}.npy" for e in fnames]
            self.img_lists.append(fnames)
        ## remove ids if its slice number is less than max_slice number
        if self.is_train:
            remove_ids = []
            for i, img_list in enumerate(self.img_lists):
                if len(img_list) < self.q_slice:
                    remove_ids.append(i)
            print(self.is_train, self.__class__, self.valid_img_n," # of remove ids : ", len(remove_ids))
            # Pop from the end so earlier indices stay valid.
            for id in reversed(remove_ids):
                self.img_paths.pop(id)
                self.label_paths.pop(id)
                self.img_lists.pop(id)
                self.valid_img_n -= 1
        ## count the test counts for validation
        else:
            self.q_cnts = []
            for img_list in self.img_lists:
                self.q_cnts.append(len(img_list)-self.q_slice)
            self.length = sum(self.q_cnts)
    def get_sample(self, s_img_paths_all, s_label_paths_all, q_img_paths, q_label_paths):
        """Load, resize and stack support/query slices into the episode dict."""
        # Shared seed keeps per-episode modality selection synchronized
        # (see Tumor_Base.img_load / Prostate_Base.img_load overrides).
        seed = random.randrange(0,1000)
        # s_length = len(s_img_paths)
        s_imgs_all, s_labels_all = [],[]
        for s_idx, s_img_paths in enumerate(s_img_paths_all):
            s_label_paths = s_label_paths_all[s_idx]
            imgs, labels = [],[]
            for i in range(len(s_img_paths)):
                img_path, label_path = s_img_paths[i], s_label_paths[i]
                img = self.img_load(img_path, seed)
                img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
                img = np.expand_dims(img, axis=0)
                imgs.append(img)
                label = np.load(label_path)
                # Nearest-neighbour keeps label ids unblended.
                label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
                label = np.expand_dims(label, axis=0)
                labels.append(label)
            s_imgs = np.stack(imgs,axis=0)
            s_labels = np.stack(labels,axis=0)
            s_imgs_all.append(s_imgs)
            s_labels_all.append(s_labels)
        s_imgs = np.stack(s_imgs_all,axis=0)
        s_labels = np.stack(s_labels_all,axis=0)
        q_length = len(q_img_paths)
        imgs, labels = [],[]
        for i in range(len(q_img_paths)):
            img_path, label_path = q_img_paths[i], q_label_paths[i]
            img = self.img_load(img_path, seed)
            img = resize(img, dsize=(self.size, self.size), interpolation=cv2.INTER_AREA)
            img = np.expand_dims(img, axis=0)
            imgs.append(img)
            label = np.load(label_path)
            label = resize(label, dsize=(self.size, self.size), interpolation=cv2.INTER_NEAREST)
            label = np.expand_dims(label, axis=0)
            labels.append(label)
        q_imgs = np.stack(imgs,axis=0)
        q_labels = np.stack(labels,axis=0)
        # print(imgs.shape) [slice_num,1,256,256]?
        if self.is_train: ## random augmentation : flip, rotation
            s_imgs, s_labels, q_imgs, q_labels = random_augment(s_imgs, s_labels, q_imgs, q_labels)
        sample = {
            "s_x":totensor(s_imgs),
            "s_y":totensor(s_labels), #.long()
            "q_x":totensor(q_imgs),
            "q_y":totensor(q_labels), #.long()
            # "s_length":s_length,
            # "q_length":q_length,
            "s_fname":s_img_paths_all,
            "q_fname":q_img_paths,
        }
        return sample
    def handle_idx(self, q_idxs, q_n, s_n):
        """
        Map query slice indices to support indices by relative depth fraction.
        :return: supp_idxs
        """
        s_idxs = []
        for q_idx in q_idxs:
            q_ratio = (q_idx)/(q_n-1)
            s_idx = round((s_n-1)*q_ratio)
            s_idxs.append(s_idx)
        return s_idxs
    def getitem_train(self):
        """Draw a random episode: n_shot support subjects plus one query subject."""
        ## choose support and target
        idx_space = [i for i in range(self.valid_img_n)]
        subj_idxs = random.sample(idx_space, self.n_shot+1)
        s_subj_idxs = subj_idxs[:self.n_shot]
        q_subj_idx = subj_idxs[self.n_shot]
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        # NOTE(review): raises ValueError when len(q_fnames) == q_slice; the
        # __init__ filter keeps such subjects — confirm len-q_slice+1 intent.
        q_idx_start = random.randrange(0,len(q_fnames)-self.q_slice)
        q_idxs = [n for n in range(q_idx_start, q_idx_start+self.q_slice)]
        is_flip = False
        if random.random() < 0.5:
            is_flip = True
            # NOTE(review): reverse() mutates the cached self.img_lists entry.
            q_fnames.reverse() # ??? when to reverse
        s_img_paths_all, s_label_paths_all = [],[]
        for s_subj_idx in s_subj_idxs:
            s_subj_img_path = self.img_paths[s_subj_idx]
            s_subj_label_path = self.label_paths[s_subj_idx]
            s_fnames = self.img_lists[s_subj_idx]
            ## flip augmentation
            if is_flip:
                s_fnames.reverse()
            ## choose support and query slice
            s_idxs = self.handle_idx(q_idxs, len(q_fnames), len(s_fnames))
            # s_idx = self.handle_idx(len(s_fnames), q_idx, len(q_fnames))
            s_fnames_selected = [s_fnames[idx] for idx in s_idxs]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = [q_fnames[idx] for idx in q_idxs]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def getitme_test(self, idx):
        """Deterministic test episode (name keeps the original spelling)."""
        q_subj_idx, q_idx_start = self.get_test_subj_idx(idx)
        q_subj_img_path = self.img_paths[q_subj_idx]
        q_subj_label_path = self.label_paths[q_subj_idx]
        q_fnames = self.img_lists[q_subj_idx]
        q_idxs = [n for n in range(q_idx_start, q_idx_start+self.q_slice)]
        s_img_paths_all, s_label_paths_all = [],[]
        for s_idx in range(self.n_shot):
            s_subj_img_path = self.s_img_paths[s_idx]
            s_subj_label_path = self.s_label_paths[s_idx]
            s_fnames = self.s_fnames_list[s_idx]
            ## choose support and query slice
            s_idxs = self.handle_idx(q_idxs, len(q_fnames), len(s_fnames))
            s_fnames_selected = [s_fnames[idx] for idx in s_idxs]
            ## define path, load data, and return
            s_img_paths_selected = [f"{s_subj_img_path}/{fname}" for fname in s_fnames_selected]
            s_label_paths_selected = [f"{s_subj_label_path}/{fname}" for fname in s_fnames_selected]
            s_img_paths_all.append(s_img_paths_selected)
            s_label_paths_all.append(s_label_paths_selected)
        q_fnames_selected = [q_fnames[idx] for idx in q_idxs]
        q_img_paths_selected = [f"{q_subj_img_path}/{fname}" for fname in q_fnames_selected]
        q_label_paths_selected = [f"{q_subj_label_path}/{fname}" for fname in q_fnames_selected]
        return self.get_sample(s_img_paths_all, s_label_paths_all, q_img_paths_selected, q_label_paths_selected)
    def get_len_train(self):
        # Train length is the subject count set in __init__.
        return self.length
    def get_len_test(self):
        # Test length is the total query-window count over all subjects.
        return self.length
    # def get_val_subj_idx(self, idx):
    #     for subj_idx,cnt in enumerate(self.q_cnts):
    #         if idx < cnt:
    #             return subj_idx, idx*self.q_slice
    #         else:
    #             idx -= cnt
    #
    #     print("get_val_subj_idx function is not working.")
    #     assert False
    def get_test_subj_idx(self, idx):
        """Map a flat test index to (subject index, query window start)."""
        # for subj_idx,cnt in enumerate(self.slice_cnts):
        for subj_idx,cnt in enumerate(self.q_cnts):
            if idx < cnt:
                # print(subj_idx, idx, self.q_cnts[subj_idx], len(self.img_lists[subj_idx]))
                return subj_idx, idx
            else:
                idx -= cnt
        print("get_test_subj_idx function is not working.")
        assert False
    def get_cnts(self):
        ## only for test loader: per-subject query window counts
        return self.q_cnts
        # return self.slice_cnts
    def img_load(self, img_path, seed=0):
        """Load one slice; `seed` is unused here but lets subclasses pick a modality."""
        img_arr = np.load(img_path)
        # print(img_arr)
        return img_arr
    def set_support_volume(self, s_img_paths, s_label_paths):
        ## set support img path and label path for validation and testing
        self.s_img_paths = []
        self.s_label_paths = []
        self.s_fnames_list = []
        for i in range(len(s_img_paths)):
            # Numerically sort "<n>.npy" slice files of each support volume.
            s_fnames = os.listdir(s_img_paths[i])
            s_fnames = [int(e.split(".")[0]) for e in s_fnames]
            s_fnames.sort()
            print(f'support img {i} path : {s_img_paths[i]} length : {len(s_fnames)}')
            self.s_img_paths.append(s_img_paths[i])
            self.s_label_paths.append(s_label_paths[i])
            self.s_fnames_list.append([f"{e}.npy" for e in s_fnames])
class Spleen_Base(Base_dataset):
modal_i = 0
label_i = 1.0
class Spleen_train(Spleen_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class Spleen_test(Spleen_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class Liver_Base(Base_dataset):
modal_i = 0 # only 1 modality
label_i = 1.0 # use both 1 : cancer / 2 : liver
class Liver_train(Liver_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class Liver_test(Liver_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class Tumor_Base(Base_dataset):
modal_i = [0, 1, 2, 3] # 4 modalities
label_i = 3.0 # 1 : edema / 2 : non enhancing tumor / 3 : enhancing tumour
def img_load(self, img_path, seed=0):
modal_idx = seed%len(self.modal_i)
img_arr = np.load(img_path)
return img_arr[modal_idx] # synchronize with query img and other support img
class Tumor_train(Tumor_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class Tumor_test(Tumor_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class Prostate_Base(Base_dataset):
modality_n = 2
modal_i = 0
label_i = 2.0
def img_load(self, img_path, seed=0):
img_arr = np.load(img_path)
return img_arr[self.modal_i]
class Prostate_train(Prostate_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class Prostate_test(Prostate_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class Hippo_Base(Base_dataset):
modal_i = 0
label_i = 1.0 # use both 1.0 and 2.0
class Hippo_train(Hippo_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class Hippo_test(Hippo_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class Lung_Base(Base_dataset):
modal_i = 0 # only 1 modality
label_i = 1.0 # use both 1 : cancer
class Lung_train(Lung_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class Lung_test(Lung_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class HepaticVessel_Base(Base_dataset):
modality_n = 1
# modal_i = 0
label_i = 1.0 # 1 for vessel, 2 for tumour
# use only vessel
class HepaticVessel_train(HepaticVessel_Base):
def __len__(self):
return self.get_len_train()
def __getitem__(self, idx):
return self.getitem_train()
class HepaticVessel_test(HepaticVessel_Base):
def __len__(self):
return self.get_len_test()
def __getitem__(self, idx):
return self.getitme_test(idx)
class Heart_Base(Base_dataset):
    # Heart task: single modality; label 1.0 (left atrium).
    modality_n = 1
    # modal_i = 0
    label_i = 1.0 # 1 for left atrium
class Heart_train(Heart_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # `idx` unused — training sampling is delegated to the base class.
        return self.getitem_train()
class Heart_test(Heart_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Pancreas_Base(Base_dataset):
    # Pancreas task: single modality; label 1.0.
    modality_n = 1 # only 1 modality
    # modal_i = 0
    label_i = 1.0 # 1 for pancreas, 2 for cancer
    # use all of them
class Pancreas_train(Pancreas_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # `idx` unused — training sampling is delegated to the base class.
        return self.getitem_train()
class Pancreas_test(Pancreas_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
class Colon_Base(Base_dataset):
    # Colon task: single modality; label 1.0 (colon cancer primaries).
    modality_n = 1 # only 1 modality
    # modal_i = 0
    label_i = 1.0 # 1 for colon cancer primaries
    # use 1.0
class Colon_train(Colon_Base):
    def __len__(self):
        return self.get_len_train()
    def __getitem__(self, idx):
        # `idx` unused — training sampling is delegated to the base class.
        return self.getitem_train()
class Colon_test(Colon_Base):
    def __len__(self):
        return self.get_len_test()
    def __getitem__(self, idx):
        return self.getitme_test(idx)
if __name__ == "__main__":
pass
# main() | Python |
3D | oopil/3D_medical_image_FSS | BiGRU_fewshot/dataloaders_medical/prostate.py | .py | 9,422 | 245 | import sys
import glob
import json
import re
from glob import glob
from util.utils import *
from dataloaders_medical.dataset import *
from dataloaders_medical.dataset_decathlon import *
from dataloaders_medical.dataset_CT_ORG import *
import numpy as np
class MetaSliceData_train():
    """Meta-dataset over several member datasets.

    Each access picks one member dataset uniformly at random and delegates
    indexing to it; the reported length is the configured iteration count,
    not the amount of underlying data.
    """
    def __init__(self, datasets, iter_n=100):
        super().__init__()
        self.datasets = datasets
        self.dataset_n = len(datasets)
        self.iter_n = iter_n
    def __len__(self):
        # One "epoch" is exactly iter_n draws.
        return self.iter_n
    def __getitem__(self, idx):
        # Randomly choose a member dataset, then index into it.
        chosen = random.sample(self.datasets, 1)[0]
        return chosen.__getitem__(idx)
def metadata():
    """Return the static description of the Abdomen benchmark.

    Keys: raw/preprocessed data directories, the list of task (organ) ids,
    and the organ-name table indexed by label id (0 = background).
    """
    organ_names = [
        "background",
        "spleen",                       # 1
        "right kidney",                 # 2
        "left kidney",                  # 3
        "gallbladder",                  # 4
        "esophagus",                    # 5
        "liver",                        # 6
        "stomach",                      # 7
        "aorta",                        # 8
        "inferior vana cava",           # 9
        "portal vein & splenic vein",   # 10
        "pancreas",                     # 11
        "right adrenal gland",          # 12
        "left adrenal gland",           # 13
        "bladder",                      # 14
        "uturus",                       # 15
        "rectum",                       # 16
        "small bowel",                  # 17
    ]
    return {
        "src_dir": "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training",
        "trg_dir": "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d",
        "trg_dir2": "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d_2",
        "trg_dir3": "/user/home2/soopil/Datasets/MICCAI2015challenge/Abdomen/RawData/Training_2d_denoise",
        "Tasks": list(range(1, 14)),
        "Organs": organ_names,
    }
def meta_data(_config, is_finetuning=False):
    """Build (meta-train, validation, test) datasets for the target organ.

    Trains on all non-target organs (plus optionally a few target volumes via
    `add_target`), validates/tests on the target organ, and can swap the test
    set for an external benchmark. When `is_finetuning`, the meta-train set is
    a small loader over the target's support volumes instead.
    """
    def path_collect(idx, option='train'):
        # Collect 2D slice image/label paths for organ `idx` and split `option`.
        img_paths = glob(f"{meta['trg_dir2']}/{idx}/{option}/img/*")
        label_paths = glob(f"{meta['trg_dir2']}/{idx}/{option}/label/*")
        return img_paths, label_paths
    def spliter(idx):
        # Return train/valid/test image and label path lists for one organ.
        tr_imgs, tr_labels = path_collect(idx, 'train')
        val_imgs, val_labels = path_collect(idx, 'valid')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels
    target_task = _config['target']
    meta = metadata()
    print(meta['trg_dir'])
    # tasks = meta['Tasks']
    tasks = [1,2,3,5,6,7,8,9,14,15]
    # tasks_remove = [4, 10, 12, 13] # 7 11
    # tasks_remove = [4, 5, 8, 9, 10, 11, 12, 13]
    # tasks_remove = [4, 5, 8, 9, 10, 11, 12, 13, 16, 17]
    ## we shouldn't use both left and right kidneys
    # for task in tasks_remove:
    #     tasks.remove(task)
    kidneys = [2,3]
    if target_task in kidneys:
        # Drop the opposite kidney from the training tasks.
        kidneys.remove(target_task)
        other_task = kidneys[0]
        try:
            tasks.remove(other_task)
        except:
            pass
    print(f"tasks : {tasks}")
    datasets = {}
    for task in tasks:
        tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels = spliter(task)
        datasets[task] = [TrainLoader(tr_imgs, tr_labels, _config), TestLoader(val_imgs, val_labels, _config), TestLoader(ts_imgs, ts_labels, _config)]
    tr_imgs, tr_labels, val_imgs, val_labels, ts_imgs, ts_labels = spliter(target_task)
    if _config['add_target']:
        # Add the first n_add_target target volumes to the training pool.
        n_add_target = _config['add_target']
        datasets[target_task] = [TrainLoader(tr_imgs[:n_add_target], tr_labels[:n_add_target], _config), TestLoader(val_imgs, val_labels, _config), TestLoader(ts_imgs, ts_labels, _config)]
        val_dataset = datasets[target_task][1]
        ts_dataset = datasets[target_task][2]
        tr_datasets = [dataset[0] for dataset in datasets.values()]
    else:
        val_dataset = datasets[target_task][1]
        ts_dataset = datasets[target_task][2]
        if _config['is_super']:
            # Supervised upper bound: keep the target organ in training too.
            pass
        else:
            datasets.pop(target_task)
        tr_datasets = [dataset[0] for dataset in datasets.values()]
    print(f"training tasks : {datasets.keys()}")
    print(f"target tasks : {target_task}")
    # NOTE(review): the config file defines `internal_train`/`internal_test`,
    # not `internal` — confirm this key exists or this raises KeyError.
    if _config["internal"]:
        pass
    else:
        tr_imgs, tr_labels, ts_dataset = external_testset(_config, target_task)
    ## set the support volume for testing
    if is_finetuning:
        # Fine-tune on n_shot+1 target support volumes for n_update iterations.
        tr_dataset = TrainLoader(tr_imgs[_config['s_idx']:_config['s_idx']+_config['n_shot']+1], tr_labels[_config['s_idx']:_config['s_idx']+_config['n_shot']+1], _config)
        meta_tr_dataset = MetaSliceData_train([tr_dataset], iter_n=_config['n_update'])
    else:
        meta_tr_dataset = MetaSliceData_train(tr_datasets, iter_n=_config['n_iter'])
    val_dataset.set_support_volume(tr_imgs[_config['s_idx']:_config['s_idx']+_config['n_shot']], tr_labels[_config['s_idx']:_config['s_idx']+_config['n_shot']])
    ts_dataset.set_support_volume(tr_imgs[_config['s_idx']:_config['s_idx']+_config['n_shot']], tr_labels[_config['s_idx']:_config['s_idx']+_config['n_shot']])
    return meta_tr_dataset, val_dataset, ts_dataset
def external_testset(_config, target_task):
    """Build support paths and a test dataset from an external benchmark.

    Maps the internal organ id (`target_task`) to the corresponding task of
    either the Medical Decathlon or CT-ORG dataset, depending on
    `_config["external"]`. Returns (support imgs, support labels, test dataset).
    """
    def decathlon_spliter(idx):
        # Return train/test path lists for Decathlon task number `idx` (1-based).
        def path_collect(idx, option='train'):
            tasks = ["Task01_BrainTumour",
                     "Task02_Heart",
                     "Task03_Liver",
                     "Task04_Hippocampus",
                     "Task05_Prostate",
                     "Task06_Lung",
                     "Task07_Pancreas",
                     "Task08_HepaticVessel",
                     "Task09_Spleen",
                     "Task10_Colon",
                     "Task11_Davis"
                     ]
            src_path='/user/home2/soopil/Datasets/Decathlon_2d'
            img_paths = glob(f"{src_path}/{tasks[idx - 1]}/{option}/img/*")
            label_paths = glob(f"{src_path}/{tasks[idx - 1]}/{option}/label/*")
            return img_paths, label_paths
        tr_imgs, tr_labels = path_collect(idx, 'train')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, ts_imgs, ts_labels
    def CT_ORG_spliter(idx):
        # Return train/test path lists for CT-ORG organ id `idx`.
        def path_collect(idx, option='train'):
            # NOTE(review): the trailing comma makes `Organs` a 1-tuple and it
            # is never used — dead code kept for reference only.
            Organs = ["background",
                      "Liver", # 1
                      "Bladder", # 2
                      "Lung", # 3
                      "Kidney", # 4
                      "Bone", # 5
                      "Brain", # 6
                      ],
            src_path="/user/home2/soopil/Datasets/CT_ORG/Training_2d_align"
            # src_path="/user/home2/soopil/Datasets/CT_ORG/Training_2d_align_spacing"
            # src_path="/user/home2/soopil/Datasets/CT_ORG/Training_2d_align_spacing_v2"
            img_paths = glob(f"{src_path}/{idx}/{option}/img/*")
            label_paths = glob(f"{src_path}/{idx}/{option}/label/*")
            return img_paths, label_paths
        tr_imgs, tr_labels = path_collect(idx, 'train')
        ts_imgs, ts_labels = path_collect(idx, 'test')
        return tr_imgs, tr_labels, ts_imgs, ts_labels
    external = _config["external"]
    print(f"external testset : {external}")
    if external == "decathlon":
        if target_task == 1: # spleen
            target_idx_decath = 9
            tr_imgs, tr_labels, ts_imgs, ts_labels = decathlon_spliter(target_idx_decath)
            ts_dataset = Spleen_test(ts_imgs, ts_labels, _config)
        elif target_task == 6:
            # liver
            target_idx_decath = 3
            tr_imgs, tr_labels, ts_imgs, ts_labels = decathlon_spliter(target_idx_decath)
            ts_dataset = Liver_test(ts_imgs, ts_labels, _config)
        else:
            print("There isn't according organ in Decathlon dataset.")
            assert False
        print(f"target index in external dataset : {target_idx_decath}")
    elif external == "CT_ORG":
        # Optionally truncate the test list for a quick smoke test.
        is_fast_test = _config["is_fast_test"]
        n_fast_test = _config["n_fast_test"]
        if target_task == 3: # kidney
            target_idx_ctorg = 4
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            if is_fast_test:
                ts_imgs = ts_imgs[:n_fast_test]
                ts_labels = ts_labels[:n_fast_test]
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        elif target_task == 6: # liver
            target_idx_ctorg = 1
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            if is_fast_test:
                ts_imgs = ts_imgs[:n_fast_test]
                ts_labels = ts_labels[:n_fast_test]
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        elif target_task == 14: # bladder
            target_idx_ctorg = 2
            tr_imgs, tr_labels, ts_imgs, ts_labels = CT_ORG_spliter(target_idx_ctorg)
            if is_fast_test:
                ts_imgs = ts_imgs[:n_fast_test]
                ts_labels = ts_labels[:n_fast_test]
            ts_dataset = TestLoader_CTORG(ts_imgs, ts_labels, _config)
        else:
            print("There isn't according organ in CT_ORG dataset.")
            assert False
        print(f"target index in external dataset : {target_idx_ctorg}")
    else:
        print("configuration of external dataset is wrong")
        assert False
    return tr_imgs, tr_labels, ts_dataset
if __name__=="__main__":
pass | Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/train.py | .py | 9,194 | 235 | """Training Script"""
import os
import shutil
import numpy as np
import pdb
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import MultiStepLR
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
import torchvision.transforms as transforms
from torchvision.utils import make_grid
from nn_common_modules import losses
if __name__ == '__main__':
from util.utils import set_seed, CLASS_LABELS, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.common import *
from dataloaders_medical.prostate import *
from model import MedicalFSS
else:
from .util.utils import set_seed, CLASS_LABELS, date
from .config import ex
from tensorboardX import SummaryWriter
from .dataloaders_medical.common import *
from .dataloaders_medical.prostate import *
from .model import MedicalFSS
def overlay_color(img, mask, label, scale=50):
    """Overlay prediction mask and ground-truth label on a grayscale image.

    :param img: [1, 256, 256] grayscale image tensor
    :param mask: [1, 256, 256] predicted mask
    :param label: [1, 256, 256] ground-truth mask
    :return: single-element list with the [3, 256, 256] overlay tensor
    """
    # pdb.set_trace()
    # NOTE(review): the `scale` argument is immediately overwritten with the
    # image mean, so callers' scale=... values have no effect.
    scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    # Build 3-channel versions: mask goes into channel 0, label into channel 1.
    zeros = torch.zeros_like(mask)
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    mask = torch.stack(zeros,dim=0)
    # NOTE(review): zeros[0] still holds the mask here, so the stacked label
    # tensor also carries the mask in channel 0 — confirm this is intended.
    zeros[1] = label
    label = torch.stack(zeros,dim=0)
    img_3ch = torch.cat([img,img,img],dim=0)
    masked = img_3ch+mask.float()*scale+label.float()*scale
    return [masked]
@ex.capture
def get_info(_run):
    # Sacred-captured helper: print the current run id and experiment name.
    print(_run._id)
    print(_run.experiment_info["name"])
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: train the U-Net upper-bound model.

    Trains for `n_steps` epochs with BCE loss over per-slice predictions,
    validates every epoch, and saves `lowest.pth` (best validation loss) or
    `last.pth` under the run's snapshot directory.
    """
    if _run.observers:
        # Archive this run's source files for reproducibility.
        os.makedirs(f'{_run.observers[0].dir}/snapshots', exist_ok=True)
        for source_file, _ in _run.experiment_info['sources']:
            os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                        exist_ok=True)
            _run.observers[0].save_file(source_file, f'source/{source_file}')
        shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    print(f"experiment : {_run.experiment_info['name']} , ex_ID : {_run._id}")
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    device = torch.device(f"cuda:{_config['gpu_id']}")
    # torch.cuda.set_device(device=_config['gpu_id'])
    # torch.set_num_threads(1)
    model = MedicalFSS(_config,device).to(device)
    _log.info('###### Load data ######')
    make_data = meta_data
    tr_dataset, val_dataset, ts_dataset = make_data(_config)
    trainloader = DataLoader(
        dataset=tr_dataset,
        batch_size=_config['batch_size'],
        shuffle=True,
        num_workers=_config['n_work'],
        pin_memory=False, #True load data while training gpu
        drop_last=True
    )
    validationloader = DataLoader(
        dataset=val_dataset,
        batch_size=1,
        # batch_size=_config['batch_size'],
        shuffle=True,
        num_workers=_config['n_work'],
        pin_memory=False,#True
        drop_last=False
    )
    # all_samples = test_loader_Spleen(split=1) # for iterative validation
    _log.info('###### Set optimizer ######')
    print(_config['optim'])
    # optimizer = torch.optim.SGD(model.parameters(), **_config['optim'])
    optimizer = torch.optim.Adam(list(model.parameters()),
                                 _config['optim']['lr'])
    # scheduler = MultiStepLR(optimizer, milestones=_config['lr_milestones'], gamma=0.1)
    # NOTE(review): `scheduler.step()` is never called below, so the milestone
    # decay has no effect — confirm whether LR decay was intended.
    scheduler = MultiStepLR(optimizer, milestones=_config['lr_milestones'], gamma=0.1)
    # criterion_ce = nn.CrossEntropyLoss()
    # criterion = losses.DiceLoss()
    criterion = nn.BCELoss()
    if _config['record']: ## tensorboard visualization
        _log.info('###### define tensorboard writer #####')
        _log.info(f'##### board/train_{_config["board"]}_{date()}')
        writer = SummaryWriter(f'board/train_{_config["board"]}_{date()}')
    i_iter = 0
    log_loss = {'loss': 0, 'align_loss': 0}
    min_val_loss = 100000.0
    min_iter = 0
    min_epoch = 0
    iter_n_train, iter_n_val = len(trainloader), len(validationloader)
    _log.info('###### Training ######')
    q_slice_n = _config['q_slice']
    blank = torch.zeros([1, 256, 256]).to(device)
    iter_print = _config['iter_print']
    for i_epoch in range(_config['n_steps']):
        loss_epoch = 0
        ## training stage
        for i_iter, sample_train in enumerate(trainloader):
            preds = []
            loss_per_video = 0.0
            optimizer.zero_grad()
            s_x = sample_train['s_x'].to(device) # [B, Support, slice_num, 1, 256, 256]
            s_y = sample_train['s_y'].to(device) # [B, Support, slice_num, 1, 256, 256]
            preds = model(s_x)
            # Accumulate BCE loss over each predicted slice.
            for frame_id in range(q_slice_n):
                s_yi = s_y[:, 0, frame_id, 0, :, :] # [B, 1, 256, 256]
                yhati = preds[frame_id]
                # pdb.set_trace()
                # loss = criterion(F.softmax(yhati, dim=1), s_yi2)+criterion_ce(F.softmax(yhati, dim=1), s_yi2)
                # loss = criterion(F.softmax(yhati, dim=1), s_yi2)
                loss = criterion(yhati, s_yi)
                loss_per_video += loss
                # NOTE(review): this appends back onto the list returned by the
                # model while it is being indexed above — harmless for
                # range(q_slice_n) but likely unintended.
                preds.append(yhati)
            loss_per_video.backward()
            optimizer.step()
            loss_epoch += loss_per_video
            if iter_print:
                print(f"train, iter:{i_iter}/{iter_n_train}, iter_loss:{loss_per_video}", end='\r')
            if _config['record'] and i_iter == 0:
                batch_i = 0
                frames = []
                # NOTE(review): `q_x` is never defined in this function — this
                # branch raises NameError when record=True (probably meant s_x).
                for frame_id in range(0, q_slice_n):
                    # query_pred = output.argmax(dim=1)
                    frames += overlay_color(q_x[batch_i, frame_id], preds[frame_id][batch_i].round(), s_y[batch_i, frame_id], scale=_config['scale'])
                for frame_id in range(0, q_slice_n):
                    frames += overlay_color(s_x[batch_i, 0, frame_id], blank, s_y[batch_i, 0, frame_id], scale=_config['scale'])
                visual = make_grid(frames, normalize=True, nrow=5)
                writer.add_image("train/visual", visual, i_epoch)
        with torch.no_grad(): ## validation stage
            loss_valid = 0
            preds = []
            for i_iter, sample_valid in enumerate(validationloader):
                loss_per_video = 0.0
                optimizer.zero_grad()
                s_x = sample_valid['s_x'].to(device) # [B, slice_num, 1, 256, 256]
                s_y = sample_valid['s_y'].to(device) # [B, slice_num, 1, 256, 256]
                preds = model(s_x)
                for frame_id in range(q_slice_n):
                    s_yi = s_y[:, 0, frame_id, 0, :, :] # [B, 1, 256, 256]
                    # s_yi2 = s_yi.squeeze(1) # [B, 256, 256]
                    yhati = preds[frame_id]
                    # loss = criterion(F.softmax(yhati, dim=1), s_yi2) + criterion_ce(F.softmax(yhati, dim=1), s_yi2)
                    # loss = criterion(F.softmax(yhati, dim=1), s_yi2)
                    loss = criterion(yhati, s_yi)
                    loss_per_video += loss
                    preds.append(yhati)
                loss_valid += loss_per_video
                if iter_print:
                    print(f"valid, iter:{i_iter}/{iter_n_val}, iter_loss:{loss_per_video}", end='\r')
                if _config['record'] and i_iter == 0:
                    batch_i = 0
                    frames = []
                    # NOTE(review): same undefined `q_x` issue as the training branch.
                    for frame_id in range(0, q_slice_n):
                        frames += overlay_color(q_x[batch_i, frame_id], preds[frame_id][batch_i].round(), s_y[batch_i, frame_id], scale=_config['scale'])
                    for frame_id in range(0, q_slice_n):
                        frames += overlay_color(s_x[batch_i, 0, frame_id], blank, s_y[batch_i, 0, frame_id], scale=_config['scale'])
                    visual = make_grid(frames, normalize=True, nrow=5)
                    writer.add_image("valid/visual", visual, i_epoch)
            # Track the best epoch and choose the checkpoint filename.
            if min_val_loss > loss_valid:
                min_epoch = i_epoch
                min_val_loss = loss_valid
                print(f"train - epoch:{i_epoch}/{_config['n_steps']}, epoch_loss:{loss_epoch} valid_loss:{loss_valid} \t => model saved", end='\n')
                save_fname = f'{_run.observers[0].dir}/snapshots/lowest.pth'
            else:
                print(f"train - epoch:{i_epoch}/{_config['n_steps']}, epoch_loss:{loss_epoch} valid_loss:{loss_valid} - min epoch:{min_epoch}", end='\n')
                save_fname = f'{_run.observers[0].dir}/snapshots/last.pth'
        _run.log_scalar("training.loss", float(loss_epoch), i_epoch)
        _run.log_scalar("validation.loss", float(loss_valid), i_epoch)
        _run.log_scalar("min_epoch", min_epoch, i_epoch)
        if _config['record']:
            writer.add_scalar('loss/train_loss', loss_epoch, i_epoch)
            writer.add_scalar('loss/valid_loss', loss_valid, i_epoch)
        torch.save({
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, save_fname
        )
    # NOTE(review): `writer` is only created when record=True, so this line
    # raises NameError when record=False — guard it with the same flag.
    writer.close()
| Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/train_all.sh | .sh | 1,000 | 17 | # this code require gpu_id when running
# 1 2 3 4 5 6 7 8 9 10 11 12 13
# Usage: train_all.sh <gpu_id> <start_run_id>
# Trains then evaluates (lowest & last checkpoints) for each target organ id.
gpu=$1
j=$2
for organ in 1 3 6 14
do
# NOTE(review): the echoed command lacks the `_data` suffix on board= that
# the executed command uses — the log line does not match what actually runs.
echo "python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ} record=False n_work=3 is_lowerbound=True"
python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_data record=False n_work=3 is_lowerbound=True
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False snapshot=runs/PANet_train/${j}/snapshots/lowest.pth"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False snapshot=runs/PANet_train/${j}/snapshots/lowest.pth
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False snapshot=runs/PANet_train/${j}/snapshots/last.pth"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False snapshot=runs/PANet_train/${j}/snapshots/last.pth
# Advance to the sacred run id produced by the train call above.
j=$(($j+1))
done
3D | oopil/3D_medical_image_FSS | UNet_upperbound/train_all_ctorg.sh | .sh | 1,126 | 17 | # this code require gpu_id when running
# 1 2 3 4 5 6 7 8 9 10 11 12 13
# Usage: train_all_ctorg.sh <gpu_id> <start_run_id>
# Same as train_all.sh but evaluates on the external CT-ORG test set.
gpu=$1
j=$2
for organ in 3 6 14
do
echo "python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ} record=False n_work=3 external_train=CT_ORG is_lowerbound=True"
python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_data record=False n_work=3 external_train=CT_ORG is_lowerbound=True
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=CT_ORG snapshot=runs/PANet_train/${j}/snapshots/lowest.pth"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=CT_ORG snapshot=runs/PANet_train/${j}/snapshots/lowest.pth
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=CT_ORG snapshot=runs/PANet_train/${j}/snapshots/last.pth"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=CT_ORG snapshot=runs/PANet_train/${j}/snapshots/last.pth
# Advance to the sacred run id produced by the train call above.
j=$(($j+1))
done
3D | oopil/3D_medical_image_FSS | UNet_upperbound/train_all_decath.sh | .sh | 1,142 | 17 | # this code require gpu_id when running
# 1 2 3 4 5 6 7 8 9 10 11 12 13
# Usage: train_all_decath.sh <gpu_id> <start_run_id>
# Same as train_all.sh but evaluates on the external Decathlon test set.
gpu=$1
j=$2
for organ in 1 6
do
echo "python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ} record=False n_work=3 external_train=decathlon is_lowerbound=True "
python train.py with mode=train gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_data record=False n_work=3 external_train=decathlon is_lowerbound=True
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=decathlon snapshot=runs/PANet_train/${j}/snapshots/lowest.pth"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_lowest record=False external_test=decathlon snapshot=runs/PANet_train/${j}/snapshots/lowest.pth
echo "python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=decathlon snapshot=runs/PANet_train/${j}/snapshots/last.pth"
python test.py with gpu_id=${gpu} target=${organ} board=ID${j}_${organ}_last record=False external_test=decathlon snapshot=runs/PANet_train/${j}/snapshots/last.pth
# Advance to the sacred run id produced by the train call above.
j=$(($j+1))
done
3D | oopil/3D_medical_image_FSS | UNet_upperbound/model.py | .py | 1,070 | 28 | import pdb
import numpy as np
import torch
import torch.nn as nn
from models.encoder import Encoder
from models.decoder import Decoder
class MedicalFSS(nn.Module):
    """Encoder/decoder segmentation model for the U-Net upper bound.

    Encodes a stack of query slices with a (VGG-initialized) encoder and
    decodes a per-slice mask; `forward` returns one prediction per slice.
    """
    def __init__(self, config, device):
        super(MedicalFSS, self).__init__()
        self.config = config
        resize_dim = self.config['input_size']
        # Encoder output resolution after n_pool 2x down-samplings.
        self.encoded_h = int(resize_dim[0] / 2 ** self.config['n_pool'])
        self.encoded_w = int(resize_dim[1] / 2 ** self.config['n_pool'])
        self.encoder = Encoder(self.config['path']['init_path'], device)  # .to(device)
        self.decoder = Decoder(input_res=(self.encoded_h, self.encoded_w), output_res=resize_dim).to(device)
        self.q_slice_n = self.config['q_slice']
        self.ch = 256  # number of channels of embedding vector
    def forward(self, x):
        """x: [B, 1, slice_n, 1, H, W] -> list of slice_n [B, H, W] predictions."""
        x = x.squeeze(3)
        x = x.squeeze(1)  # [B, slice_n, H, W]
        x_enc, ft_list = self.encoder(x)
        yhat = self.decoder(x_enc, ft_list)  # [B, slice_n, H, W]
        # BUG FIX: the slice count was hard-coded to 5; use the configured
        # q_slice value so other slice counts work.
        out = [yhat[:, k, ...] for k in range(self.q_slice_n)]
        # pdb.set_trace()
        return out
3D | oopil/3D_medical_image_FSS | UNet_upperbound/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/test.py | .py | 9,388 | 246 | """Evaluation Script"""
import os
import shutil
import pdb
import tqdm
import numpy as np
import torch
import torch.optim
import torch.nn as nn
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from torchvision.transforms import Compose
from torchvision.utils import make_grid
from math import isnan
# from util.metric import Metric
from util.utils import set_seed, CLASS_LABELS, get_bbox, date
from config import ex
from tensorboardX import SummaryWriter
from dataloaders_medical.prostate import *
from model import MedicalFSS
from nn_common_modules import losses
import torch.nn.functional as F
def overlay_color(img, mask, label, scale=50):
    """Overlay prediction mask and ground-truth label on a grayscale image.

    :param img: [1, 256, 256] grayscale image tensor
    :param mask: [1, 256, 256] predicted mask
    :param label: [1, 256, 256] ground-truth mask
    :return: single-element list with the [3, 256, 256] overlay tensor
    """
    # pdb.set_trace()
    # NOTE(review): the `scale` argument is immediately overwritten with the
    # image mean, so callers' scale=... values have no effect.
    scale = np.mean(img.cpu().numpy())
    mask = mask[0]
    label = label[0]
    # Build 3-channel versions: mask goes into channel 0, label into channel 1.
    zeros = torch.zeros_like(mask)
    zeros = [zeros for _ in range(3)]
    zeros[0] = mask
    mask = torch.stack(zeros,dim=0)
    # NOTE(review): zeros[0] still holds the mask here, so the stacked label
    # tensor also carries the mask in channel 0 — confirm this is intended.
    zeros[1] = label
    label = torch.stack(zeros,dim=0)
    img_3ch = torch.cat([img,img,img],dim=0)
    masked = img_3ch+mask.float()*scale+label.float()*scale
    return [masked]
@ex.automain
def main(_run, _config, _log):
    """Sacred entry point: evaluate a trained checkpoint.

    Optionally fine-tunes on the target support volumes (`n_update` > 0),
    then runs sliding-slice inference over the test set and reports per-subject
    Dice scores.
    """
    # Archive this run's source files for reproducibility.
    for source_file, _ in _run.experiment_info['sources']:
        os.makedirs(os.path.dirname(f'{_run.observers[0].dir}/source/{source_file}'),
                    exist_ok=True)
        _run.observers[0].save_file(source_file, f'source/{source_file}')
    shutil.rmtree(f'{_run.observers[0].basedir}/_sources')
    set_seed(_config['seed'])
    cudnn.enabled = True
    cudnn.benchmark = True
    torch.cuda.set_device(device=_config['gpu_id'])
    torch.set_num_threads(1)
    device = torch.device(f"cuda:{_config['gpu_id']}")
    _log.info('###### Load data ######')
    data_name = _config['dataset']
    make_data = meta_data
    q_slice_n = _config['q_slice']
    iter_print = _config['iter_print']
    if _config['record']:
        _log.info('###### define tensorboard writer #####')
        board_name = f'board/test_{_config["board"]}_{date()}'
        writer = SummaryWriter(board_name)
    if _config["n_update"]:
        # Fine-tuning branch: adapt the checkpoint on the support volumes.
        _log.info('###### fine tuning with support data of target organ #####')
        _config["n_shot"] = _config["n_shot"]-1
        _log.info('###### Create model ######')
        model = MedicalFSS(_config, device).to(device)
        checkpoint = torch.load(_config['snapshot'], map_location='cpu')
        print("checkpoint keys : ", checkpoint.keys())
        # initializer.load_state_dict(checkpoint['initializer'])
        model.load_state_dict(checkpoint['model'])
        tr_dataset, val_dataset, ts_dataset = make_data(_config, is_finetuning=True)
        trainloader = DataLoader(
            dataset=tr_dataset,
            batch_size=1,
            shuffle=False,
            pin_memory=False,
            drop_last=False
        )
        optimizer = torch.optim.Adam(list(model.parameters()),_config['optim']['lr'])
        # criterion_ce = nn.CrossEntropyLoss()
        # criterion = losses.DiceLoss()
        criterion = nn.BCELoss()
        for i_iter, sample_train in enumerate(trainloader):
            preds = []
            loss_per_video = 0.0
            optimizer.zero_grad()
            s_x = sample_train['s_x'].to(device) # [B, Support, slice_num, 1, 256, 256]
            s_y = sample_train['s_y'].to(device) # [B, Support, slice_num, 1, 256, 256]
            preds = model(s_x)
            for frame_id in range(q_slice_n):
                s_yi = s_y[:, 0, frame_id, 0, :, :] # [B, 1, 256, 256]
                yhati = preds[frame_id]
                # loss = criterion(F.softmax(yhati, dim=1), q_yi2)
                # loss = criterion(F.softmax(yhati, dim=1), q_yi2)+criterion_ce(F.softmax(yhati, dim=1), q_yi2)
                loss = criterion(yhati, s_yi)
                loss_per_video += loss
                # NOTE(review): appends back onto the list returned by the
                # model while it is being indexed above — harmless but likely
                # unintended.
                preds.append(yhati)
            loss_per_video.backward()
            optimizer.step()
            if iter_print:
                print(f"train, iter:{i_iter}/{_config['n_update']}, iter_loss:{loss_per_video}", end='\r')
        _config["n_shot"] = _config["n_shot"]+1
    else:
        _log.info('###### Create model ######')
        model = MedicalFSS(_config, device).to(device)
        checkpoint = torch.load(_config['snapshot'], map_location='cpu')
        print("checkpoint keys : ", checkpoint.keys())
        # initializer.load_state_dict(checkpoint['initializer'])
        model.load_state_dict(checkpoint['model'])
        model.n_shot = _config["n_shot"]
        tr_dataset, val_dataset, ts_dataset = make_data(_config)
    testloader = DataLoader(
        dataset=ts_dataset,
        batch_size=1,
        shuffle=False,
        pin_memory=False,
        drop_last=False
    )
    _log.info('###### Testing begins ######')
    # metric = Metric(max_label=max_label, n_runs=_config['n_runs'])
    img_cnt = 0
    # length = len(all_samples)
    length = len(testloader)
    blank = torch.zeros([1, 256, 256]).to(device)
    reversed_idx = list(reversed(range(q_slice_n)))
    ch = 256 # number of channels of embedding
    img_lists = []
    pred_lists = []
    label_lists = []
    # saves[subject] accumulates [subj_idx, idx, imgs, preds, labels] per window.
    saves = {}
    n_test = len(ts_dataset.q_cnts)
    for subj_idx in range(n_test):
        saves[subj_idx] = []
    with torch.no_grad():
        batch_idx = 0 # use only 1 batch size for testing
        for i, sample_test in enumerate(testloader): # even for upward, down for downward
            subj_idx, idx = ts_dataset.get_test_subj_idx(i)
            img_list, pred_list, label_list, preds = [],[],[],[]
            s_x = sample_test['s_x'].to(device) # [B, Support, slice_num, 1, 256, 256]
            s_y = sample_test['s_y'].to(device) # [B, Support, slice_num, 1, 256, 256]
            preds = model(s_x)
            for frame_id in range(q_slice_n):
                s_xi = s_x[:, 0, frame_id, :, :, :] # only 1 shot in upperbound model
                s_yi = s_y[:, 0, frame_id, :, :, :] # [B, 1, 256, 256]
                yhati = preds[frame_id]
                # pdb.set_trace()
                # NOTE(review): appends rounded outputs onto the model's own
                # prediction list while it is being indexed — confusing but
                # harmless for range(q_slice_n).
                preds.append(yhati.round())
                img_list.append(s_xi[batch_idx].cpu().numpy())
                pred_list.append(yhati.round().cpu().numpy())
                label_list.append(s_yi[batch_idx].cpu().numpy())
            saves[subj_idx].append([subj_idx, idx, img_list, pred_list, label_list])
            if iter_print:
                print(f"test, iter:{i}/{length} - {subj_idx}/{idx} \t\t", end='\r')
            img_lists.append(img_list)
            pred_lists.append(pred_list)
            label_lists.append(label_list)
            # if _config['record']:
            #     frames = []
            #     for frame_id in range(0, q_x.size(1)):
            #         frames += overlay_color(q_x[batch_idx, frame_id], preds[frame_id-1][batch_idx].round(), q_y[batch_idx, frame_id], scale=_config['scale'])
            #     visual = make_grid(frames, normalize=True, nrow=5)
            #     writer.add_image(f"test/{subj_idx}/{idx}_query_image", visual, i)
    # Reassemble per-subject volumes from the sliding windows: the first and
    # last windows contribute their outer halves, interior windows only their
    # center slice.
    center_idx = (q_slice_n//2)+1 -1 # 5->2 index
    dice_similarities = []
    for subj_idx in range(n_test):
        imgs, preds, labels = [], [], []
        save_subj = saves[subj_idx]
        for i in range(len(save_subj)):
            subj_idx, idx, img_list, pred_list, label_list = save_subj[i]
            # if idx==(q_slice_n//2):
            if idx==0:
                for j in range((q_slice_n//2)+1):# 5//2 + 1 = 3
                    imgs.append(img_list[idx+j])
                    preds.append(pred_list[idx+j])
                    labels.append(label_list[idx+j])
            elif idx==(len(save_subj)-1):
                # pdb.set_trace()
                for j in range((q_slice_n//2)+1):# 5//2 + 1 = 3
                    imgs.append(img_list[center_idx+j])
                    preds.append(pred_list[center_idx+j])
                    labels.append(label_list[center_idx+j])
            else:
                imgs.append(img_list[center_idx])
                preds.append(pred_list[center_idx])
                labels.append(label_list[center_idx])
        # pdb.set_trace()
        img_arr = np.concatenate(imgs, axis=0)
        pred_arr = np.concatenate(preds, axis=0)
        label_arr = np.concatenate(labels, axis=0)
        # Dice = 2|P∩L| / (|P|+|L|) over the whole reassembled volume.
        dice = np.sum([label_arr * pred_arr]) * 2.0 / (np.sum(pred_arr) + np.sum(label_arr))
        dice_similarities.append(dice)
        print(f"{len(imgs)} slice -> computing dice scores. {subj_idx}/{n_test}. {ts_dataset.q_cnts[subj_idx] }/{len(save_subj)} => {len(imgs)}", end='\r')
        if _config['record']:
            frames = []
            for frame_id in range(0, len(imgs)):
                frames += overlay_color(torch.tensor(imgs[frame_id]), torch.tensor(preds[frame_id]), torch.tensor(labels[frame_id]), scale=_config['scale'])
            print(len(frames))
            visual = make_grid(frames, normalize=True, nrow=5)
            # NOTE(review): `i` here is the stale inner-loop index; the tag was
            # probably meant to use subj_idx.
            writer.add_image(f"test/{subj_idx}", visual, i)
            writer.add_scalar(f'dice_score/{i}', dice)
    print(f"test result \n n : {len(dice_similarities)}, mean dice score : \
    {np.mean(dice_similarities)} \n dice similarities : {dice_similarities}")
    if _config['record']:
        writer.add_scalar(f'dice_score/mean', np.mean(dice_similarities))
| Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/config.py | .py | 3,957 | 153 | """Experiment Configuration"""
import os
import re
import glob
import itertools
import sacred
from sacred import Experiment
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
# Sacred experiment setup: allow config mutation at runtime and disable
# stdout capture.
sacred.SETTINGS['CONFIG']['READ_ONLY_CONFIG'] = False
sacred.SETTINGS.CAPTURE_MODE = 'no'
ex = Experiment('PANet')
ex.captured_out_filter = apply_backspaces_and_linefeeds
# Register every .py file in these folders so each run archives its sources.
source_folders = ['.', './dataloaders', './models', './util', './dataloaders_medical']
sources_to_save = list(itertools.chain.from_iterable(
    [glob.glob(f'{folder}/*.py') for folder in source_folders]))
for source_file in sources_to_save:
    ex.add_source_file(source_file)
# "Organs" : ["background",
# "spleen",
# "right kidney",
# "left kidney",
# "gallbladder",
# "esophagus",
# "liver",
# "stomach",
# "aorta",
# "inferior vana cava",
# "portal vein & splenic vein",
# "pancreas",
# "right adrenal gland",
# "left adrenal gland",
# ],
@ex.config
def cfg():
    """Default configurations"""
    # NOTE: every local variable in this function becomes a sacred config
    # entry; do not refactor assignments into helpers.
    iter_print=True
    # size = 320
    size = 256
    input_size = (size, size) # 419? 480!
    seed = 1234
    cuda_visable = '0, 1, 2, 3, 4, 5, 6, 7'
    gpu_id = 0
    mode = 'test' # 'train' or 'test'
    record = False
    scale = 1.0
    n_layer = 1
    q_slice = 5          # number of query slices per window
    n_shot = 1
    s_idx = 0            # starting index of the support volume(s)
    n_pool = 3 # 3 - number of pooling
    target = 1           # target organ id (see organ table in prostate.py)
    is_attention=False
    add_target = 0       # how many target volumes to add to training (0 = none)
    is_super=False
    is_lowerbound=False
    external_test = "None" # "decathlon" # "CT_ORG"
    external_train = "None" # "decathlon" # "CT_ORG"
    is_fast_test = False
    n_fast_test = 5
    # Derived flags: internal_* is True when no external dataset is selected.
    if external_train == "None":
        internal_train = True
    else:
        internal_train = False
    if external_test == "None":
        internal_test = True
    else:
        internal_test = False
    if mode == 'train':
        lr_milestones = [50*i for i in range(1,3)]
        n_iter = 300
        dataset = 'prostate' # 'VOC' or 'COCO'
        n_steps = 100
        n_work = 1
        batch_size = 5
        print_interval = 500
        validation_interval = 500
        save_pred_every = 10000
        val_cnt = 100
        model = {
            'align': False,
            # 'align': True,
        }
        optim = {
            'lr': 1e-4,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
    elif mode == 'test':
        is_test = True
        dataset = 'prostate' # 'VOC' or 'COCO'
        notrain = False
        # snapshot = './runs/PANet_VOC_sets_0_1way_1shot_[train]/1/snapshots/30000.pth'
        snapshot = '/user/home2/soopil/tmp/PANet/runs/PANet_VOC_sets_0_3way_5shot_[train]/2/snapshots/50000.pth'
        n_iter = 1
        n_runs = 1
        n_update = 0
        n_steps = 1000
        batch_size = 1
        # for fine tunning
        optim = {
            'lr': 5e-5,
            'momentum': 0.9,
            'weight_decay': 0.0005,
        }
        # Set model config from the snapshot string
        model = {}
        for key in ['align',]:
            model[key] = key in snapshot
    else:
        raise ValueError('Wrong configuration for "mode" !')
    exp_str = '_'.join([
        mode,
    ])
    path = {
        'log_dir': './runs',
        # 'init_path': None,
        'init_path': './../../pretrained_model/vgg16-397923af.pth',
    }
    ### configuration for Medical Image Test
    modal_index = 0 #["flair","t1","t1ce","t2"]
    mask_index = 1 #[1, 2, 4]
    board=""
@ex.config_hook
def add_observer(config, command_name, logger):
    """A hook function to add observer"""
    # Attach a FileStorageObserver so each run is logged under
    # <log_dir>/<experiment>_<mode>/.
    exp_name = f'{ex.path}_{config["exp_str"]}'
    observer = FileStorageObserver.create(os.path.join(config['path']['log_dir'], exp_name))
    ex.observers.append(observer)
    return config
| Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/util/__init__.py | .py | 0 | 0 | null | Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/util/utils.py | .py | 2,004 | 74 | """Util functions"""
import random
import torch
import numpy as np
import os
from datetime import datetime
def try_mkdir(path):
    """Create directory `path`, printing success or failure instead of raising.

    Only OSError (already exists, missing parent, permission, ...) is
    swallowed; other exceptions — e.g. KeyboardInterrupt — now propagate
    instead of being hidden by the previous bare `except:`.
    """
    try:
        os.mkdir(path)
        print(f"mkdir : {path}")
    except OSError:
        print(f"failed to make a directory : {path}")
def date():
    """Return the current local time as a 'YYYYMMDD_HHMMSS' string."""
    # The old code first computed now.year + now.month + now.day (an integer
    # sum, not a date string) and immediately overwrote it; that dead
    # statement is removed — strftime alone produces the timestamp.
    return datetime.now().strftime('%Y%m%d_%H%M%S')
def set_seed(seed):
    """Seed every RNG used here: Python's `random`, torch CPU, and all CUDA devices."""
    seeders = (random.seed, torch.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed)
# Class-id splits per dataset. 'all' is the full label set; each integer key
# maps to the full set minus one contiguous chunk (VOC: 5 of 20 classes,
# COCO: 20 of 80). NOTE(review): presumably each key is a cross-validation
# fold whose removed chunk is the held-out test split — confirm against the
# data loaders that consume this table.
CLASS_LABELS = {
    'VOC': {
        'all': set(range(1, 21)),
        0: set(range(1, 21)) - set(range(1, 6)),    # fold 0 holds out classes 1-5
        1: set(range(1, 21)) - set(range(6, 11)),   # fold 1 holds out classes 6-10
        2: set(range(1, 21)) - set(range(11, 16)),  # fold 2 holds out classes 11-15
        3: set(range(1, 21)) - set(range(16, 21)),  # fold 3 holds out classes 16-20
    },
    'COCO': {
        'all': set(range(1, 81)),
        0: set(range(1, 81)) - set(range(1, 21)),   # fold 0 holds out classes 1-20
        1: set(range(1, 81)) - set(range(21, 41)),  # fold 1 holds out classes 21-40
        2: set(range(1, 81)) - set(range(41, 61)),  # fold 2 holds out classes 41-60
        3: set(range(1, 81)) - set(range(61, 81)),  # fold 3 holds out classes 61-80
    }
}
def get_bbox(fg_mask, inst_mask):
    """Derive foreground/background bounding-box masks from an instance mask.

    Args:
        fg_mask: binary foreground tensor; indexed as fg_mask[0] and
            fg_mask.shape[1]/[2] below, so it is presumably (1, H, W) —
            TODO confirm against callers.
        inst_mask: integer instance-id tensor of the same shape.
            NOTE: mutated in place (pixels outside fg_mask are zeroed).

    Returns:
        (fg_bbox, bg_bbox): fg_bbox is 1 inside the bounding box of the
        largest instance (0 elsewhere); bg_bbox is 1 everywhere except the
        bounding boxes of all instances.
    """
    fg_bbox = torch.zeros_like(fg_mask, device=fg_mask.device)
    bg_bbox = torch.ones_like(fg_mask, device=fg_mask.device)
    # Restrict instance ids to foreground pixels (in-place edit of the input).
    inst_mask[fg_mask == 0] = 0
    # Pixel count per instance id; id 0 is background, so argmax over area[1:]
    # (shifted back by +1) picks the largest real instance.
    area = torch.bincount(inst_mask.view(-1))
    cls_id = area[1:].argmax() + 1
    cls_ids = np.unique(inst_mask)[1:]
    # Tight box around the largest instance -> foreground bbox.
    mask_idx = np.where(inst_mask[0] == cls_id)
    y_min = mask_idx[0].min()
    y_max = mask_idx[0].max()
    x_min = mask_idx[1].min()
    x_max = mask_idx[1].max()
    fg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 1
    # Carve every instance's (clamped) box out of the background bbox.
    for i in cls_ids:
        mask_idx = np.where(inst_mask[0] == i)
        y_min = max(mask_idx[0].min(), 0)
        y_max = min(mask_idx[0].max(), fg_mask.shape[1] - 1)
        x_min = max(mask_idx[1].min(), 0)
        x_max = min(mask_idx[1].max(), fg_mask.shape[2] - 1)
        bg_bbox[0, y_min:y_max+1, x_min:x_max+1] = 0
    return fg_bbox, bg_bbox
| Python |
3D | oopil/3D_medical_image_FSS | UNet_upperbound/models/decoder.py | .py | 4,107 | 101 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import numpy as np
# from torchsummary import summary
if __name__ == '__main__':
from nnutils import conv_unit
else:
from .nnutils import conv_unit
class Decoder(nn.Module):
def __init__(self, input_channels=512, input_res=(8, 14), init_channels=512, shrink_per_block=2, output_channels=1, output_res=(256, 448)):
    """U-Net-style decoder: conv stages fused with encoder skip features.

    NOTE(review): all constructor arguments (input_channels, input_res,
    init_channels, shrink_per_block, output_channels, output_res) are
    currently ignored — every layer below is hard-coded. Confirm whether
    they were meant to parameterize the architecture.
    """
    super(Decoder, self).__init__()
    # Two 3x3 conv + BN + ReLU stages on the 512-channel bottleneck.
    # momentum=1 makes BatchNorm track only the latest batch statistics.
    self.layer1 = nn.Sequential(
        nn.Conv2d(512, 512, kernel_size=3, padding=1),
        nn.BatchNorm2d(512, momentum=1, affine=True),
        nn.ReLU()
    )
    self.layer2 = nn.Sequential(
        nn.Conv2d(512, 512, kernel_size=3, padding=1),
        nn.BatchNorm2d(512, momentum=1, affine=True),
        nn.ReLU()
    )
    # Shared 2x bilinear upsampler reused between decoder stages.
    self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
    # Each double_convN consumes [decoder features ++ skip features] after
    # channel concatenation; input channels = decoder + skip widths.
    self.double_conv1 = nn.Sequential(
        nn.Conv2d(512 + 512*1, 512, kernel_size=3, padding=1),
        nn.BatchNorm2d(512, momentum=1, affine=True),
        nn.ReLU(),
        nn.Conv2d(512, 512, kernel_size=3, padding=1),
        nn.BatchNorm2d(512, momentum=1, affine=True),
        nn.ReLU()
    ) # 14 x 14
    self.double_conv2 = nn.Sequential(
        nn.Conv2d(512 + 512*1, 256, kernel_size=3, padding=1),
        nn.BatchNorm2d(256, momentum=1, affine=True),
        nn.ReLU(),
        nn.Conv2d(256, 256, kernel_size=3, padding=1),
        nn.BatchNorm2d(256, momentum=1, affine=True),
        nn.ReLU()
    ) # 28 x 28
    self.double_conv3 = nn.Sequential(
        nn.Conv2d(256 + 256*1, 128, kernel_size=3, padding=1),
        nn.BatchNorm2d(128, momentum=1, affine=True),
        nn.ReLU(),
        nn.Conv2d(128, 128, kernel_size=3, padding=1),
        nn.BatchNorm2d(128, momentum=1, affine=True),
        nn.ReLU()
    ) # 56 x 56
    self.double_conv4 = nn.Sequential(
        nn.Conv2d(128 + 128*1, 64, kernel_size=3, padding=1),
        nn.BatchNorm2d(64, momentum=1, affine=True),
        nn.ReLU(),
        nn.Conv2d(64, 64, kernel_size=3, padding=1),
        nn.BatchNorm2d(64, momentum=1, affine=True),
        nn.ReLU()
    ) # 112 x 112
    # Final stage maps to 5 output channels squashed by Sigmoid.
    self.double_conv5 = nn.Sequential(
        nn.Conv2d(64 + 64 * 1, 64, kernel_size=3, padding=1),
        nn.BatchNorm2d(64, momentum=1, affine=True),
        nn.ReLU(),
        nn.Conv2d(64, 5, kernel_size=1, padding=0), # 1 for bce and 2 for cross entropy loss
        # nn.Conv2d(64, 1, kernel_size=1, padding=0), # 1 for bce and 2 for cross entropy loss
        # nn.Softmax2d()
        nn.Sigmoid()
    ) # 256 x 256
    # x = F.interpolate(x, orig_size, mode="bilinear")
    self._init_weights()
def mask_process(self, mask):
    """Bilinearly resize a mask to the 16x16 bottleneck resolution.

    Args:
        mask: 4-D tensor (N, C, H, W).

    Returns:
        The mask resized to spatial size 16x16.
    """
    # The original computed the interpolation but discarded the result and
    # returned None; return it so callers actually receive the resized mask.
    return F.interpolate(mask, [16, 16], mode="bilinear")
def forward(self, hidden, ft_list):
out = self.layer1(hidden)
out = self.layer2(out)
# out = self.upsample(out) # block 1
out = torch.cat((out, ft_list[-1]), dim=1)
out = self.double_conv1(out)
# out = self.upsample(out) # block 2
out = torch.cat((out, ft_list[-2]), dim=1)
out = self.double_conv2(out)
out = self.upsample(out) # block 3
out = torch.cat((out, ft_list[-3]), dim=1)
out = self.double_conv3(out)
out = self.upsample(out) # block 4
out = torch.cat((out, ft_list[-4]), dim=1)
out = self.double_conv4(out)
out = self.upsample(out) # block 5
out = torch.cat((out, ft_list[-5]), dim=1)
out = self.double_conv5(out)
# out = F.sigmoid(out)
# out = torch.squeeze(out)
return out
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# torch.nn.init.normal_(m.weight)
torch.nn.init.kaiming_normal_(m.weight, nonlinearity='relu') | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.