text stringlengths 8 6.12M |
|---|
function range = bootstrap_mean_small(data,varargin)
% BOOTSTRAP_MEAN_SMALL Bootstrap confidence interval for the sample median.
%
%   range = bootstrap_mean_small(data) draws 'samp_num' resamples of size
%   'samp_size' (with replacement) from DATA, takes the median of each,
%   and returns the interval [lower upper] of the bootstrap medians at
%   the 0.05/bonfer_correction quantile on each tail.
%
%   Optional name/value parameters:
%     'bonfer_correction' - divide the 0.05 alpha level by this factor
%                           (Bonferroni correction, default 1)
%     'samp_num'          - number of bootstrap resamples (default 100000)
%     'samp_size'         - size of each resample (default 50)
%
%   NOTE(review): despite the function name, the statistic bootstrapped
%   is the MEDIAN, not the mean.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%Setup variables and parse command line
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
i_p = inputParser;
i_p.addRequired('data',@isnumeric);
i_p.addParamValue('bonfer_correction',1,@(x)isnumeric(x) && x > 0);
i_p.addParamValue('samp_num',100000,@(x)isnumeric(x) && x > 0);
i_p.addParamValue('samp_size',50,@(x)isnumeric(x) && x > 0);
i_p.parse(data,varargin{:});
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Main Program
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
samp_num = i_p.Results.samp_num;
samp_size = i_p.Results.samp_size;
% Bootstrap: median of each random resample drawn with replacement.
samples = NaN(samp_num,1);
for i=1:samp_num
    samples(i) = median(data(randi(length(data),[samp_size,1])));
end
samples = sort(samples);
% Interval endpoints; clamp the indices so a strong Bonferroni correction
% (or a small samp_num) cannot round the lower index down to 0 and raise
% an indexing error, and the upper index cannot exceed the sample count.
alpha_tail = 0.05/i_p.Results.bonfer_correction;
low_index  = max(1, round(length(samples)*alpha_tail));
high_index = min(length(samples), round(length(samples)*(1-alpha_tail)));
range = [samples(low_index), samples(high_index)];
function [starttime, endtime,index_start, index_end] = get_trial_TTL(TTL,ts,threshold)
% GET_TRIAL_TTL Extract trial start/end times and indices from a TTL trace.
%
%   A rising edge (sample-to-sample jump above (max-min)/threshold) marks
%   a trial start; a falling edge of the same magnitude marks a trial end.
%
%   Inputs:
%     TTL       - TTL voltage trace (row vector)
%     ts        - timestamps, same length as TTL
%     threshold - divisor for the TTL amplitude used as the edge threshold
%
%   Outputs:
%     starttime/endtime       - timestamps of rising/falling edges
%     index_start/index_end   - sample indices of those edges
TTL_threshold = ((max(TTL)-min(TTL))/threshold);
% One-sample-delayed copy (first sample repeated) so the discrete
% difference keeps the original vector length.
TTL_shift = [TTL(1) TTL(1:(length(TTL)-1))];
% Renamed from 'diff' to avoid shadowing MATLAB's built-in diff().
TTL_step = TTL-TTL_shift;
starttime = ts(TTL_step > TTL_threshold)';
endtime = ts(TTL_step < -TTL_threshold)';
index_start = find(TTL_step > TTL_threshold)';
index_end = find(TTL_step < -TTL_threshold)';
|
% Author: Guosheng Lin (guosheng.lin@gmail.com)
% perform segmentation prediction on user provided images.
% specify the location of your images, e.g., using the following
% folder which contains serveral example images:
% ds_config.img_data_dir='../datasets/custom_data';
function demo_refinenet_test_example_egohands()

% One-time initialisation hoisted out of the per-folder loop: seeding the
% RNG, adding helper paths and running MatConvNet setup are loop-invariant
% and were needlessly repeated on every iteration.
rng('shuffle');
addpath('./my_utils');
dir_matConvNet='../libs/matconvnet/matlab';
run(fullfile(dir_matConvNet, 'vl_setupnn.m'));

%%% testset
folders = {'CARDS_COURTYARD_B_T','CARDS_OFFICE_S_B','CHESS_COURTYARD_B_T','CHESS_LIVINGROOM_T_H','JENGA_LIVINGROOM_S_T','JENGA_OFFICE_H_T','PUZZLE_COURTYARD_H_T','PUZZLE_LIVINGROOM_T_B'};
%%% valset
%folders = {'JENGA_COURTYARD_T_S','CHESS_COURTYARD_H_S','PUZZLE_OFFICE_S_T','CARDS_LIVINGROOM_S_H'}

% Run the trained RefineNet model over every test folder.
for i = 1:length(folders)
run_config=[];
ds_config=[];
run_config.use_gpu=true;
% run_config.use_gpu=false;
run_config.gpu_idx=1;
% result dir:
result_name=[char(folders(i))];
result_dir=fullfile('../cache_data', 'egohands', result_name);
% the folder that contains testing images:
ds_config.img_data_dir=strcat('/home/zxi/refinenet/datasets/EgoHands/JPEGImages/',char(folders(i)));
% using a trained model which is trained on VOC 2012
% run_config.trained_model_path='../model_trained/refinenet_res101_voc2012.mat';
% ds_config.class_info=gen_class_info_voc();
% using the object parsing model
run_config.trained_model_path='../model_trained/refinenet_res101_egohands.mat';
ds_config.class_info=gen_class_info_ego();
% for voc trained model, control the size of input images
run_config.input_img_short_edge_min=450;
run_config.input_img_short_edge_max=600;
% set the input image scales, useful for multi-scale evaluation
% e.g. using multiple scale settings (1.0 0.8 0.6) and average the resulting score maps.
run_config.input_img_scale=1.0;
run_config.gen_net_opts_fn=@gen_net_opts_model_type1;
run_config.run_evaonly=true;
ds_config.use_custom_data=true;
ds_config.use_dummy_gt=true;
run_config.use_dummy_gt=ds_config.use_dummy_gt;
ds_config.ds_name='tmp_data';
run_config.root_cache_dir=result_dir;
mkdir_notexist(run_config.root_cache_dir);
run_config.model_name=result_name;
% mirror console output into a per-result diary file
diary_dir=run_config.root_cache_dir;
mkdir_notexist(diary_dir);
diary(fullfile(diary_dir, 'output.txt'));
diary on
run_dir_name=fileparts(mfilename('fullpath'));
[~, run_dir_name]=fileparts(run_dir_name);
run_config.run_dir_name=run_dir_name;
run_config.run_file_name=mfilename();
ds_info=gen_dataset_info(ds_config);
my_diary_flush();
train_opts=run_config.gen_net_opts_fn(run_config, ds_info.class_info);
imdb=my_gen_imdb(train_opts, ds_info);
data_norm_info=[];
data_norm_info.image_mean=128;
imdb.ref.data_norm_info=data_norm_info;
if run_config.use_gpu
    gpu_num=gpuDeviceCount;
    if gpu_num>=1
        gpuDevice(run_config.gpu_idx);
    else
        error('no gpu found!');
    end
end
[net_config, net_exp_info]=prepare_running_model(train_opts);
my_net_tool(train_opts, imdb, net_config, net_exp_info);
fprintf('\n\n--------------------------------------------------\n\n');
disp('results are saved in:');
disp(run_config.root_cache_dir);
my_diary_flush();
diary off
end
end
|
% Print every number in 4..40 that cannot be written as a sum of two primes.
for candidate = 4:40
    has_prime_pair = false;
    for first = 2:candidate/2+1
        second = candidate - first;
        if isprime(first) && isprime(second)
            has_prime_pair = true;
            break;
        end
    end
    if ~has_prime_pair
        disp(candidate);
    end
end
function ce30_Raw2CartPolar(obj)
% % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
% @Func ce30_Raw2CartPolar;
% @Brief Convert all raw data into the required (Cartesian/polar) form;
% @Param NONE
% @Retval Updates the object's properties in place;
% @Date 2019/11/21;
% % % % % % % % % % % % % % % % % % % % % % % % % % % % % %
%% Function body
% NOTE(review): the loop bounds (26 full packets of 12 blocks x 20 samples
% plus a final packet with only 8 blocks) appear fixed by the CE30
% sensor's frame layout -- confirm against the device protocol.
indexCartMatrix = 1;
indexCartPolar = 1;
for p = 1: 1: 26
for b = 1: 1: 12
for s = 1: 1: 20
obj.ce30_ExtractAngleDist(p, b, s);   % pull angle/distance for this sample
obj.ce30_Polar2Cart;                  % convert the extracted polar point to Cartesian
obj.ce30_DataWrite(s, indexCartMatrix, indexCartPolar);
indexCartMatrix = indexCartMatrix + 1;
end
indexCartPolar = indexCartPolar + 1;
end
end
% Trailing partial packet: packet 27 carries only 8 blocks.
for b = 1: 1: 8
for s = 1: 1: 20
obj.ce30_ExtractAngleDist(27, b, s);
obj.ce30_Polar2Cart;
obj.ce30_DataWrite(s, indexCartMatrix, indexCartPolar);
indexCartMatrix = indexCartMatrix + 1;
end
indexCartPolar = indexCartPolar + 1;
end
end
% this is used in app designer
function myTimerFcn(app,~,~)
% MYTIMERFCN Timer callback: run hand-eye calibration once robot poses exist.
%   Waits for posesRobot.csv, calibrates the camera from the captured robot
%   images, solves the hand-eye transform X (or reuses a saved one), then
%   computes and saves the per-pose base-to-world transforms Z.
cd('C:\Users\Stage-2\Desktop\HandEyeApp\Variables\hand-eye');
% 'file' restricts exist() to files, avoiding accidental matches against
% variables or folders of the same name.
if exist('posesRobot.csv', 'file')
    imageDir = fullfile('C:\Users\Stage-2\Desktop\HandEyeApp\Images\realRobotImages');
    images = imageDatastore(imageDir);
    [worldPoints,cameraParams] = calibrFunction (images.Files',12);
    % Camera extrinsics (Tc) and camera position for every image.
    for i=1:size(images.Files,1)
        [Tc(:,:,i),cameraPos(i,:)]= extrinsicFunc(worldPoints,images,cameraParams,i);
    end
    % save(strcat(app.PathoutputEditField_HandEye.Value,'\Tc.mat'),'Tc') ;
    filename='posesRobot.csv';   % semicolons added: avoid console spam from the timer
    posesRobot=load(filename);
    % Build 4x4 homogeneous robot poses Tg from rows of [eulZYX, xyz].
    for i=1:size(posesRobot,1)
        eul=posesRobot(i,1:3);
        rotmZYX(:,:,i) = eul2rotm(eul);
        Tg(:,:,i) = [rotmZYX(:,:,i),posesRobot(i,4:6)';0,0,0,1];
    end
    % Solve the hand-eye transform only once; reuse a previously saved X.
    if ~exist('X.mat', 'file')
        X = handEye(Tc,Tg);
        save(strcat(app.PathoutputEditField_HandEye.Value,'\X.mat'),'X') ;
    else
        logger(app,'File X.mat is already in our directory');
        % BUG FIX: X was never loaded in this branch, so Tg*X/Tc below
        % errored with an undefined variable whenever X.mat already existed.
        load('X.mat', 'X');
    end
    % Base-to-world transform for each pose.
    for i=1:size(posesRobot,1)
        Z(:,:,i)= Tg(:,:,i)*X/Tc(:,:,i);
    end
    save(strcat(app.PathoutputEditField_HandEye.Value,'\Z.mat'),'Z') ;
    cd(app.PathoutputEditField_HandEye.Value);
    z=pose_base2world(Z);
    csvwrite('z.csv',z);
else
    logger(app, 'WARNING:The file of Robot poses is empty!');
end
end
%% 5.5a
% Ship autopilot (problem 5.5): continuous-time state-space model with a
% second-order wave-disturbance model, discretised at fs = 10 Hz.
%Parameters
K = 0.16;
T = 75;
omega0 = pi/4; %0.7823;
lambda = 0.07;
sigma = 0.0281;
Kw = 2*lambda*omega0*sigma   % wave-model gain (echoed to console)
% Sampling
fs = 10;
Ts = 1/fs;
% The system
A = [0, 1, 0, 0, 0;
-omega0^2, -2*lambda*omega0, 0, 0, 0
0, 0, 0, 1, 0
0, 0, 0, -1/T, -K/T
0, 0, 0, 0, 0];
B = [0;
0;
0;
K/T;
0];
E = [0,0;
Kw,0;
0,0;
0,0;
0,1];
C = [0, 1, 1, 0, 0];
% Discretizing
syms s t;
% Ad = expm(A*Ts), via the inverse Laplace transform of (sI - A)^-1
Ad = vpa(ilaplace((s*eye(5)-A)^-1, Ts),5);
% BUG FIX: 'eat' must be a symbolic expression in t (i.e. expm(A*t)), not
% an anonymous function handle -- int() cannot integrate a function handle
% symbolically, so the original Bd/Ed computation failed.
eat = ilaplace((s*eye(length(A)) - A)^-1);
Bd = vpa(int(eat,t,0,Ts)*B,3);
Ed = vpa(int(eat,t,0,Ts)*E,3);
Cd = C;
% Discretizing using matlab function (equivalent)
% [Ad,Bd] = c2d(A,B,Ts);
% Cd = C;
% [Ad,Ed] = c2d(A,E,Ts);
%% 5.5b
% run simulation to get disturbance
sim("ship_5_5b.slx");
%find the variance of the simulated disturbance signal
sigma = var(simout.data);
% measurement-noise covariance for the discrete Kalman filter
R = sigma/fs;
%% 5.5 d&e variable values
% given variables: process noise Q and initial error covariance P0bar
Q = [30 0; 0 1e-6];
P0bar = [1 0 0 0 0;
0 0.013 0 0 0;
0 0 pi^2 0 0;
0 0 0 1 0;
0 0 0 0 2.5e-3];
x0bar = [0 0 0 0 0]';
I = eye(5);
% PD-controller
Kpd = 0.8159;
Td = 75;
Tf = 8.3910;
% bundle everything the Simulink models read into a single struct
parameters = struct('A',double(Ad),'B',double(Bd),'C',double(Cd),'E',double(Ed),'I',I,'Q',Q,'R',R,'P0bar',P0bar,'x0bar',x0bar);
%% 5.5d
% run simulation
sim5d = sim("ship_5_5d.slx", "Stoptime", "600");
% plot measured heading vs Kalman-filter estimate
figure
plot(sim5d.compass);
hold on
plot(sim5d.compass_est);
grid
title("$\psi_r = 30^\circ$ with disturbances",'Interpreter','latex')
legend({'Heading $\psi$', 'Est heading $\psi^-$'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
% plot rudder input vs estimated rudder bias
figure
plot(sim5d.u)
hold on
plot(sim5d.rudderBias_est)
grid
title("$\psi_r = 30^\circ$ with disturbances",'Interpreter','latex')
legend({'Rudder input', 'Est bias $r$'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
%% 5.5e
% Same experiment with the wave signal from wave.mat (psi_w used below).
load("wave.mat");
% run simulation
sim5e = sim("ship_5_5e.slx", "Stoptime", "600");
% plot compass
figure
plot(sim5e.compass);
hold on
plot(sim5e.compass_est);
grid
title("$\psi_r = 30^\circ$ with disturbances",'Interpreter','latex')
legend({'Heading $\psi$', 'Est heading $\psi^-$'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
%plot rudder bias and input
figure
plot(sim5e.u)
hold on
plot(sim5e.rudderBias_est)
grid
title("$\psi_r = 30^\circ$ with disturbances",'Interpreter','latex')
legend({'Rudder input', 'Est bias $r$'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
% plot waves (true, row 2 of psi_w, truncated to the estimate's length)
figure
plot(sim5e.wave_est.Time, psi_w(2,1:length(sim5e.wave_est.Data)));
hold on
plot(sim5e.wave_est)
grid
title("Wave disturbance",'Interpreter','latex')
legend({'Waves', 'Est waves'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
%plot absolute difference between the waves and the estimated waves
figure
plot(sim5e.wave_est.Time, abs((psi_w(2,1:length(sim5e.wave_est.Data))-sim5e.wave_est.Data')))
grid
title("Difference between waves and estimated waves",'Interpreter','latex')
%legend({'waves', 'Est waves'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
axis([0,600,0,2])
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
%% 5.5f
% Compare filter behaviour for several process-noise covariance choices Q.
% Q = [30 0; 0 1e-6]; %standard Q
newQs = {diag([30,1e-6]),diag([30,1e-12]),diag([30000,1e-6])}; %,diag([1e-6,30]),diag([0,0])}
%%,diag([3000,1e-4])...diag([0.3,1e-6]),diag([30,1e-8]),diag([0.3,1e-8])};
compassPlot = figure;
hold on
biasPlot = figure;
hold on
%wavePlot = figure;
%hold on   % (stray hold for the disabled wavePlot figure, now disabled too)
for Q = newQs
parameters.Q = Q{1}; %update parameters
% run simulations
sim5f_d = sim("ship_5_5e.slx", "Stoptime", "600");
%sim5f_e = sim("ship_5_5e.slx", "Stoptime", "600"); %do seperatly?
% plot heading and its estimate in matching colours (estimate dashed)
figure(compassPlot)
plt = plot(sim5f_d.compass);
color = get(plt,'Color');
plot(sim5f_d.compass_est,"--","color",color);
figure(biasPlot)
plt = plot(sim5f_d.rudderBias_est);
color = get(plt,'Color');
%plot(sim5f_d.u,"--","color",color);
end
% figure(compassPlot)
% legend
% figure(biasPlot)
% legend
% pretty plot
figure(compassPlot)
grid
title("Simulation with different Q-values",'Interpreter','latex')
legend({'$Q_0 \psi$', '$Q_0 \psi^-$','$Q_1 \psi$', '$Q_1 \psi^-$','$Q_2 \psi$', '$Q_2 \psi^-$'},'Interpreter','latex','location','northeastoutside');
xlabel("Time (Seconds)",'Interpreter','latex','FontSize', 15)
ylabel("(Degrees)",'Interpreter','latex','FontSize', 15)
axis([0,600,10,34])
set(gcf, 'Position', [100, 100, 700, 400])
set(gca,'FontSize',12,'linewidth',1.0)
|
function [E,P]=MT_VCA_initialize(X,labels,parameters)
% Initialise endmembers and proportions for multi-target VCA unmixing:
% background endmembers come from VCA on the negative-bag pixels; target
% endmembers are the positive-bag pixels worst explained by the background.
%
% Inputs:
%   X - Inputdata, reshaped hyperspectral image treats each pixel as column vector, d by N
%   parameters - struct - parameter structure which can be set using the EF_parameters() function
%   labels - binary the same size as input data, indicates positive bag with logical '1'
%
% Outputs:
%   E - Initial Endmembers value, d by M+1, M accounts for the number of background endmembers
%   P - Initial Proportion Values, M+1 by N
T=parameters.T;%No. of initial target endmembers (comment fixed: was mislabelled as background)
M=parameters.M;%No. of initial background endmembers
index_plus=find(labels);%find positive index
index_minus=find(labels==0);%find negative index
N=size(X,2);
X_plus=X(:,index_plus);%extract positive data
X_minus=X(:,index_minus);%extract negative data
N_plus=size(X_plus,2);% total No. of data in positive bags
N_minus=size(X_minus,2);% total No. of data in negative bags
%E initialization
E_minus=VCA(X_minus,'Endmembers',M);% VCA to extract background endmembers
P_plus_unmix=keep_E_update_P(X_plus,E_minus,1);%unmix X_plus use E_minus
syn_X_plus=E_minus*P_plus_unmix;%calculate synthetic X_plus
unmix_diff=sqrt(sum((X_plus-syn_X_plus).^2,1));% calculate difference between real and synthetic
[~,idx_et]=sort(unmix_diff,'descend');%find the ones unmixed worst by E_minus
E_T=X_plus(:,idx_et(1:T));% those become the initial target endmembers
E=[E_T E_minus];
%%%P initialization
P=zeros(T+M,N);
P_plus=ones(T+M,N_plus)*(1/(M+T));%use mean value to initialize proportion values according to labels
P_minus=ones(M,N_minus)*(1/M);
P(:,index_plus)=P_plus;
P(:,index_minus)=[zeros(T,N_minus);P_minus];% negative pixels get zero target proportions
end
function [P]=keep_E_update_P(Inputdata,E,flag)
% Solve for proportion values given a fixed endmember set, with or
% without the sum-to-one constraint.
%
% Inputs:
%   Inputdata: double, Nxd or NxMxd matrix, dimensionality d for each feature vector
%   E:         endmember matrix (held fixed)
%   flag:      0 without sum to one constraint, 1 with sum to one constraint
%
% Outputs:
%   P: proportion values
%
% Author: Changzhe Jiao
% Contact: cjr25@mail.missouri.edu
%%
X=double(Inputdata);
% A 3-D image cube is flattened into a pixel matrix before unmixing.
if ndims(X)==3
    X=EF_reshape(X);
end
P=P_Update_KE(X,E,flag);
end
%%
function [P]=P_Update_KE(X,E,flag)
% Least-squares proportion update for fixed endmembers E, with optional
% sum-to-one constraint (flag==1). Nonnegativity is enforced by an
% active-set style recursion: pixels with negative proportions are
% re-solved with the offending endmembers removed.
M=size(E,2);
N=size(X,2);
if M>1
Eps=1e-8;
DP=Eps*eye(M,M);   % small ridge term keeps E'*E invertible
U=pinv(E'*E+DP);
V=E'*X;
if flag==0
P=U*V;   % unconstrained least squares
elseif flag==1
% closed-form equality-constrained (sum-to-one) least squares
P=U*(V+ones(M,1)*((1-ones(1,M)*U*V)/(ones(1,M)*U*ones(M,1))));
end
Z=P<0;
% Repeat until no proportion is negative: group pixels by their pattern
% of negative entries and re-solve each group without those endmembers.
while (sum(sum(Z))>0)
ZZ = unique(Z', 'rows', 'first')';
for i=1:size(ZZ,2)
if(sum(ZZ(:,i)))>0
eLocs=find(1-ZZ(:,i));   % endmembers kept for this negativity pattern
rZZi=repmat(ZZ(:,i),1,N);
inds=all(Z==rZZi, 1);    % pixels sharing this pattern
P_temp=P_Update_KE(X(:,inds),E(:,eLocs),flag);
P_temp2=zeros(size(ZZ,1),sum(inds));
P_temp2(eLocs,:)=P_temp;
P(:,inds)=P_temp2;
end
end
Z=P<0;
end
else
% Single endmember: everything is assigned to it.
P=ones(M,N);
end
end
|
function E = minGlucReg(model, m, d)
% Glucose-regulation objective: difference between the area under the
% glucose rate-of-appearance (Ra_g) curve and the total-glucose target
% m.c.Gtot. The third argument d is accepted for interface compatibility
% but not used in the visible body.
res = model.result;
% Integrate the Ra_g column over the simulated time grid (trapezoidal rule).
appearance_auc = trapz(res.time, res.vcurr(:, m.v.Ra_g));
E = appearance_auc - m.c.Gtot;
function CXX= CX(Alpha,el)
% Body-axis X Force
% Bilinear table lookup of the F-16 body-axis X-force coefficient as a
% function of angle of attack Alpha (deg) and elevator deflection el
% (deg), per the data tables in Stevens & Lewis, "Aircraft Control and
% Simulation". Indices are clamped at the table edges.
a=[-.099 -.081 -.081 -.063 -.025 .044 .097 .113 .145 .167 .174 .166
-.048 -.038 -.040 -.021 .016 .083 .127 .137 .162 .177 .179 .167
-.022 -.020 -.021 -.004 .032 .094 .128 .130 .154 .161 .155 .138
-.040 -.038 -.039 -.025 .006 .062 .087 .085 .100 .110 .104 .091
-.083 -.073 -.076 -.072 -.046 .012 .024 .025 .043 .053 .047 .040]';
% This has been checked for accuracy with the Stevens Table
s=.2*Alpha;              % alpha axis: one table row per 5 degrees
k=fix(s);
if(k<=-2),k=-1; end      % clamp to table bounds
if(k>=9),k=8; end
da=s-k;                  % fractional distance along alpha
l=k+fix(1.1*sign(da));   % neighbouring alpha index (direction of da)
s=el/12;                 % elevator axis: one table column per 12 degrees
m=fix(s);
if(m<=-2),m=-1;end
if(m>=2),m=1;end
de=s-m;                  % fractional distance along elevator
n=m+fix(1.1*sign(de));   % neighbouring elevator index
% shift indices into MATLAB's 1-based table coordinates
k=k+3;
l=l+3;
m=m+3;
n=n+3;
t=a(k,m);
u=a(k,n);
v=t+abs(da)*(a(l,m)-t);  % interpolate along alpha at each elevator column
w=u+abs(da)*(a(l,n)-u);
CXX = v+(w-v)*abs(de);   % then interpolate along elevator
% This has been double checked.
end
% function isdetected = detectEd(filename,RotatedDir,EdemaDir)
%
% hema_file_name = [hemaDir, filename];
% edema_file_name = [EdemaDir, filename];
% Script form: reads a fixed test image directly. NOTE(review): the
% commented-out function header suggests this once took file/dir
% arguments; later sections of this script still reference those
% variables (filename, edema_file_name).
img = imread('18.png');
% img = imread([RotatedDir, filename]);
% img = rgb2gray(imread('14.jpg'));
% img = rgb2gray(imread('13.png'));
% img = imrotate(img,-2.5);
% img = img(:,1:size(img,2)/2);
% img = img(:,size(img,2)/2+1:end);
imshow(img)
hold on;
% %%
% img = imadjust(img);
% imshow(img)
%% Create Ventricle Templates
% Fill holes, binarise, and take the bounding box / centroid of the head
% region; then derive three template boxes (one central green box, two
% lower-lateral blue boxes) for the ventricles.
rp = imfill(double(img),'holes');
rp(rp>0) = max(max(rp));
[rpb,~] = bwlabel(rp);
rpbox = regionprops(rpb,'BoundingBox','Centroid');
rectangle('Position',rpbox(1).BoundingBox,'EdgeColor','r');
xl = rpbox(1).BoundingBox(1,1);
yl = rpbox(1).BoundingBox(1,2);
w = rpbox(1).BoundingBox(1,3);
h = rpbox(1).BoundingBox(1,4);
centx = rpbox(1).Centroid(1,1);
centy = rpbox(1).Centroid(1,2);
% cx = centx;
% cy = centy;
cx = xl+w/2;   % geometric centre of the bounding box (not the centroid)
cy = yl+h/2;
xl2 = cx-w/6;
yl2 = cy-h/6-h/12;
w2 = w/3;
h2 = h/3+h/24;
box2 = [xl2 yl2 w2 h2];
rectangle('Position',box2,'EdgeColor','g');
xl3 = cx-w/4+w/24;
yl3 = cy+h/8-h/24;
w3 = w/8;
h3 = h/4;
box3 = [xl3 yl3 w3 h3];
rectangle('Position',box3,'EdgeColor','b');
xl4 = cx+w/4-w3-w/24;
yl4 = cy+h/8-h/24;
box4 = [xl4 yl4 w3 h3];
rectangle('Position',box4,'EdgeColor','b');
%Set1 Edema
%%
% -------------------------------------------------------------------------
% Binary mask of the filled head region.
white = imfill((img),'holes');
white(white>0) = 1;
% max(max(white));
%% Intracranial Extraction conversion into white image
% Everything outside the head mask is forced to white (255).
img2new = ones(size(img))*255;
img2new = img2new.*(1-double(white))+ double(img).*double(white);
imshow(uint8(img2new));
img2n = double(medfilt2(img2new));
%% Median Filtering
img2n = double(medfilt2(img2new));
imshow(uint8(img2n));
%%
% finalim = zeros(size(img2n));
% finalim(innerbrain_mask==1) = double(img_Mattress_2d(innerbrain_mask==1));
% x = double(img2n(:));
% label = emgm(x',5);
% k = reshape(label,size(img2n,1),size(img2n,2));
% figure, imshow(mat2gray(k)),title('GMM on normal image');
%% Histogram of intracranial matter and Subracting graymatter intensit
% Find the dominant (grey-matter) intensity bin of the histogram (skipping
% the first 9 near-black bins), then saturate everything brighter than it.
% [a,~]=hist(img2n(:),256);
[a,~]=hist(double(img2n(:)),256);
[~,grval]=max(a(10:end));
img2f = double(img2n);
img2f(img2f>grval) = 255;
% - (img2n>0)*(grval);
figure,imshow(uint8(img2f));
% figure, plot(a(10:end));
% img2f = double(img2n) - (~white2)*(grval);
% img2n(img2n<grval)=0;
% imshow(uint8(img2n));
% hist(double(img2fin(:)))
%%
img2f = medfilt2(double(img2f));
figure,imshow(uint8(img2f));
%%
% Linear Contrast Stretching for Edema
[a,~]=hist(img2n(:),256);
[~,grval]=max(a(10:end));
img2fin = imadjust(uint8(img2n),[0 0.5],[]);
figure,imshow(uint8(img2fin)),title('LCS');
% -------------------------------------------------------------------------
%% Running GMM after Subtracting Grey matter Intensity
% Cluster pixels into 4 classes via a Gaussian mixture (emgm); EM can
% collapse to fewer classes, so retry up to maxiter times, and if it still
% fails switch to an extended feature set and retry again.
% x2 = [double(img2n(:)) double(img2f(:)) double(img2fin(:))];
% x2 = [double(img2f(:)) double(img2fin(:))];
x2 = [double(img2fin(:)) double(img2f(:))];
% x2 = [double(img2fin(:)) double(img2n(:))];
% x2 = [double(img2n(:)) double(img2fin(:))];
% x2 = double(img2f(:));
label = [];maxiter = 5;id=0;
while length(unique(label))~=4 && id<maxiter
label = emgm(x2',4);
k2 = reshape(label,size(img2f,1),size(img2f,2));
id = id+1;
end
if length(unique(label))~=4
display('Changing Features..')
x2 = [double(img2f(:)) double(img2fin(:)) double(img2n(:))];id=0;
while length(unique(label))~=4 && id<maxiter
label = emgm(x2',4);
k2 = reshape(label,size(img2f,1),size(img2f,2));
id = id+1;
end
end
% figure,imshow(mat2gray(k2)),title('GMM after Subtracting GreyIntensity');
% [~, threshold] = edge(img2fin, 'sobel');
% fudgeFactor = .5;
% BWs = edge(img2fin,'sobel', threshold * fudgeFactor);
% figure, imshow(BWs), title('binary gradient mask');
% I = uint8(mat2gray(k));
%%
% Region properties of the 4-class label image.
s2 = regionprops(k2,'centroid','Area','PixelIdxList','PixelList','FilledArea','FilledImage','ConvexImage','Image','Extent','Extrema','Eccentricity');
% centroids = cat(1, s.Centroid);
% imshow(mat2gray(k2))
% hold on
% plot(centroids(:,1), centroids(:,2), 'b*')
%Hematoma
% -------------------------------------------------------------------------
%% Separate component masks
% Build one binary mask per labelled component.
s=s2;
comps = zeros([size(img2n) numel(s)]);
for i = 1 : numel(s)
complot = zeros(size(img2n));
for j = 1:length(s(i).PixelList)
complot(s(i).PixelList(j,2),s(i).PixelList(j,1)) = 1;
end
comps(:,:,i) = complot;
end
%% Calculate mean intensity for each connected component
compsInt = zeros(numel(s),1);
for i = 1: numel(s)
temp = comps(:,:,i).*255;
compsInt(i) = mean(img2n(find(temp)));
end
%% Saving Hematoma image
% Condition for hematoma to exist has to be included
% Area Constraint
areas = zeros(1,numel(s));
for i = 1:numel(s)
areas(1,i) = s(i).Area;
end
% Brain area = total area minus the largest component (assumed background).
BrArea = sum(areas)-max(areas);
% The component with the highest mean intensity is the hematoma candidate;
% it is highlighted in red on an RGB copy of the image.
[~,hIdx] = max(compsInt);
hImg = repmat(img2n,1,1,3);
hImg(:,:,1) = hImg(:,:,1) + comps(:,:,hIdx).*255;
hImg(:,:,2) = hImg(:,:,1).*(~comps(:,:,hIdx));
hImg(:,:,3) = hImg(:,:,1).*(~comps(:,:,hIdx));
% figure,imshow(uint8(hImg));
if areas(hIdx)<BrArea*(0.4)
figure,imshow(uint8(hImg));
% BUG FIX: 'filename' is only defined when this script is run as the
% commented-out detectEd() function at the top; guard so standalone
% script runs do not error on an undefined variable.
if exist('filename','var')
isdetected = strcat('Hematoma Image saved ', filename);
else
isdetected = 'Hematoma Image saved';
end
else
% Same guard for 'edema_file_name' (function-mode only).
if exist('edema_file_name','var')
isdetected = strcat('Edema not detected in ', edema_file_name);
imwrite(uint8(img2n), edema_file_name);
else
isdetected = 'Edema not detected';
end
end
%Edema
% -------------------------------------------------------------------------
% % % %% Linear Contrast Stretching
% % %
% % % img2fin = imadjust(uint8(img2f));
% % % figure, imshow(uint8(img2fin)),title('LCS');
% ------------------------------------
% % % %% Choosing the largest connected components
% % % %%
% % % x = double(img2ff(:));
% % % label = emgm(x',4);
% % % k = reshape(label,size(img2n,1),size(img2n,2));
% % % subplot(1,4,4), imshow(mat2gray(k)),title('GMM after LCS');
% % % toc
% -------------------------------------------------------------------------
|
% clean
clear all;
%clc;
% load the pre-split raw train/test data
% NOTE(review): variable names XTrain/YTrain/XTest/YTest assumed from
% their use below -- confirm against the contents of the .mat file.
load('/Volumes/TOSHIBA/Seņales suizos tercero, cuarto, quinto y sexto escenario/Datos/Datos_raw/test_train_raw_s_resh.mat');
% define the network layers
layers = [ ...
%input
sequenceInputLayer(1)
%hidden
bilstmLayer(100,'OutputMode','last')
% Output
fullyConnectedLayer(4)
softmaxLayer
classificationLayer
]
% training options (the missing semicolon on 'layers' above echoes the
% layer summary to the console)
options = trainingOptions('adam', ...
'MaxEpochs',2, ...
'MiniBatchSize', 150, ...
'InitialLearnRate', 0.01, ...
'SequenceLength', 500, ...
'GradientThreshold', 1, ...
'ExecutionEnvironment',"auto",...
'plots','training-progress', ...
'Verbose',false);
net = trainNetwork(XTrain,YTrain,layers,options);
% training-set accuracy (%), echoed to console
trainPred = classify(net,XTrain,'SequenceLength',1000);
LSTMAccuracy1 = sum(trainPred == YTrain)/numel(YTrain)*100
% held-out test-set accuracy (%), echoed to console
testPred2 = classify(net,XTest);
LSTMAccuracy2 = sum(testPred2 == YTest)/numel(YTest)*100
|
function varargout = request(action,varargin)
% request [Not a public function] Persistent repository for container class.
%
% Backend IRIS function.
% No help provided.
%
% A memory-locked persistent struct X acts as a process-wide name->data
% store with per-entry write locking:
%   X.name - 1xN cell of entry names
%   X.data - 1xN cell of stored values
%   X.lock - 1xN logical, true when an entry is write-protected
% -IRIS Macroeconomic Modeling Toolbox.
% -Copyright (c) 2007-2017 IRIS Solutions Team.
mlock( );
persistent X;
if isempty(X)
% @@@@ MOSW
X = struct( );
X.name = cell(1,0);
X.data = cell(1,0);
X.lock = false(1,0);
end
%--------------------------------------------------------------------------
switch action
case 'get'
% Returns {value, found}.
ix = strcmp(X.name,varargin{1});
if any(ix)
varargout{1} = X.data{ix};
varargout{2} = true;
else
varargout{1} = [ ];
varargout{2} = false;
end
case 'set'
% Returns {success}; fails only when overwriting a locked entry.
ix = strcmp(X.name,varargin{1});
if any(ix)
if X.lock(ix)
varargout{1} = false;
else
X.data{ix} = varargin{2};
varargout{1} = true;
end
else
X.name{end+1} = varargin{1};
X.data{end+1} = varargin{2};
X.lock(end+1) = false;
varargout{1} = true;
end
case 'list'
varargout{1} = X.name;
case {'lock','unlock'}
% With no names given, (un)lock every entry.
tmp = strcmp(action,'lock');
if isempty(varargin)
X.lock(:) = tmp;
else
pos = doFindNames(X,varargin);
X.lock(pos) = tmp;
end
case 'islocked'
pos = doFindNames(X,varargin);
varargout{1} = X.lock(pos);
case 'locked'
varargout{1} = X.name(X.lock);
case 'unlocked'
varargout{1} = X.name(~X.lock);
case 'clear'
% @@@@@ MOSW
X = struct( );
X.name = cell(1,0);
X.data = cell(1,0);
X.lock = false(1,0);
case 'save'
% Export the whole store, or only the named entries.
if nargin > 1
pos = doFindNames(X,varargin);
x = struct( );
x.name = X.name(pos);
x.data = X.data(pos);
x.lock = X.lock(pos);
varargout{1} = x;
else
varargout{1} = X;
end
case 'load';
% Merge a previously saved store: append unknown names, overwrite
% existing unlocked entries, and error on locked ones.
pos = textfun.findnames(X.name,varargin{1}.name,'[^\s,;]+');
new = isnan(pos);
nnew = sum(new);
X.name(end+(1:nnew)) = varargin{1}.name(new);
X.data(end+(1:nnew)) = varargin{1}.data(new);
X.lock(end+(1:nnew)) = varargin{1}.lock(new);
pos = pos(~new);
if any(X.lock(pos))
pos = pos(X.lock(pos));
container.error(1,X.name(pos));
end
X.data(pos) = varargin{1}.data(~new);
case 'remove'
if ~isempty(varargin)
pos = doFindNames(X,varargin);
X.name(pos) = [ ];
X.data(pos) = [ ];
X.lock(pos) = [ ];
end
case 'count'
varargout{1} = numel(X.name);
case '?name'
varargout{1} = X.name;
case '?data'
varargout{1} = X.data;
case '?lock'
varargout{1} = X.lock;
end
% Nested functions...
%**************************************************************************
function Pos = doFindNames(X,Select)
% Resolve the names in Select to positions in X.name; errors on unknowns.
Pos = textfun.findnames(X.name,Select,'[^\s,;]+');
if any(isnan(Pos))
container.error(2,Select(isnan(Pos)));
end
end % doFindNames( )
end
|
function str = get_arch_id ()
% str = GET_ARCH_ID ()
%
% Returns the OpenCV architecture identifier used on Windows platforms
% ('x64' on win64, 'x86' on win32). On every non-Windows platform the
% result is an empty string.
%
% Primarily intended for build/configuration scripts that need to locate
% OpenCV library or binary paths.

if ~ispc()
    str = '';
elseif strcmp(computer('arch'), 'win64')
    str = 'x64';
else
    str = 'x86';
end
end
|
% Plot elementary test signals: two unit steps, a discrete-width impulse
% and a sampled step (see NOTE below about the last subplot's title).
clc;clear all; close all;
t=-5:0.005:5;
u1=0.5*(sign(t-1)+1);     % unit step delayed to t = 1
u2=0.5*(sign(2*t+1)+1);   % unit step starting at t = -0.5
% one-sample-wide pulse at t = 2.5 (difference of two shifted steps)
ims=0.5*(sign(t-2.5)+1)-0.5*(sign(t-2.5-0.005)+1);
tt=0:5;
rpm=0.5*(sign(tt-3)+1);
subplot(4,1,1);
plot(t,u1);
axis([-5 5 0 1.5]);
title('UNIT STEP SIGNAL');
subplot(4,1,2);
plot(t,u2);
axis([-5 5 0 1.5]);
title('UNIT STEP FUNCTION');
subplot(4,1,3);
plot(t,ims);
axis([-5 5 0 1.5]);
title('IMPULSE FUNCTION');   % typo fixed: was 'IMPLSE FUNCTION'
subplot(4,1,4);
plot(tt,rpm);
grid on;
xlabel('TIMES>>>>>>>');
ylabel('AMPLITUDE>>>>>>>');
% NOTE(review): 'rpm' is a shifted unit STEP sampled at integer times,
% not a ramp -- either the signal or this title looks wrong; confirm intent.
title('UNIT RAMP FUNCTION');
% Evaluate temperature-profile estimation error across all experiments.
clear all
Exps=[1:3 5:53];   % experiment numbers to process (experiment 4 excluded)
for e=1:length(Exps),
ExpNum=Exps(e);
frun=['runs/Exp' num2str(ExpNum) '.mat'];
load (frun);
load dat/Robin_Sensitive_study.mat
%% estimate & temperature profiles
% Logical burn-in mask: (1:N) > N/5 (colon binds tighter than '>'),
% discarding the first fifth of the chain.
iUse=1:N>N/5; %burn in
rhohat=median(rhoc(iUse));%use the median to estimate the true values
dThat=median(dTc(iUse));
Bhat=median(Bc(iUse));
% Try if it there is a better temp estimation to use mean weighted by
% frequency. Also the corresponding tempE is calculated and ploted
% Check the script ExpectionEstimate to see if it makes sense.
% Add by Yuna-April 15th
ExpectionEstimate;
% Profiles: true (suffix t), median estimate (hat), expectation estimate (E).
[tempt{e},z{e}] = TempProfile(H(x),Bt/1000,M(x),Ts(x,1)+dTt);
[temphat{e}] = TempProfile(H(x),Bhat/1000,M(x),Ts(x,1)+dThat);
[tempE{e}] = TempProfile(H(x),EB/1000,M(x),Ts(x,1)+EdT);
%calculate error statistics at the 10 m depth level and vertically averaged
[~,j]=min( abs( z{e} - 10 ) );
T10t(e)=tempt{e}(j);
T10m(e)=temphat{e}(j);
T10e(e)=tempE{e}(j);
err10m(e)=T10m(e)-T10t(e);
errE10m(e)=T10e(e)-T10t(e);
Tavgt(e)=mean(tempt{e});
Tavgm(e)=mean(temphat{e});
Tavge(e)=mean(tempE{e});
erravg(e)=Tavgm(e)-Tavgt(e);
errEavg(e)=Tavge(e)-Tavgt(e);
RMSm(e)=sqrt(mean( (tempt{e}-temphat{e}).^2 ) );
RMSe(e)=sqrt(mean( (tempt{e}-tempE{e}).^2 ) );
end
%% error plots: 10 m error, average error, RMS (median vs expectation estimates)
Nsta=length(Exps);
figure(1)
stem(1:Nsta,err10m); hold on;
stem(1:Nsta,errE10m,'r-'); hold off;
figure(2)
stem(1:Nsta,erravg); hold on;
stem(1:Nsta,errEavg,'r-'); hold off;
figure(3)
stem(1:Nsta,RMSm); hold on;
stem(1:Nsta,RMSe,'r-'); hold off;
figure(4)
plot(T10t,T10e,'o',[240 258],[240 258],'LineWidth',2)
set(gca,'FontSize',14)
title('Ten meter temperature, K')
xlabel('True temp, K')
ylabel('Estimated temp, K')
figure(5)
plot(Tavgt,Tavge,'o',[246 262],[246 262],'LineWidth',2)
set(gca,'FontSize',14)
title('Vertical aveage temperature, K')
xlabel('True temp, K')
ylabel('Estimated temp, K')
%% summary RMS and bias statistics, echoed to the console
sqrt(mean( (T10t-T10e).^2 ))
mean(T10e-T10t)
sqrt(mean( (Tavgt-Tavge).^2 ))
mean(Tavge-Tavgt)
function [callpricef,sigmaf]=mybeta4(beta0,beta1,beta2,lath,T,K)
% Monte-Carlo simulation of an asymmetric GARCH-type price process:
% simulates 100000 paths of length T for the asset price St and its
% conditional variance, driven by standard normal shocks, with leverage
% parameter lath and GARCH coefficients beta0/beta1/beta2.
%
% NOTE(review): the declared outputs callpricef and sigmaf are never
% assigned in the visible body -- this block appears truncated (the
% option-price averaging over strikes K presumably follows). Calling it
% as-is with output arguments requested would error.
%beta0=m_input(1);
%beta1=m_input(2);
%beta2=m_input(3);
%lath=m_input(4);
%T=m_input(5);
%K=m_input(6);
sigma0=0.19319;
%K=(2.15:0.05:2.9);
%[~,col]=size(K);
%T=78;
%K=2.07;
sigmasqrt0=sigma0^2;
r=0.043;
S0=2.301;
St=zeros(100000,T);
sigmac=zeros(100000,T);%sigma
sigmasqrt=zeros(100000,T);%sigma^2
delta0=randn(1,1);delta=randn(100000,T);
%for k=1:col
%callprice_final=zeros(1,k);
%sigmam=zeors(1,k);
%callpricef=zeros(1,k);
for i=1:100000
sigmasqrt(i,1)=beta0+beta1*sigmasqrt0/252+beta2*sigmasqrt0/252*(delta0-lath)^2;%the estimation of first-order sigma^2
sigmac(i,1)=sqrt(sigmasqrt(i,1));%the estimation of first-order sigma
St(i,1)=exp(log(S0)+r-0.5*sigmasqrt(i,1)+sigmac(i,1)*delta(i,1));
for j=2:T
sigmasqrt(i,j)=beta0+beta1*sigmasqrt(i,j-1)+beta2*sigmasqrt(i,j-1)*(delta(i,j-1)-lath)^2;
sigmac(i,j)=sqrt(sigmasqrt(i,j));
St(i,j)=exp(log(St(i,j-1))+r/252-0.5*sigmasqrt(i,j)+sigmac(i,j)*delta(i,j));
end
end
% Tank-system parameter definitions.
% NOTE(review): units and exact meanings are not documented in this file;
% the interpretations below are assumptions to confirm against the model.
R1 = 5;            % presumably flow resistance of tank 1
A1 = 2;            % presumably cross-sectional area of tank 1
R2 = 5;            % presumably flow resistance of tank 2
hmax = 15;         % maximum allowed liquid level
PriorityTank = 1;  % index of the tank given control priority
|
function norm_theta = normalize_angle(theta)
% NORMALIZE_ANGLE Wrap the angle THETA into the interval [-pi, pi].
%   Repeatedly shifts by full turns (2*pi) until the value is in range.
norm_theta = theta;
two_pi = 2 * pi;
% Too far negative: rotate up one full turn at a time.
while norm_theta < -pi
    norm_theta = norm_theta + two_pi;
end
% Too far positive: rotate down one full turn at a time.
while norm_theta > pi
    norm_theta = norm_theta - two_pi;
end
end
% Setup data structures for read / write on the daq board
s = daq.createSession('ni');
% Add output channels: one analog voltage output and two digital lines.
s.addAnalogOutputChannel('Dev1', 0, 'Voltage');
s.addDigitalChannel('Dev1', ['port0/line4'], 'OutputOnly');
s.addDigitalChannel('Dev1', ['port0/line8'], 'OutputOnly');
SAMPLING_RATE = 1000;
s.Rate = SAMPLING_RATE;
% (Previous trial-structured stimulus generation, kept for reference.)
% chanSecDur = round(duration * 60 / 2);
% chanSampDur = round(chanSecDur * SAMPLING_RATE);
% % Initialize the output vectors to zero
% zeroStim = zeros(chanSampDur * 2, 1);
% chanACommand = zeroStim;
% chanBCommand = zeroStim;
% dummyCommand = zeroStim;
% % Create stim output vectors
% chanACommand(1:chanSampDur) = 1;
% chanBCommand(chanSampDur:end) = 1;
% dummyCommand(:) = 1;
% outputData = [zeroStim, chanACommand, chanBCommand, chanBCommand, chanACommand, dummyCommand];
% outputData(end, :) = 0; % To make sure the DAQ doesn't stay on between trials
% Constant output: 10 V on the analog channel and logic high on both
% digital lines for 300000 samples (300 s at 1 kHz).
nSamples = 300000;
outputData = [ones(nSamples,1)*10, ones(nSamples, 1), ones(nSamples, 1)];
outputData(end, :) = 0;   % final sample zero so the DAQ doesn't stay on between trials
queueOutputData(s, outputData);
s.startForeground();      % blocks until all queued data has been output
release(s);
function [] = betweenness(CCpeak, weight_thr, bc_thr)
% Identify hub neurons in the network via betweenness centrality.
%
% CCpeak      pairwise network-topology matrix (e.g. cross-correlation
%             peaks extracted by the mutual-information method); it is
%             symmetrised here before the betweenness computation.
% weight_thr  edges weaker than this are pruned (default 0.1)
% bc_thr      electrodes with betweenness above this are reported as
%             hubs (default 100)
%
% BUGFIX: the original declared no input arguments and referenced an
% undefined variable CCpeak, so it always errored unless run as a script
% with CCpeak in the workspace; the matrix is now an explicit input.
% The hard-coded 64x64 double loop is replaced by vectorised
% thresholding over whatever size CCpeak actually has.
if nargin < 3, bc_thr = 100; end
if nargin < 2, weight_thr = 0.1; end
% Make the matrix symmetric, then drop weak edges.
network_matrix = (CCpeak + CCpeak');
network_matrix(network_matrix < weight_thr) = 0;
% Calc the betweenness in this weighted network
BC = getbc(network_matrix);
% Generate the list of electrodes
elecs = 1:size(network_matrix, 1);
% Output the hub electrodes list (deliberately unsuppressed so results
% echo to the command window, as in the original).
util_convert_hw2ch(elecs(BC > bc_thr))
BC(BC > bc_thr)'
topoplotgy(CCpeak,0);
end
|
%%Arctic multilayer cloud detection algorithm
%Written by Maiken Vassel, latest update 2019
%This is the main program of the classification algorithm. Here the dataset is analysed for
%seeding/non-seeding multilayer clouds from day to day.
%The general structure is that one day (i) is evaluated and the resulting information is written into
%the structure MLC_classification.mat. As soon as MLC_classification.mat is filled for one year
%(loop), the rest of the evaluation is done based on MLC_classification.mat and the loop can be replaced
%by any single day.
%The structure of this program is the following:
%1.Settings for the classification
%2.Name: Used as ending for MLC_classification.mat and for saved plots
%3.Date: Defines the time period of the classification.
%4.Single day or loop over time period
%5.Radiosonde evaluation
%6.Sublimation calculation
%7.Cloudnet (Radar) evaluation as additional information
%8.Pieplots
%9.Skill scores
%Turn on/off specific programs for the specific need.
%NOTE: the invoked Raso_*/Cloudnet_*/Evaluation_* programs are scripts
%that share this workspace (including the loop index i), so variable
%names here must not be changed casually.
clearvars -except MLC_classification
close all;
%Some extra Matlab programs are needed:
addpath('Matlab_extra_functions'); %Path added for Matlab extra Programs (colors,etc.)
addpath('Matlab_extra_functions/matlab2tikz-master/src'); %Path added for converting Pie Plot into .tex file.
addpath('Matlab_extra_functions/floatAxis');
addpath('Matlab_extra_functions/export_fig-master');
load cm.mat; %line colors
%The path to the radiosonde data needs to be specified in Raso_1_read
addpath('Inputdata/Cloudnet'); %Path for Cloudnet (Radar) data
%%
%1.Settings:
%Here you can adjust some settings (max height, relative humidity threshold, etc.):
%NOTE(review): several values below differ from their stated "Std"
%defaults (Rsize, minsub) -- confirm these are the intended run settings.
hmin=0.2228; %Minimum height at ground [m]. Std: hmin=0.2228
hmax=10; %Maximum height [km] upto where it is searched for. Std: hmax=10;
Rsize=400; %Radius of ice crystal [mikrometer]. Std Rsize=100
rhthres=100.0; %Relative Humidity threshold [%]. Std rhthres=100.0
minsub=100; %Minimum thickness of subsaturated layer [m]. Std:20
minsuper=100.0; %Minimum thickness of supersaturated layer [m]. Std:100
%uncert=0.0; %Raso uncertainty -5.0, 0.0, 5.0
gap_min=30; %This number [min] defines the timeperiod for evaluation of Cloudnet. %Std:30min, for test: 15min
ending='MHP_WC'; %Defines the kind of ice particle calculation.
%MHP_WC: Mitchell, 1994: Hexagonal plate; Witchell, 2008: capacitance
%AG: Aggregate, RC: rimed column, SP: star particle
%2.Name:
%Define an name/ending here. This will be used as ending for the struct MLC_classification....mat and
%for the generated plots.
%It is recommended to change the name if the settings are changed.
name=strxcat('r',Rsize,ending); %Used as Std
%name=strxcat(Rsize,'_msub',minsub','RH',rhthres); %Alternative, if RH is changed
%name=strxcat(Rsize,'_msub',minsub,'uncert95'); %Alternative, if minsub is changed
%name=strxcat(Rsize,'_minsuper',minsuper); %Alternative, if minsuper is changed
%name=strxcat(Rsize,'_avgtime',gap_min); %Alternative, if average time is changed
%%
%3.Date:
%Choose the time period for the analysis: Define the length and start date of the analysed time period.
%1-year dataset:
ii=1:365; %Dataset length [days]: Std:1:365
NCloudnet=datenum(2016,06,9+ii,00,00,00); %Dataset start date (use one day before actual start date), [year, month, day] Std: 2016,06,9 => 10.6.16-9.6.17
%24.5-year dataset:
%ii=[1:8926]; %Alternative: until 9.6.2017
%NCloudnet=datenum(1993,01,00+ii,00,00,00); %Alternative
%datestr(NCloudnet); %This is only for control to display chosen time period.
Cloudnet_1_calcN %This function prepares the time for further use
%4.Single day or loop
%Here you specify if you want to analyse only a single day (if you want a plot only for a single day) or do
%the full loop over the 1-year dataset. If you have done the loop once, it will be saved in
%MLC_classification.mat as a structure. Then there is no need to do the loop again over all days. Instead
%load MLC_classification.mat in the beginning. Choose a random single day and run make_MLC_classification
%without the loop (if you e.g. want to plot pie plots).
%If you use/do not use the loop, uncomment/comment out the "for i=..." line below and its matching "end".
%(NOTE(review): the original comment referenced lines 81 and 123, which no
%longer match this file's numbering.)
%Each day is given an index 'i', which is kept for the entire calculation (i=1=> 9.6.2016).
%i=147; %13, 157; %Single day, 147=3.Nov 2017
for i=1:365 %Loop, std: 1:365
%Output for each day:
i %Gives i-number as output
disp(strxcat('Date: ',timestruct.time(i,:))); %Gives date of i-number as output
%%
%5.Radiosonde evaluation
%Here the evaluation of the radiosonde (Raso) data is done.
Raso_1_read %Reads the Raso data of the actual day 'i' and writes it in a Raso-structure.
Raso_3_layers %Calculates mean RH for each subsaturated layer and calculates the sublimation/seeding
layer=1; %Specify which subsaturated layer (layer nr starts counting from top) should be used in Raso_5_findposition
Raso_6_advection %Calculates the wind advection time
clear a c d d3 dhelp dlambda dN dN3 dphi folder idx idx_nonnan ii lambda1 lambda2 lambda3 lat1 lat2 ...
lat3 lon1 lon2 lon3 phi1 phi2 phi3 tadv v3
%%
%6.Sublimation calculation plots
%Sublimation_2_radii %Plots multiple radii in one plot
%Deleting variables that are not needed any more:
clear layer ii j r1 Sublimation tC Seeding maxtime TK1 TK_nocloud RHmax_nocloud RHi_nocloud RHi1 ...
Press_nocloud P1 maxtime H_falldown H_fallbeginn z1 Sublimation50 Sublimation100 Sublimation150 ...
maxtime50 maxtime100 maxtime150 layer k
%%
%7. Including Cloudnet (Radar) for evaluation
Cloudnet_2_read %Reads Cloudnet data and writes into structure
Cloudnet_2_short %Reduces the size of Cloudnet structure from 2000 to 400 time steps.
Cloudnet_4_preparation_adv %Data preparation: excludes cases where radar and radiosonde do not overlap in time, includes the advection
Cloudnet_4_evaluation %Evaluation of Cloudnet. (Defining if cloud above,in between, below)
%Cloudnet_4_plot_sectionlines %Overview plot
end %End of loop
%%
%8. Creating Pieplots after MLC_classification- struct is created.
%Save the struct MLC_classification. Then you only need to run the loop once.
save(strxcat('MLC_classification_',name,'.mat'), 'MLC_classification');
index=[1:365]; %this can be modified if only a shorter time period should be evaluated.
Evaluation_1_calc %finds indicies for the following pie/histogram plots.
%Only Raso(=Radiosonde):
Evaluation_2_pie %Raso-Pie plot
%Evaluation_2_histogram_radii %Histogram with all 3 radius in one.
%Evaluation_2_visual %Reads and evaluates the manual visual detection
%Deleting variables that are not needed any more:
clear Anz_0cloud Anz_1cloud Anz_both Anz_cloudcover Anz_Nan Anz_nonNan Anz_nonseed Anz_onlyseed idx_0cloud ...
idx_1cloud idx_both idx_cloudcover idx_Nan idx_nonNan idx_nonseed idx_onlyseed Anz_noML Anz_noML0 Anz_noML1 ...
idx_noML idx_noML0 idx_noML1
%Cloud categories:
%Evaluation_3_nonSeeding %Cloud category of non-seeding
%Evaluation_3_Seeding %Cloud category of seeding
%Raso and Radar (RC) combined:
Evaluation_4_RC_calc %Preparation of following plots (is included in the two following programs)
Evaluation_4_RC_pie %Pie-plot
%Evaluation_4_RC_histogram_radii %Histogram with all 3 radius in one
%evaluate and tune Bayesian-SARSAQ with the features identified by linear
%regression
%Fits a Bayesian GLM Q-function approximation to the 1-lightbulb meta-MDP
%for a range of costs / priors, saves fit quality (R^2, MSE) and plots.
clear,close all,clc
addpath('../MatlabTools/') %change to your directory for MatlabTools
addpath('../metaMDP/')
addpath('../Supervised/')
addpath('../')
load ../../results/lightbulb_fit.mat
S = lightbulb_problem(1).mdp.states;
nr_actions=2;
nr_states=2; %NOTE(review): overwritten per-cost further below
gamma=1;
feature_names={'VPI','VOC_1','VOC_2','E[R|guess,b]','1'};
selected_features=[1;2;4];
nr_features=numel(selected_features);
costs=logspace(-3,-1/4,15);%0.01;%
% costs = 0.0001;
c = 7; %NOTE(review): shadowed by the loop variable c below; has no effect
% mus=[[1;1;1],[0;1;1],[1;0;1],[1;1;0],[0;0;1],[0;1;0],[1;0;0],[0;0;0],[0.5;0.5;0.5]];
% sigmas = 0.1:0.1:0.3;
mus = [0;0;1]; %prior means of the GLM weights (one column per setting)
sigmas = 0.2;  %prior std of the GLM weights
rep = 1;       %repetitions per (mu, sigma) setting
matws = zeros(size(mus,2),numel(sigmas),rep,3); %learned weights
matr = zeros(size(mus,2),numel(sigmas),rep);    %R^2 of Q_hat vs Q_star
matm = zeros(size(mus,2),numel(sigmas),rep);    %MSE of Q_hat vs Q_star
% mate = zeros(size(mus,2),numel(sigmas),rep);
for m=1:size(mus,2)
for sig=1:numel(sigmas)
disp(num2str(mus(:,m)))
disp(num2str(sigmas(sig)))
for z=1:rep
%NOTE(review): this loop opens ~3 new figures per cost (45 per rep) and
%only the last cost's results end up in matws/matm/matr.
for c=1:numel(costs)
% for c=7:10
mdp=metaMDP(nr_actions,gamma,nr_features,costs(c));
nr_episodes=1000;
fexr=@(s,a,mdp) feature_extractor(s,a,mdp,selected_features);
mdp.action_features=1:nr_features;
sigma0=sigmas(sig);
glm=BayesianGLM(nr_features,sigma0);
glm.mu_0=mus(:,m);
glm.mu_n=mus(:,m);
[glm,avg_MSE,R_total]=BayesianSARSAQ(mdp,fexr,nr_episodes,glm);
figure(),
subplot(2,1,1)
plot(smooth(avg_MSE,100))
xlabel('Episode','FontSize',16)
ylabel('Average MSE','FontSize',16)
subplot(2,1,2)
plot(smooth(R_total,100))
xlabel('Episode','FontSize',16)
ylabel('R_{total}','FontSize',16)
w=glm.mu_n;
figure()
bar(w)
ylabel('Learned Weights','FontSize',16)
set(gca,'XTickLabel',feature_names(selected_features),'FontSize',16)
%plot the corresponding fit to the Q-function
nr_states=size(lightbulb_problem(c).mdp.states,1);
%NOTE(review): F persists across costs; if nr_states shrinks, stale rows
%from a previous cost remain in F.
for s=1:nr_states
F(s,:)=fexr(lightbulb_problem(c).mdp.states(s,:),1,mdp);
end
valid_states=and(sum(lightbulb_problem(c).mdp.states,2)<=30,...
sum(lightbulb_problem(c).mdp.states,2)>0);
Q_hat(:,1)=F*w;
Q_hat(:,2)=F(:,3);
V_hat=max(Q_hat,[],2);
qh = Q_hat(valid_states,1);
qs = lightbulb_problem(c).fit.Q_star(valid_states,1);
R2(c)=corr(Q_hat(valid_states,1),lightbulb_problem(c).fit.Q_star(valid_states,1));
lightbulb_problem(c).w_BSARSA=w;
lightbulb_problem(c).Q_hat_BSARSA=Q_hat;
lightbulb_problem(c).V_hat_BSARSA=V_hat;
lightbulb_problem(c).R2_BSARSA=R2(c);
fig_Q=figure()
%NOTE(review): Q_hat(valid_states) linearly indexes the 2-column matrix;
%the xlabel suggests Q_hat(valid_states,1) was intended -- confirm.
scatter(Q_hat(valid_states),lightbulb_problem(c).fit.Q_star(valid_states,1))
set(gca,'FontSize',16)
xlabel(['$\hat{Q}=',modelEquation(feature_names(selected_features),roundsd(w,4)),'$'],...
'Interpreter','LaTeX','FontSize',16)
ylabel('$Q^\star$','FontSize',16,'Interpreter','LaTeX')
title(['Bayesian SARSA learns Q-function of 1-lightbulb meta-MDP, R^2=',num2str(roundsd(R2(c),4))],'FontSize',16)
saveas(fig_Q,['../../results/figures/QFitToyProblemBayesianSARSA_c',int2str(c),'.fig'])
saveas(fig_Q,['../../results/figures/QFitToyProblemBayesianSARSA_c',int2str(c),'.png'])
%% Compute approximate PRs
observe=1; guess=2;
%NOTE(review): loop stops at nr_states-1, so the last state's PR row is
%never filled -- confirm this is intentional.
for s=1:nr_states-1
approximate_PR(s,observe)=Q_hat(s,observe)-V_hat(s);
approximate_PR(s,guess)=Q_hat(s,guess)-V_hat(s);
end
lightbulb_problem(c).approximate_PRs=approximate_PR;
% end
matws(m,sig,z,:) = w;
matm(m,sig,z) = immse(qh(:),qs(:));
matr(m,sig,z) = corr(qh(:),qs(:))^2;
% esim
% mate(m,sig,z) = er;
end
end
end
end
save('../../results/lightbulb_fit_.mat','lightbulb_problem')
% Demo: solve the 'funny2D01' boundary-value problem with the fc_vfemp1
% P1 finite-element package and, when the fc_siplt package is available,
% plot the two solution components.
fc_tools.utils.cleaning()
N=10;                             % mesh refinement parameter
options={'perm',@(A) colamd(A)};  % column-ordering permutation passed to the solver
[bvp,info]=fc_vfemp1.examples.setBVPfunny2D01(N,2);
fc_vfemp1.examples.print_info(info)
fprintf('*** Solving %s\n',info.name)
[U,SolveInfo]=bvp.solve('split',true,'time',true,options{:});
fprintf(' -> ndof (number of degrees of freedom) = %d\n',sum(cellfun(@(x) size(x,1),U)));
fc_vfemp1.examples.print_info(SolveInfo)
if fc_tools.utils.is_fcPackage('siplt')
fprintf('*** Graphics with fc_siplt package\n');
tstart=tic();
Th=bvp.Th;
% NOTE(review): 'options' is reused here for the figure layout, clobbering
% the solver options defined above.
options=fc_tools.graphics.DisplayFigures('nfig',2); % To present 'nicely' the 2 figures
figure(1)
fc_siplt.plot(Th,U{1})
axis image;axis off;shading interp
colorbar
figure(2);
fc_siplt.plot(Th,U{2})
axis image;axis off;shading interp
colorbar
tcpu=toc(tstart);
fprintf(' -> Done it in %.3f(s)\n',tcpu)
end
|
%DAM_HIST Histogram of dam fly activity
% [avg,sem]=dam_hist(o,wells,p)
%
% o      DAM data structure; fields used here: data, names, x, f,
%        daylight, lights (structure inferred from the accesses below
%        -- TODO confirm against the loader)
% wells  column indices into o.data / o.f; defaults to all wells
% p      parameter struct from dam_hist_par (title, method, hours,
%        skipDays, spanDays, lightsOn, lightsOff, firstHour, barSize,
%        plotSEM)
%
% SDL 23-JUN-02 : abstracted core functionality to HIST.M
%
function [avg,sem]=dam_hist(o,wells,p)
% Short-circuit operators below also avoid evaluating o.daylight{...}
% when o.daylight is empty (the old '&' evaluated both sides).
if nargin < 2 || isempty(wells)
    wells=1:size(o.data,2);
end
if nargin<3
    p=dam_hist_par;
end
if isempty(p.title)
    if length(wells)>1
        titl=sprintf('%s-%s',o.names{wells(1)}, o.names{wells(length(wells))});
    else
        % BUGFIX: this assignment had been commented out, leaving 'titl'
        % undefined (and title() erroring) whenever a single well was
        % plotted with an empty p.title.
        titl=o.names{wells(1)};
    end
else
    titl=p.title;
end
x = o.x;
f = o.f(:,wells);
first_hour = 1;
if ~isempty(o.daylight) && ~isempty(o.daylight{wells(1)})
    lights=o.lights;
    if ~isempty(lights)
        lights=lights(first_hour:length(x)-(1-first_hour));
    end
else
    lights=[];
end
[avg,sem] = my_hist(x, f, lights, p.method, ...
    p.hours, p.skipDays, p.spanDays, p.lightsOn, ...
    p.lightsOff, p.firstHour, p.barSize, p.plotSEM);
% 48 bins per day (30-min bins) is assumed for the day count -- TODO confirm
title(sprintf('%s (n=%d days=%.1f)',titl,length(wells),length(f)/48));
function [avg,sem]=my_hist(x, f, lights, method, ...
hours, skipDays, spanDays, lightsOn, ...
lightsOff, firstHour, minPerBar, plotSEM)
% HIST Histogram of fly activity.
%
% Input Arguments:
%
% x hour values
% f activity by hour
% lights daylight on/off flags
% method plotting method: 0 = first hour to last (default)
% 1 = center dark
% 2 = center light
% 3 = lights off to lights on
% 4 = lights on to lights off
% hours baseline hours
% skipDays
% spanDays
% lightsOn
% lightsOff
% firstHour
% minPerBar
% plotSEM
%
% NOTE(review): the method list above disagrees with the switch at the
% bottom of this function, whose case 1 is commented 'center light' and
% case 2 'center dark' -- confirm which labelling is correct.
%
% Output Arguments:
%
% AVG mean activities by hour
% SEM std dev of activities by hour
%
% abstracted from Pablo's DAM_HIST.M by Simon Levy, 24-JUN-02
hours_per_bin = minPerBar / 60;
bins_per_hour = 60 / minPerBar;
lightsoff_color = .5;
lightson_color = 1;
nbins = round(hours * bins_per_hour);
% post-hoc warning about display
warning = '';
%m002 this is a hack, fixes a bug brutely
if x(1) <= 0
x = x+hours;
end
% average activity across the selected wells, then trim to whole periods
f = mean(f,2);
xfirst = min(find(x>(skipDays*hours)));
xlast = xfirst-1+nbins*floor((length(x)-xfirst+1)/nbins);
if (spanDays>0) & (spanDays<Inf)
xlast = min(xlast,xfirst+48*spanDays-1);
end
f = f(xfirst:xlast);
x = x(xfirst:xlast);
if ~isempty(lights)
lights = lights(xfirst:xlast);
end
n = length(f);
if isempty(firstHour)
firstHour=x(1);
end
% fold the time axis into one 'hours'-long period of nbins bins
binno = floor(bins_per_hour*mod(x-firstHour,hours))+1;
binhour = (0:hours_per_bin:hours-hours_per_bin)+firstHour;
binhour = mod(binhour,1)*60+100*floor(mod(binhour,hours));
if lightsOff>lightsOn
binlight = (binhour > lightsOn) & (binhour <= lightsOff);
else
binlight=~((binhour > lightsOff) & (binhour <= lightsOn));
end
binhour = floor(binhour./100)+mod(binhour,100)./60;
% per-bin sums/counts via sparse accumulation
s = full(sparse(binno,1,f));
c = full(sparse(binno,1,ones(size(f,1),1)));
avg = s ./ c;
sumsq = full(sparse(binno,1,f.^2));
% NOTE(review): population std (no n-1 correction) -- confirm intended
se = sqrt(((sumsq ./ c) - (avg.^2)));
sem = se ./ sqrt(c);
% default to normal order
bins = 1:length(s);
% process daylight vector
if ~isempty(lights)
ls = full(sparse(binno,1,lights));
lc = full(sparse(binno,1,ones(size(lights,1),1)));
lmean = ls ./ lc;
binlight = lmean;
on = find(lights);
off = find(lights == 0);
txt = sprintf('Mean on=%.1f off=%.1f all=%.1f', ...
mean(f(on)), mean(f(off)), mean(f));
% get contiguous dark or light bins
if method
if method == 1 | method == 4
bit = 1;
else
bit = 0;
end
maxbins = length(binlight);
where = find(binlight == bit);
diff = where(2:end) - where(1:end-1);
split = find(diff > 1);
if ~isempty(split)
if length(split) > 1
warning = 'More than one dark/light cycle; using normal format';
method = 0;
end
% rotate so the run starts after the (single) split point
where = [where(split+1:end);where(1:split)];
end
end
if ~isempty(find(binlight ~= fix(binlight)))
%warning = 'Grayscale light bins; using normal format';
method = 0;
end
switch method
case 1 % center light
bins = center(where, binlight);
case 2 % center dark
bins = center(where, binlight);
case 3 % lights off to lights on
bins = left(where, binlight);
case 4 % lights on to lights off
bins = left(where, binlight);
end
else
txt = sprintf('Mean=%f', mean(f));
% txt = sprintf('Sum=%f', sum(f));
end
cla;hold on;
% plot histogram boxes
for i=1:length(bins)
j = bins(i);
a = [i-1 i i i-1];
b = [ 0 0 avg(j) avg(j)];
% NOTE(review): the next assignment is dead code -- it is immediately
% overwritten by the grayscale formula on the following line.
grayscale = .65+0.3*(0.5*((binlight(j)>0) + (binlight(j)>0.99)));
grayscale = (1-binlight(j)) * lightsoff_color + (binlight(j))*lightson_color;
fill(a,b,[1 1 1]*grayscale);
end
% plot standard error mean
if (plotSEM)
hold on
plot((1:length(s))-0.5, avg(bins)+sem(bins),'.');
end
% axis labels etc.
%set(gca, 'xticklabel', fix(bin2hrs(bins(1:2:end))+.5));
xtick=[0:1:length(bins)];
%binhour
ticklabel=cell(1,length(xtick)); % empty tick labels
% find bins for 0,6,12,18 hrs and label those ticks
for i=0:6:18
%bin=1+max(find(binhour<=i));
offset=mod(binhour-i,hours);
ofzero=find(offset==min(offset));
bin=ofzero+1;
if (bin>length(xtick))
bin=1;
end
if ~isempty(bin)
ticklabel{bin}=num2str(i);
%fprintf('bin %d=%d\n',i,bin);
end
end
set(gca, 'xtick', xtick)
%xtick
set(gca,'xticklabel',ticklabel);
%ticklabel
set(gca,'xlim',[xtick(1) xtick(end)]);
set(gca,'tickdir','in');
xlabel('hours');
ylabel('mean activity');
text(1, max(avg), txt)
% put up any warning afterwards
if ~isempty(warning)
warndlg(warning)
end
% return bin numbers for center-aligning light or dark
function bins = center(where, light)
% Surround the contiguous run of bin numbers in WHERE (a column vector)
% with the remaining bins so the run ends up centered; LIGHT only
% supplies the total bin count.  Indices wrap once past length(LIGHT).
nbins = length(light);
npad = nbins - length(where);
rpad = fix(npad / 2);
lpad = npad - rpad;   % left side takes the extra bin when npad is odd
start = where(1) - lpad;
if start < 0
    start = nbins + start;   % wrap (a start of exactly 0 passes through)
end
k = start;
for n = 1:lpad
    left_side(n) = k;        % grows a row vector, as in the original
    k = k + 1;
    if k > nbins
        k = 1;
    end
end
bins = [left_side'; where];
last = bins(end);
for n = 1:rpad
    k = last + n;
    if k > nbins
        k = 1;               % NB: collapses to 1, no incremental wrap
    end
    bins(end+1) = k;
end
% return bin numbers for left-aligning light or dark
function bins = left(bins, light)
% Extend BINS with the remaining bin numbers, counting upward from its
% last entry and wrapping past length(LIGHT), so the run stays
% left-aligned at the start of the axis.
nbins = length(light);
k = bins(end);
while length(bins) < nbins
    k = k + 1;
    if k > nbins
        k = 1;
    end
    bins(end+1) = k;
end
|
%% ------------------------------------------------------------------------%
% EE 569 Homework #3
% Date: Nov. 1, 2015
% Name: Faiyadh Shahid
% ID: 4054-4699-70
% Email: fshahid@usc.edu
%------------------------------------------------------------------------%
function type = mvbq(CMY_vector)
% MVBQ Select the Minimum Brightness Variation Quadruple for a pixel.
%   type = mvbq(CMY_vector) returns one of 'CMYW', 'MYGC', 'RGMY',
%   'KRGB', 'RGBM', 'CMGB' -- the tetrahedron of the color cube that
%   contains the pixel -- given a 1x1x3 CMY triple with components
%   in [0, 1].
%
% This code is inspired from the following paper:
% D. Shaked, N. Arad, A. Fitzhugh, I. Sobel, "Color Diffusion:
% Error-Diffusion for Color Halftones", HP Labs Technical Report,
% HPL-96-128R1, 1996.
C = CMY_vector(1,1,1); M = CMY_vector(1,1,2); Y = CMY_vector(1,1,3);
% In RGB terms (R=1-C, G=1-M, B=1-Y, normalized to [0,1]):
%   x < 1  <=>  R+G > 1       y < 1  <=>  G+B > 1
%   z < 1  <=>  R+G+B > 2     z < 2  <=>  R+G+B > 1
x = C+M; y = M+Y; z = C+M+Y;
if (x < 1)                  % R+G > 1
    if (y < 1)              % G+B > 1
        if (z < 1)          % BUGFIX: was z < 2; CMYW requires R+G+B > 2
            type = 'CMYW';
        else
            type = 'MYGC';
        end
    else
        type = 'RGMY';
    end
else
    if (~(y < 1))           % G+B <= 1
        if (~(z < 2))       % BUGFIX: was ~(z < 1), which is always true
            type = 'KRGB';  % here (z >= x >= 1), making RGBM unreachable;
        else                % KRGB requires R+G+B <= 1, i.e. z >= 2
            type = 'RGBM';
        end
    else
        type = 'CMGB';
    end
end
end
|
% Written by Patrick Strassmann
function [probeCategVect, rewardCategVect, probeStmCategVect, rewardStmCategVect] = findPrecededTrialCateg(startTime, endTime, eventStartTimes_probe, eventStartTimes_rwd, eventStartTimes_probeStm, eventStartTimes_rwdStm)
% For each event, report the category of the event that immediately
% preceded it: 1 if the predecessor was a reward/rewardStm event, 2 if it
% was a probe/probeStm event, 0 if the event is the first of the session.
%
% startTime/endTime     session bounds as strings; their numeric
%                       difference sizes the time-bin vector
% eventStartTimes_*     event times used as 1-based indices into that
%                       vector (assumed to lie within it -- TODO confirm)
%
% BUGFIX: the original guard `if nargin<5` left eventStartTimes_rwdStm
% undefined when exactly five arguments were passed; each optional
% argument is now defaulted independently.
if nargin < 6
    eventStartTimes_rwdStm = [];
end
if nargin < 5
    eventStartTimes_probeStm = [];
end
timeBins = str2double(endTime)-str2double(startTime);
timeBinsVect = zeros(1,timeBins);
% Event-type codes; later assignments win on colliding times, in the same
% order as the original implementation.
timeBinsVect(eventStartTimes_probe)=2;
timeBinsVect(eventStartTimes_rwd)=1;
timeBinsVect(eventStartTimes_probeStm)=4;
timeBinsVect(eventStartTimes_rwdStm)=3;
timeBinsVect(timeBinsVect==0) = [];
% Category of the event preceding each retained event (0 for the first).
% This replaces the four near-identical loops of the original.
prevType = [0 timeBinsVect(1:end-1)];
prevCateg = zeros(size(prevType));
prevCateg(prevType==1 | prevType==3) = 1;   % preceded by reward / rewardStm
prevCateg(prevType==2 | prevType==4) = 2;   % preceded by probe / probeStm
% Outputs keep one slot per supplied event time; slots beyond the number
% of events that survived collision resolution stay zero (as before).
rewardCategVect = zeros(1,numel(eventStartTimes_rwd));
tmp = prevCateg(timeBinsVect==1);
rewardCategVect(1:numel(tmp)) = tmp;
probeCategVect = zeros(1,numel(eventStartTimes_probe));
tmp = prevCateg(timeBinsVect==2);
probeCategVect(1:numel(tmp)) = tmp;
rewardStmCategVect = zeros(1,numel(eventStartTimes_rwdStm));
tmp = prevCateg(timeBinsVect==3);
rewardStmCategVect(1:numel(tmp)) = tmp;
probeStmCategVect = zeros(1,numel(eventStartTimes_probeStm));
tmp = prevCateg(timeBinsVect==4);
probeStmCategVect(1:numel(tmp)) = tmp;
function plot_lines_noise
% Plot median rotation / translation error curves versus translation
% magnitude from precomputed results, and save the figure as a PDF.
% Expects t_scale_noise.mat to provide (at least) 'tlens' and the data
% consumed by util.xdraw_main -- structure inferred; TODO confirm.
load('t_scale_noise.mat');
% Style tables have 13 entries but only the first 9 methods are used.
marker= { '^', 'v', 'square', 'o', 'x', 'x', 'none', '+','none', '<','v','v','^'};
color= {'r','g','b','y','m','c','g','k','r','g','b','b','g'};
markerfacecolor= color;%{'r','g','n','m','n','n','r','r','g'};
linestyle= {'-','-','-','-','-','-','--','-','--',':','-','-','-'};
for i = 1:9
method_list(i).name= num2str(i);
method_list(i).marker = marker{i};
method_list(i).color= color{i};
method_list(i).markerfacecolor= markerfacecolor{i};
method_list(i).linestyle= linestyle{i};
end
close all;
h = sp_position();
set(gcf,'Units','normal');
set(gcf, 'PaperPositionMode', 'auto');
% subplot(1,5,1);
sp_format(1);
XLabel = 'Translation magnitude';
Xarg = tlens;
ws = ones(length(method_list), 1);
yrange = [0 35];
% 'p' returned below is unused.
[mnames, p] = util.xdraw_main(Xarg,yrange,method_list,'med_r','Median Rotation',XLabel,'Rotation Error (degrees)', ws);
util.correct_margin();
% subplot(1,5,2);
sp_format(2);
yrange = [0 100];
ws = ones(length(method_list), 1);
util.xdraw_main(Xarg,yrange,method_list,'med_t','Median Translation',...
XLabel,'Translation Error (percent)',ws);
util.correct_margin();
% subplot(1, 5, 5);
% sp_format(5);
% ws = ones(length(method_list), 1);
% yrange = [0 10];
% xdraw_main(Xarg,yrange,method_list,'mean_reproj_pts_lines','Mean Reprojection',...
% XLabel,'Reprojection Error (pixels)',ws);
% correct_margin();
% hL = legend(mnames, 'Orientation','horizontal', 'Position', [0.2 0.9 0.6 0.1]);
hL = legend(mnames, 'Orientation','vertical', 'Position', [0.85 0.2 0.1 0.6]);
% Size the paper to the on-screen figure so the PDF has no margins.
set(h,'Units','Inches');
pos = get(h,'Position');
set(h,'PaperPositionMode','Auto','PaperUnits','Inches','PaperSize',[pos(3), pos(4)])
print(h,'figs/trans.pdf','-dpdf','-r0')
end
function h = sp_position
% Create the fixed-size figure window (500x225 px, screen offset
% 100,100) shared by the plotting routines in this file.
figpos = [100 100 500 225];
h = figure('position', figpos);
end
function sp_format(i)
% Select the i-th manually laid-out panel: each is 0.28 wide, spaced
% 0.40 apart starting at x = 0.05, and spans the full figure height.
panel_left = 0.05 + 0.40 * (i - 1);
subplot('Position', [panel_left 0.0 0.28 0.99]);
end
function [aV] = vol_invert(aV_input, varargin)
%
% NAME
%
%  function [aV] = vol_invert(aV_input [, aV_maskVol])
%
%
% ARGUMENTS
% INPUTS
%  aV_input        volume          data space to "invert"
%  aV_maskVol      volume (optional) if specified, use non-zero
%                                  entries as mask for inversion
%
% OUTPUTS
%  aV              volume          volume with intensities inverted
%
% DESCRIPTION
%
%  'vol_invert' "inverts" the intensity values of its input volume,
%  creating a "negative" image: every masked voxel v becomes
%  f_max - (v - f_min), where f_max / f_min are the global extrema of
%  the whole input volume.  Voxels outside the mask are returned as 0.
%
%  If an optional <aV_maskVol> is passed, then only values in the
%  <aV_input> that correspond to non-zero intensities in the mask
%  are processed.
%
% PRECONDITIONS
%
%  o aV_input is a volume, i.e. 3 dimensional data structure.
%
% POSTCONDITIONS
%
%  o A logical "negative" (i.e. inverted intensity) volume is returned.
%
%
% HISTORY
% 08 December 2008
% o Initial design and coding.
%

%%%%%%%%%%%%%%
%%% Nested functions
%%%%%%%%%%%%%%
	function error_exit( str_action, str_msg, str_ret)
		fprintf(1, '\tFATAL:\n');
		fprintf(1, '\tSorry, some error has occurred.\n');
		fprintf(1, '\tWhile %s,\n', str_action);
		fprintf(1, '\t%s\n', str_msg);
		error(str_ret);
	end
%%%%%%%%%%%%%%
%%%%%%%%%%%%%%

v_sizeInput	= size(aV_input);
V_mask		= ones(v_sizeInput);
if length(varargin) >= 1, V_mask = varargin{1}; end

% BUGFIX: the original `if v_sizeMask ~= v_sizeInput` only fired when
% *every* dimension differed (elementwise compare inside `if` requires
% all elements true), and crashed outright for volumes with a different
% number of dimensions; isequal catches any mismatch.
if ~isequal(size(V_mask), v_sizeInput)
    error_exit('checking volumes', 'mask and input volumes mismatch.', '1');
end

% Global intensity extrema of the *whole* input volume (not just the
% masked region), matching the original behavior.
f_max		= max(aV_input(:));
f_min		= min(aV_input(:));

% Vectorized masked inversion; replaces the original O(n^3) triple loop
% (and a dead pre-copy of the masked voxels that the loop overwrote).
aV		= zeros(v_sizeInput);
v_mask		= find(V_mask > 0);
aV(v_mask)	= f_max - (aV_input(v_mask) - f_min);

end
% File tema2.M
% Function: tema2
% Call: tema2(M, wt1)
%
% Plots the frequency responses of low-pass FIR filters designed with
% fir1 for nine window types (rectangular, triangular, Blackman,
% Chebyshev, Hamming, Hanning, Kaiser, Lanczos, Tukey), one figure per
% window (figures 1-9).  For the parameterised windows (Chebyshev,
% Kaiser, Lanczos, Tukey) several parameter values are overlaid in the
% same figure.  Each figure also shows the ideal low-pass response as a
% dashed red line.  Finally (part b), the Chebyshev window is
% re-evaluated for M = 16, 24, 32 in the three subplots of figure 10 to
% show the effect of the filter order.
%
% M    filter order (natural number); fir1 builds a length-(M+1) filter,
%      so every window is generated with M+1 points.
% wt1  normalised cut-off frequency in (0, 1), as required by fir1; it
%      is multiplied by pi only when drawing the cut-off line.
%
% The function returns nothing; it only produces the figures.  Any error
% raised by the toolbox routines propagates to the command window.
%
% Uses: WAR_ERR, lanczos (local window generator)
% Author:  Irina COSTACHESCU
% Created: December 18, 2017
% Updated: January 5, 2018
function tema2 (M,wt1)
%{
Part a: frequency characteristics of the filters obtained with the nine
windows; for the parameterised windows the parameter values are varied
as required by the assignment.  The repetitive design/plot/decorate
sequence of the original is factored into the two local helpers below.
%}
plot_window(1, M, wt1, boxcar(M+1));
finish_figure(wt1, 'FTJ tip FIR cu fereastra Rectangulara');

plot_window(2, M, wt1, triang(M+1));
finish_figure(wt1, 'FTJ tip FIR cu fereastra Triunghiulara');

plot_window(3, M, wt1, blackman(M+1));
finish_figure(wt1, 'FTJ tip FIR cu fereastra Blackman');

for r = [80 90 95 100]              % Chebyshev side-lobe attenuation [dB]
    plot_window(4, M, wt1, chebwin(M+1, r));
end
finish_figure(wt1, 'FTJ tip FIR cu fereastra Cebisev');
% BUGFIX: the first entry was labelled 'r = 82dB' although its curve is
% computed with chebwin(M+1, 80).
legend('r = 80dB','r = 90dB','r = 95dB','r = 100dB');

plot_window(5, M, wt1, hamming(M+1));
finish_figure(wt1, 'FTJ tip FIR cu fereastra Hamming');

plot_window(6, M, wt1, hanning(M+1));
finish_figure(wt1, 'FTJ tip FIR cu fereastra Hanning');

for beta = [1 3 5 9]                % Kaiser shape parameter
    plot_window(7, M, wt1, kaiser(M+1, beta));
end
finish_figure(wt1, 'FTJ tip FIR cu fereastra Kaiser');
legend('beta = 1dB','beta = 3dB','beta = 5dB','beta = 9dB');

for L = [0.5 1 2 3]                 % Lanczos parameter
    plot_window(8, M, wt1, lanczos(M+1, L));
end
finish_figure(wt1, 'FTJ tip FIR cu fereastra Lanczos');
legend('L = 0.5','L = 1','L = 2','L = 3');

for alfa = [0.25 0.47 0.78 1]       % Tukey taper fraction
    plot_window(9, M, wt1, tukeywin(M+1, alfa));
end
finish_figure(wt1, 'FTJ tip FIR cu fereastra Tukey cu alfa = 25%');
legend('alfa = 25%','alfa = 47%','alfa = 78%','alfa = 100% ');

%{
Part b: using the Chebyshev window (with chebwin's default 100 dB
attenuation), the responses for M = 16, 24, 32 are drawn side by side in
figure 10.  The larger M is, the closer the frequency characteristic
gets to that of an ideal low-pass filter.
%}
wt = wt1*pi;
figure(10)
orders = [16 24 32];
for k = 1:3
    Mk = orders(k);
    f = chebwin(Mk+1);
    h = fir1(Mk, wt1, f);
    [H, w] = freqz(h);
    subplot(1, 3, k);
    if k == 1
        % Only the first panel gets fixed axes and axis labels, matching
        % the original layout.
        axis([0 pi 0 1.2]);
        hold on
    end
    plot(w, abs(H));
    line([wt wt],[0 1],'Color','red','LineStyle','--');
    line([0 wt],[1 1],'Color','red','LineStyle','--');
    set(gca,'XTick',wt,'XTickLabel',{'wt'});
    if k == 1
        xlabel('w');
        ylabel('Amplitudinea raspunsului in frecventa');
    end
    title(sprintf('FTJ tip FIR cu fereastra Chebysev M=%d', Mk));
end
end

function plot_window(figno, M, wt1, f)
% Design the length-(M+1) low-pass FIR filter for window F and add the
% magnitude of its frequency response to figure FIGNO.
h = fir1(M, wt1, f);
[H, w] = freqz(h);
figure(figno);
axis([0 pi 0 1.2]);   % common axis limits, easy comparison across figures
hold on;
plot(w, abs(H));
end

function finish_figure(wt1, titlestr)
% Draw the ideal low-pass response (dashed red lines), mark the cut-off
% frequency wt = wt1*pi on the x-axis and label the current figure.
wt = wt1*pi;
line([wt wt],[0 1],'Color','red','LineStyle','--');
line([0 wt],[1 1],'Color','red','LineStyle','--');
set(gca,'XTick',wt,'XTickLabel',{'wt'});
xlabel('w');
ylabel('Amplitudinea raspunsului in frecventa');
title(titlestr);
end
|
function opts = parse_varargin(defaults, optargs)
% PARSE_VARARGIN Merge user-supplied name/value pairs into a defaults struct.
%
% Required Inputs
% ===============
% defaults      A structure holding the default values, e.g.
%               defaults = struct('name1', val1, 'name2', val2). Its
%               field names define the set of recognized options.
%
% optargs       Cell array of optional arguments (the caller's varargin),
%               expected to hold propertyName/propertyValue pairs.
%
% Output
% ======
% opts          Structure with the final value of every option: the
%               default unless the caller supplied a replacement.
%
% Reference
% =========
% stackoverflow.com/questions/2775263/how-to-deal-with-name-value-pairs-of-
% function-arguments-in-matlab
%
opts = defaults;
validNames = fieldnames(opts);
nArgs = numel(optargs);
% Pairs come in twos; an odd count means a name without a value.
if mod(nArgs, 2) ~= 0
    error('Variable arguments need propertyName/propertyValue pairs');
end
for idx = 1:2:nArgs
    % Names are matched case-insensitively against the (lowercase) fields.
    name = lower(optargs{idx});
    if ~ismember(name, validNames)
        error('%s is not a recognized parameter name', name);
    end
    opts.(name) = optargs{idx + 1};
end
end
function start_listening
% MATLEAP.START_LISTENING Start listening for frames from Leap motion controller (needed for matleap.frames)
%
% Delegates to the matleap MEX gateway; per the line above, command code 3
% selects the start-listening operation. NOTE(review): the opcode table
% lives in the matleap.matleap MEX source — confirm there if changing.
matleap.matleap(3);
|
function [img] = loadGrayImage(filename)
% LOADGRAYIMAGE Loads an image, and, if RGB, makes it greyscale.
%
% Input:
%   filename - path to any image file readable by IMREAD
% Output:
%   img      - 2-D intensity image; truecolor inputs are converted with
%              RGB2GRAY, grayscale/indexed inputs are returned unchanged
%
% Note: the local variable was renamed from "image" to "img" so it no
% longer shadows MATLAB's built-in IMAGE function inside this scope.
img = imread(filename);
% A third dimension of size 3 indicates an RGB (truecolor) image.
if size(img, 3) == 3
    img = rgb2gray(img);
end
end
|
classdef kcsd2d < handle
    % KCSD2D Kernel Current Source Density (kCSD) estimation on a 2D grid.
    %
    % Given electrode positions (el_pos) and recorded potentials (pots),
    % builds a basis-source model, cross-validates the R and lambda
    % hyper-parameters, and exposes the estimated CSD / potentials through
    % lazily recomputed properties (CSD_est, pots_est). Expensive matrices
    % can be cached on disk when manage_data == 1.
    %
    % NOTE(review): the numerical core is delegated to external helpers
    % (make_src_2d, create_dist_table, make_b_pot_matrix_2D,
    % make_b_src_matrix_2D, make_b_interp_pot_matrix_2D, cross_validation,
    % estimation, kcsd_plot, calc_Rs, calc_lambdas, choose_CV_image,
    % calc_min_dist, generate_filename, lambda_sampling_1/2,
    % get_choose_lambda_parameters) that are not part of this file; their
    % contracts are assumed here, not verified.
    %-------------------------------------------------------------------------
    properties
        X % X - component of the estimation area
          % (meshgrid logic)
        Y % Y - component of the estimation area
          % (meshgrid logic, same size as X)
        X_src % source positions
        Y_src
        h = 1 % h parameter of the kCSD method
        sigma = 1 % space conductivity
        R % Radius of basis element
        lambda % lambda parameter for ridge regression
        Rs % set of R parameters for cross-validation
        lambdas; % set of lambda parameters for cross-validation
        image % time frame for performing CV
        manage_data = 1;
    end
    properties (SetAccess = private)
        el_pos % vector of electrode positions
        pots % vector of potential values (n_electrodes, nt)
        n_el % number of electrodes
        dist_max % maximal distance in estimation area, used
                 % to calculate dist table
        dist_table % table used to interpolate potential value
        K_pot % matrices for estimating CSD & potentials
        interp_pot
        interp_cross
        K_cross
        b_pot_matrix
        b_src_matrix
        b_interp_pot_matrix
        pots_est % estimated potentials
        CSD_est % estimated CSD
        CV_errors;
        tol % tolerance used in CV seeking of R via
            % fminbnd
    end
    properties (GetAccess = private, SetAccess = private)
        % Dirty flags. The property setters below clear these so dependent
        % results get recomputed on the next access (see get.CSD_est /
        % get.pots_est).
        matrices_up_to_date = 1;
        estimation_up_to_date = 0;
        pot_interp_up_to_date = 0;
        pots_estimation_up_to_date = 0;
        cv_errors_up_to_date = 0;
    end
    methods
        %-------------------------------------------------------------------------
        function k = kcsd2d(el_pos, pots, varargin)
            % kcsd2d class constructor. In the input line, after supplying the
            % obligatory electrode positions vector (el_pos <n_el x 1 double>)
            % and potential values (pots <n_el x nt double>) the user can enter
            % several options by providing 'option_name', option_value pairs:
            %
            % Estimation area options:
            %
            % 'X_min' minimal X position for the estimation area.
            % 'X_max' maximal X position for the estimation area.
            % 'Y_min' minimal Y position for the estimation area.
            % 'Y_max' maximal Y position for the estimation area.
            % 'X' X - component of the estimation area (meshgrid logic).
            % 'Y' Y - component of the estimation area
            % (meshgrid logic, same size as X).
            % 'gdX' interpolated resolution in X direction.
            % 'gdY' interpolated resolution in Y direction.
            %
            % Model hyper-parameter options:
            %
            % 'R' initial value for basis function radius.
            % 'h' h parameter of the kCSD2d method.
            % 'sigma' space conductivity.
            %
            % Source basis functions positions and count options:
            %
            % 'ext_X' length by which the sources go out beyond the estimation
            % area in the X direction.
            % 'ext_Y' length by which the sources go out beyond the estimation
            % area in the Y direction.
            % 'n_src' initial number of basis elements used.
            %
            % Data management:
            %
            % 'manage_data' determines whether previously calculated
            % estimation data will be used or whether new
            % estimation data will be saved to be used later on
            %
            % Example:
            %
            % add directory with the kcsd2d class
            %
            % addpath(genpath('../kcsd2d_class'));
            %
            % define the estimation area and generate the CSD (here we use the set
            % 'small sources')
            %
            % [X, Y] = meshgrid(-.2:0.01:1.2, -.2:0.01:1.4);
            % CSD = test_csd4(X, Y, 0);
            %
            % define the grid of electrodes (here electrode arrangement from fig.?
            % in paper)
            %
            % [el_pos] = slanted_grid;
            % pots = generate_potentials(@test_csd4, ...
            % [-.2 1.4], [-.2 1.4], .5, el_pos(:,1)', el_pos(:,2)', ...
            % zeros(size(el_pos(:,1)')))';
            %
            % Create an instance of the kcsd2d class with 1000 basis elements and
            % with the set CSD being the test data.
            %
            % k = kcsd2d(el_pos, pots, 'X', X, 'Y', Y, 'n_src', 1000);
            %
            % Plot estimated CSD:
            %
            % k.plot_CSD;
            %
            %
            % save the estimated CSD to a workspace variable
            %
            % estimated = k.CSD_est;
            if (~ischar(el_pos) && ~ischar(pots))
                k.el_pos = el_pos;
                k.pots = pots;
            else
                error('Must specify el_pos and pots first');
            end;
            propertyArgIn = varargin;
            % Consume 'name', value pairs from varargin.
            while length(propertyArgIn) >= 2,
                prop = propertyArgIn{1};
                val = propertyArgIn{2};
                propertyArgIn = propertyArgIn(3:end);
                % reading input
                switch prop
                    case 'X_min'
                        X_min = val;
                    case 'X_max'
                        X_max = val;
                    case 'Y_min'
                        Y_min = val;
                    case 'Y_max'
                        Y_max = val;
                    case 'gdX'
                        gdX = val;
                    case 'gdY'
                        gdY = val;
                    case 'R'
                        R_init = val;
                    case 'h'
                        k.h = val;
                    case 'sigma'
                        k.sigma = val;
                    case 'ext_X'
                        ext_X = val;
                    case 'ext_Y'
                        ext_Y = val;
                    case 'n_src'
                        n_src = val;
                    case 'X'
                        k.X = val;
                    case 'Y'
                        k.Y = val;
                    case 'manage_data'
                        k.manage_data = val;
                    otherwise
                        error(['no method defined for input: ',prop]);
                end %case
            end %while
            if isempty(k.el_pos) || isempty(k.pots)
                error('must specify el_pos & pots')
            else
                % Fill in defaults for anything the caller did not supply.
                % NOTE(review): exist('X','var') / exist('Y','var') can never
                % be true here ('X' is stored directly into k.X above, no
                % local X is created), so these clauses are effectively
                % no-ops; the isempty(k.X) guard below is what actually
                % prevents overwriting a user-supplied grid.
                if ~exist('X_min', 'var') && ~exist('X', 'var')
                    X_min = min(k.el_pos(:,1));
                end
                if ~exist('X_max', 'var') && ~exist('X', 'var')
                    X_max = max(k.el_pos(:,1));
                end
                if ~exist('Y_min', 'var') && ~exist('Y', 'var')
                    Y_min = min(k.el_pos(:,2));
                end
                if ~exist('Y_max', 'var') && ~exist('Y', 'var')
                    Y_max = max(k.el_pos(:,2));
                end
                if ~exist ('gdX', 'var') && ~exist('X', 'var')
                    gdX = 0.01*(X_max - X_min);
                end
                if ~exist ('gdY', 'var') && ~exist('Y', 'var')
                    gdY = 0.01*(Y_max - Y_min);
                end
                if ~exist('n_src', 'var')
                    n_src = 300;
                end
                if ~exist('ext_X', 'var')
                    ext_X = 0;
                end
                if ~exist('ext_Y', 'var')
                    ext_Y = 0;
                end
                if ~exist('R_init', 'var')
                    R_init = 2*calc_min_dist(el_pos);
                end
                if isempty(k.X) && isempty(k.Y)
                    [k.X, k.Y] = meshgrid(X_min:gdX:X_max, Y_min:gdY:Y_max);
                end
                % Place the source basis elements and fix the final R.
                [k.X_src, k.Y_src, ~, ~, k.R] = make_src_2d(k.X, k.Y, n_src, ...
                    ext_X, ext_Y, R_init);
                % Largest source-to-point distance; bounds the dist table.
                Lx=max(k.X_src(:))-min(k.X_src(:))+k.R;
                Ly=max(k.Y_src(:))-min(k.Y_src(:))+k.R;
                k.dist_max=sqrt(Lx^2+Ly^2);
                k.image = choose_CV_image(pots);
                [k.Rs, k.tol] = calc_Rs(k, length(pots));
                k.n_el = length(k.el_pos);
                k.lambdas = calc_lambdas;
                % Run the full estimation immediately.
                k.analyze;
            end %if
        end %function
        %-------------------------------------------------------------------------
        function analyze(k, varargin)
            % Estimates CSD having all the parameters defined. Method is run by
            % default when the constructor is executed. However one might want to
            % modify some parameters and carry out a new estimation.
            %
            % Example:
            % Repeat the procedures in the example shown with the constructor:
            %
            % addpath(genpath('../kcsd2d_class'));
            % [X, Y] = meshgrid(-.2:0.01:1.2, -.2:0.01:1.4);
            % CSD = test_csd4(X, Y, 0);
            % [el_pos] = slanted_grid;
            % pots = generate_potentials(@test_csd4, ...
            % [-.2 1.4], [-.2 1.4], .5, el_pos(:,1)', el_pos(:,2)', ...
            % zeros(size(el_pos(:,1)')))';
            % k = kcsd2d(el_pos, pots, 'X', X, 'Y', Y, 'n_src', 1000, ...
            % 'test', CSD);
            %
            % Parameter R has some value chosen by default. We may want to change
            % it and then run the estimation once again:
            %
            % k.R = 0.5;
            % k.analyze;
            % k.plot_CSD;
            % k.plot_test;
            %
            % We see that R is now too large.
            k.calc_matrices;
            k.choose_lambda; % this estimates as well
        end
        %-------------------------------------------------------------------------
        function calc_K_interp_cross(k)
            % calculates K_interp_cross used for interpolating CSD. Method used in
            % methods : estimate, analyze;
            if k.manage_data == 1
                % Disk cache: reuse a previously saved interp_cross/dist_table
                % pair when a file for the current parameters exists.
                filename = generate_filename(k, 'cross');
                if exist([filename, '.mat'], 'file') == 0
                    % NOTE(review): this local b_src_matrix shadows the
                    % property of the same name (the property is NOT set on
                    % this branch, unlike the manage_data == 0 branch below).
                    b_src_matrix = make_b_src_matrix_2D(k.X, k.Y, k.X_src, k.Y_src, ...
                        k.R, 'gauss');
                    k.interp_cross=b_src_matrix*k.b_pot_matrix;
                    interp = k.interp_cross;
                    dist = k.dist_table;
                    save(filename, 'interp', 'dist');
                else
                    % load() injects 'interp' and 'dist' into this workspace.
                    load(filename);
                    k.interp_cross = interp;
                    k.dist_table = dist;
                    clear interp;
                end
            else
                k.b_src_matrix = make_b_src_matrix_2D(k.X, k.Y, k.X_src, k.Y_src, ...
                    k.R, 'gauss');
                k.interp_cross=k.b_src_matrix*k.b_pot_matrix;
            end
        end
        %-------------------------------------------------------------------------
        function calc_interp_pot(k)
            % calculates interp_pot used for interpolating potentials. Method used
            % in methods : estimate_potentials, get.pots_est;
            k.b_interp_pot_matrix = make_b_interp_pot_matrix_2D(k.X, k.Y, ...
                k.X_src, k.Y_src, k.R, k.dist_table);
            k.interp_pot=k.b_interp_pot_matrix*k.b_pot_matrix;
            k.pot_interp_up_to_date = 1;
        end
        %-------------------------------------------------------------------------
        function calc_K_pot(k)
            % calculates the K_pot and b_pot_matrix matrices, which are sufficient
            % to carry out cross validation and essential (but not sufficient) for
            % interpolating CSD. Method used in methods calc_matrices, analyze,
            % choose_R_lambda.
            if k.manage_data == 1
                % Disk cache, same scheme as calc_K_interp_cross.
                filename = generate_filename(k, 'pot');
                if exist([filename, '.mat'], 'file') == 0
                    k.dist_table = create_dist_table(100, k.dist_max, k.R, k.h, k.sigma, ...
                        'gauss');
                    k.b_pot_matrix = make_b_pot_matrix_2D(k.X, k.Y, k.X_src, k.Y_src, ...
                        k.el_pos, k.dist_table, k.R);
                    k.K_pot=(k.b_pot_matrix)'*(k.b_pot_matrix);
                    b_pot = k.b_pot_matrix;
                    K = k.K_pot;
                    save(filename, 'b_pot', 'K');
                else
                    % load() injects 'b_pot' and 'K' into this workspace.
                    load(filename);
                    k.b_pot_matrix = b_pot;
                    k.K_pot = K;
                    clear K; clear b_pot;
                end
            else
                k.dist_table = create_dist_table(100, k.dist_max, k.R, k.h, k.sigma, ...
                    'gauss');
                k.b_pot_matrix = make_b_pot_matrix_2D(k.X, k.Y, k.X_src, k.Y_src, ...
                    k.el_pos, k.dist_table, k.R);
                k.K_pot=(k.b_pot_matrix)'*(k.b_pot_matrix);
            end
        end
        %-------------------------------------------------------------------------
        function err = calc_cv_error(k, n_folds)
            % A cross-validation estimator of the error of the estimation. Used in
            % methods that choose parameters through cross-validation.
            if k.matrices_up_to_date == 0
                k.calc_matrices;
            end
            % Random fold assignment; uses the single CV time frame k.image.
            Ind_perm = randperm(k.n_el);
            err = cross_validation(k.lambda, k.pots(:, k.image), k.K_pot,...
                n_folds, Ind_perm);
        end
        %-------------------------------------------------------------------------
        function calc_matrices(k)
            % calculates K_pot and K_interp_cross. doesn't estimate.
            k.calc_K_pot;
            k.calc_K_interp_cross;
            k.matrices_up_to_date = 1;
        end
        %-------------------------------------------------------------------------
        function choose_lambda(k, varargin)
            % Chooses the regularisation lambda parameter for ridge regression. The
            % user can enter options by providing 'property_name', property_value
            % pairs:
            %
            % 'n_folds' number of folds to perform Cross validation (CV)
            % 'n_iter' number of iterations for the CV procedure
            % 'sampling' ways of looking for the optimal lambda:
            % 1 - simple sampling
            % 2 - using fminbnd function
            [n_folds, n_iter, sampling] = ...
                get_choose_lambda_parameters(k, varargin);
            % choosing one frame for carry out
            if sampling==1 % choose lambda via simple sampling
                value = lambda_sampling_1(k, n_folds, n_iter);
            elseif sampling == 2 % choose lambda using fminbnd function
                value = lambda_sampling_2(k, n_folds, n_iter);
            else
                error('Sampling must be 1 or 2.');
            end
            k.lambda = value;
            k.estimate;
        end
        %-------------------------------------------------------------------------
        function choose_R_lambda(k)
            % Grid search over k.Rs x k.lambdas; keeps the (R, lambda) pair with
            % the lowest CV error and re-runs the full analysis with it.
            n_lambdas = length(k.lambdas);
            n_Rs = length(k.Rs);
            % NOTE(review): 200 is assumed to exceed any achievable CV error so
            % the first evaluated pair always initializes the minimum — confirm.
            error_min = 200;
            k.CV_errors = zeros(n_Rs, n_lambdas);
            wait = waitbar(0, 'Performing cross - validation over R & lambda...');
            for i = 1:n_Rs
                k.R = k.Rs(i);
                % K_pot depends on R, so it must be rebuilt per R value.
                k.calc_K_pot;
                for j = 1:n_lambdas
                    waitbar(((i-1)*n_lambdas + j)/(n_Rs*n_lambdas));
                    k.lambda = k.lambdas(j);
                    error = k.calc_cv_error(k.n_el);
                    k.CV_errors(i, j) = error;
                    if error_min > error
                        error_min = error;
                        lambda_min = k.lambdas(j);
                        R_min = k.Rs(i);
                    end
                end
            end
            close(wait);
            disp(['selected R: ', num2str(R_min)]);
            disp(['selected lambda: ', num2str(lambda_min)]);
            k.R = R_min;
            k.lambda = lambda_min;
            k.analyze;
            k.cv_errors_up_to_date = 1;
        end
        %-------------------------------------------------------------------------
        function estimate(k)
            % Computes the CSD estimate, rebuilding matrices first if stale.
            if isempty(k.K_pot) || isempty(k.interp_cross) || k.matrices_up_to_date == 0
                k.calc_matrices;
            end
            k.CSD_est = estimation(k, 'CSD');
            k.estimation_up_to_date = 1;
        end
        %-------------------------------------------------------------------------
        function estimate_potentials(k)
            % Computes the potential estimate, rebuilding interp_pot if stale.
            if (isempty(k.interp_pot) || k.pot_interp_up_to_date == 0)
                k.calc_interp_pot;
            end
            k.pots_est = estimation(k, 'pots');
        end
        %-------------------------------------------------------------------------
        function plot_CSD(k)
            % Plots the estimated CSD, computing it first when needed.
            if isempty(k.CSD_est) || (k.estimation_up_to_date == 0)
                k.estimate;
            end;
            kcsd_plot(k.X, k.Y, k.CSD_est, k.el_pos, 'estimated CSD');
        end
        %-------------------------------------------------------------------------
        function plot_pots(k)
            % Plots the estimated potentials, computing them first when needed.
            if (k.pots_estimation_up_to_date == 0 || k.pot_interp_up_to_date == 0)
                k.estimate_potentials;
            end;
            kcsd_plot(k.X, k.Y, k.pots_est, k.el_pos, 'estimated potentials');
        end
        %-------------------------------------------------------------------------
        function plot_params_vs_cv(k)
            % Shows the CV error landscape from the last choose_R_lambda run.
            if k.cv_errors_up_to_date == 0
                disp('No up to date data, run choose_R_lambda first')
            else
                imagesc(k.CV_errors);
            end;
        end;
        %-------------------------------------------------------------------------
        % Property setters: each writes the value and clears the dirty flags
        % so cached matrices/estimates are recomputed on next use.
        function set.R(k, value)
            k.R = value;
            k.matrices_up_to_date = 0;
            k.estimation_up_to_date = 0;
            k.pot_interp_up_to_date = 0;
            k.pots_estimation_up_to_date = 0;
            k.cv_errors_up_to_date = 0;
        end
        function set.h(k, value)
            k.h = value;
            k.matrices_up_to_date = 0;
            k.estimation_up_to_date = 0;
            k.pot_interp_up_to_date = 0;
            k.pots_estimation_up_to_date = 0;
            k.cv_errors_up_to_date = 0;
        end
        function set.sigma(k, value)
            k.sigma = value;
            k.matrices_up_to_date = 0;
            k.estimation_up_to_date = 0;
            k.pot_interp_up_to_date = 0;
            k.pots_estimation_up_to_date = 0;
            k.cv_errors_up_to_date = 0;
        end
        function set.X(k, value)
            k.X = value;
            k.matrices_up_to_date = 0;
            k.estimation_up_to_date = 0;
            k.pot_interp_up_to_date = 0;
            k.pots_estimation_up_to_date = 0;
            k.cv_errors_up_to_date = 0;
        end
        function set.Y(k, value)
            k.Y = value;
            k.matrices_up_to_date = 0;
            k.estimation_up_to_date = 0;
            k.pot_interp_up_to_date = 0;
            k.pots_estimation_up_to_date = 0;
            k.cv_errors_up_to_date = 0;
        end
        function set.Rs(k, value)
            k.Rs = value;
            k.cv_errors_up_to_date = 0;
        end
        function set.lambdas(k, value)
            k.lambdas = value;
            k.cv_errors_up_to_date = 0;
        end
        % Lazy getters: recompute stale intermediates before returning.
        function CSD_est = get.CSD_est(k)
            if k.matrices_up_to_date == 0
                k.calc_matrices;
            end
            if k.estimation_up_to_date == 0
                k.estimate;
            end
            CSD_est = k.CSD_est;
        end
        function pots_est = get.pots_est(k)
            if k.pot_interp_up_to_date == 0
                k.calc_interp_pot;
            end;
            if k.pots_estimation_up_to_date == 0
                k.estimate_potentials;
            end
            pots_est = k.pots_est;
        end
    end
end
function [path, cost] = postProcess(path, map)
% POSTPROCESS Shortcut-smooth a path and return it together with its cost.
% Keeps an anchor row and, while checkBranchValid reports a clear branch
% from the anchor to the next row, drops intermediate rows; when the branch
% breaks, both the last reachable row and the following row are kept and
% the anchor advances.
numRows = size(path, 1);
anchor = 1;
keepIdx = anchor;
for step = 1:numRows - 1
    clearAhead = checkBranchValid(path(anchor, :), path(step + 1, :), map, 20*step, true);
    if ~clearAhead
        % Branch broke: keep the last reachable row and the next one.
        keepIdx = [keepIdx, step, step + 1];
        anchor = step + 1;
    end
end
% Append the goal configuration when the kept path does not end on it.
if norm(path(keepIdx(end), :) - path(end, :)) > 0.01
    keepIdx(end + 1) = numRows;
end
path = path(keepIdx, :);
% Cost = summed Euclidean lengths of consecutive segments, using only the
% first 4 columns of each configuration.
segLens = vecnorm(diff(path(:, 1:4)).');
cost = sum(segLens);
% A path with fewer than two rows has no segments, hence zero cost.
if size(path, 1) < 2
    cost = 0;
end
end
%% MLBREADIV reads the baseball statistics from mlb07al.dat
%
% Demonstrates using the FGETL function to import character data from the
% 'mlb07al.dat' file. In this example, the FGETL function reads a line of
% text from the file and discards the newline character. The WHILE-loop
% continues executing until it encounters the end of the file. For every
% line it checks for the occurrence of word "Central". When that happens it
% reads the next 5 lines to import data for the central teams.
%% 1. Open mlb07al.dat file
% Opens the data file in the MATLAB Editor for visual inspection only;
% the actual reading below uses fopen/fgetl.
edit('mlb07al.dat')
%% 2. Read to the point where the keyword 'Central' is found.
fid = fopen('mlb07al.dat');
textline = fgetl(fid);
while ischar(textline) % continue reading if textline is not -1
    % Compare to see if line read is the header for central teams
    if any(strfind(textline,'Central'))
        break
    end
    % Read the next line from the file
    textline = fgetl(fid);
end
%% 3. Import the data for the Central Region
% Establish a conversion specifier for the TEXTSCAN function
% (%11[...] reads an 11-char team name containing no digits).
fmt = '%11[^0123456789] %f %f %f %s %s %f-%f %s %f-%f %f-%f';
% Import data for the central teams (5 records, continuing from the
% current file position left by the loop above)
central = textscan(fid,fmt,5);
% Close the connection.
status = fclose(fid);
|
%% defend.m
% determines where to move king to avoid check
% check to see if king will move into check
% takes argument of moves struct and moves.board matrix
% returns [0,0] if no check
% returns [-1,-1] if checkmate
function position = defend(moves)
% Tries all 8 one-square king moves on a temporarily modified board and
% returns the first destination that escapes check.
% NOTE(review): captures and blocking by other pieces are not considered
% here, and destination squares occupied by the king's own pieces are not
% excluded — confirm the callers handle those cases.
%get position of king
[r,c] = find(moves.king1 == 2);
flag = test_check(moves,1); %test for check
%sets position to move king to
if flag == 0 %if no check
    position = [0,0]; %return [0,0] if no check
else
    flag = 0; %reset flag
    % The 8 king directions: diagonals and orthogonals.
    direction = [-1,-1; -1,0; -1,1; 0,1; 1,1; 1,0; 1,-1; 0,-1];
    for dir_index = 1:8 %checks each direction
        r_new = direction(dir_index,1);
        c_new = direction(dir_index,2);
        % Stay inside the 8x8 board.
        if (r+r_new > 0 && r+r_new < 9 && c+c_new > 0 && c+c_new < 9)
            %move piece and check for checkmate
            % Board layers 3 and 4 are both updated; presumably one holds
            % the piece occupancy and the other the king mask — TODO confirm.
            moves.board(r,c,3) = 0;
            moves.board(r+r_new,c+c_new,3) = 1;
            moves.board(r,c,4) = 0;
            moves.board(r+r_new,c+c_new,4) = 1;
            moves = get_moves(moves.board);
            flag = test_check(moves,1);
            %reset board back
            moves.board(r,c,3) = 1;
            moves.board(r+r_new,c+c_new,3) = 0;
            moves.board(r,c,4) = 1;
            moves.board(r+r_new,c+c_new,4) = 0;
            moves = get_moves(moves.board);
            if flag == 0
                position = [r+r_new,c+c_new];
                break;
            end
        end
    end
    if flag == 1 %if still in check after all moves
        position = [-1,-1]; %return [-1,-1] on checkmate
        % NOTE(review): if test_check ever returns a value other than 0/1,
        % or every direction is off-board, 'position' stays unassigned and
        % this function errors — confirm test_check's return contract.
    end
end
end
function [ estimatedLabels ] = classifyWithTemplateMatching( templates , testData , method, errorMeasure,emotions)
%CLASSIFYWITHTEMPLATEMATCHING Given a set of templates and a test dataset,
%this function estimates the labels of each sample in the test dataset
%comparing it with each of the templates.
%
% Inputs:
%   templates    - stack of templates, one per row of the first dimension.
%                  For the z-* measures the template appears to stack the
%                  mean image (rows 1:128) on top of the std image
%                  (rows 129:end) — samples are presumably 128 rows tall.
%   testData     - stack of test images (samples along first dimension)
%   method       - 'chamferMean' pre-transforms samples to chamfer images
%   errorMeasure - name of the distance used (see switch below)
%   emotions     - label values indexed by template number
% Output:
%   estimatedLabels - label of the best (minimum distance) template per sample
%Convert all the images in the testData into a chamfer distance images
if(strcmp(method,'chamferMean')==1)
    for i = 1:size(testData,1)
        image = squeeze(testData(i,:,:));
        testData(i,:,:) = bwdist(edge(image,'canny',0.4));
    end
end
%init the variable where the estimated labels will be stored
estimatedLabels = zeros(1,size(testData,1));
%get the number of templates we are going to evaluate
numTemplates = size(templates,1);
%Iterate over all the test data
for i = 1:size(testData,1)
    %get the current sample we want to evaluate
    currentSample = squeeze(testData(i,:,:));
    %init the similarity score for each template with the current
    %sample
    templateScore = zeros(1,numTemplates);
    for e = 1:numTemplates
        %get the current template
        currentTemplate = squeeze(templates(e,:,:));
        %get the similarity score of the pattern with the given sample
        %and store into templateScore variable
        switch errorMeasure
            case 'euclidean'
                templateScore(e) = pdist2(currentSample(:)', currentTemplate(:)','euclidean');
            case 'mean-dist'
                % NOTE(review): mean() of a scalar pixel is the pixel
                % itself, and z is not reset between templates — confirm
                % this measure behaves as intended.
                for k = 1:size(currentSample,1)
                    for j = 1:size(currentSample,2)
                        z(k,j) = mean(currentSample(k,j))- currentTemplate(1);
                    end
                end
                templateScore(e) = mean(mean(z));
            case 'z-dist'
                % Fix: use element-wise division (./) like every other z-*
                % measure below; the original '/' performed a matrix
                % right-division (least-squares solve), which is not a
                % per-pixel z-score.
                z = (currentSample - currentTemplate(1:128,:))./currentTemplate(129:end,:);
                templateScore(e) = mean(mean(z));
            case 'z-dist-pixel'
                % NOTE(review): the std lookup uses the sample index i
                % (i+128) rather than the pixel row k — likely should be
                % k+128; left unchanged pending confirmation.
                for k = 1:size(currentSample,1)
                    for j = 1:size(currentSample,2)
                        z(k,j) = ((currentSample(k,j) - currentTemplate(k,j))^2)/currentTemplate(i+128,j);
                    end
                end
                templateScore(e) = mean(mean(z));
            case 'hist-dist'
                histograma = histcounts(currentSample,50);
                templateScore(e) = mean(mean(histograma-currentTemplate).^2);
            case 'gabor-dist'
                [MAG, ~] = imgaborfilt(currentSample,2,90);
                templateScore(e) = pdist2(MAG(:)', currentTemplate(:)','euclidean');
            case 'z-gabor-dist'
                [MAG, ~] = imgaborfilt(currentSample,2,90);
                templateScore(e) = mean(mean((MAG - currentTemplate(1:128,:))./currentTemplate(129:end,:)));
            %% FROM HERE THEY ARE ALL IMAGE FILTERS
            % First there is the euclidean distance, then the
            % Z-dist for each filter
            case 'std-dist'
                [MAG] = stdfilt(currentSample);
                templateScore(e) = pdist2(MAG(:)', currentTemplate(:)','euclidean');
            case 'z-std-dist'
                [MAG] = stdfilt(currentSample);
                templateScore(e) = mean(mean((MAG - currentTemplate(1:128,:))./currentTemplate(129:end,:)));
            case 'range-dist'
                [MAG] = rangefilt(currentSample);
                templateScore(e) = pdist2(MAG(:)', currentTemplate(:)','euclidean');
            case 'z-range-dist'
                MAG = rangefilt(currentSample);
                templateScore(e) = mean(mean((MAG - currentTemplate(1:128,:))./currentTemplate(129:end,:)));
            case 'fib-dist'
                MAG = fibermetric(currentSample);
                templateScore(e) = pdist2(MAG(:)', currentTemplate(:)','euclidean');
            case 'z-fib-dist'
                MAG = fibermetric(currentSample);
                templateScore(e) = mean(mean((MAG - currentTemplate(1:128,:))./currentTemplate(129:end,:)));
        end
    end
    %get the label with the minimum similarity score and assign it to
    %the current sample
    estimatedLabels(i) = emotions(find(templateScore==min(templateScore),1));
end
end
|
%% fn_savefile
%% Syntax
% filename = fn_savefile(varargin)
%% Description
% Synonym for "filename = fn_getfile('SAVE',varargin)"
%
% See also fn_getfile
%% Source
% Thomas Deneux
%
% Copyright 2003-2012
%
|
function spc_drawAll
% SPC_DRAWALL Refresh all SPC displays by redrawing the lifetime map and
% the lifetime plot via the spc_draw* helper functions.
%Fig2 = lifetime in ROI.
%set(gui.spc.figure.projectImage, 'CData', spc.project);
spc_drawLifetimeMap;
spc_drawLifetime;
%% MULTIFRAME MOTION COUPLING FOR VIDEO SUPER RESOLUTION
%
% Demo script: loads a short image sequence, runs the multiframe motion
% coupling super-resolution algorithm, and reports PSNR/SSIM on the
% central frame.
%
% Be sure to initialize the submodules and to compile them,
% following their instructions
clearvars;
%% Data properties
datasetName = 'surfer';
startFrame = 1;
numFrames = 5;
cslice = ceil(numFrames/2);        % index of the central (evaluation) frame
factor = 4;                        % Magnification factor
%% Data generation process
dataFolder = '../data/videos_scenes/';
% Returns the downsampled input sequence and the ground-truth sequence.
[imageSequenceSmall,imageSequenceLarge] = LoadImSequence([dataFolder,filesep,datasetName],startFrame,numFrames,factor,'bicubic');
%% Construct algorithm object
% Input: RGB-Time matlab array
mainSuper = MultiframeMotionCouplingAlternating(imageSequenceSmall,imageSequenceLarge);
%% Set variables (these are the standard parameters)
% Procedure
mainSuper.outerIts = 15;
mainSuper.factor = factor;         % magnification factor
mainSuper.verbose = 1;             % enable intermediate output, 1 is text, 2 is image
mainSuper.framework = 'prost';     % Choose framework for super resolution problem
                                   % Either 'flexBox' or 'flexBox_vector'
                                   % or 'prost', if installed
% Problem parameters
mainSuper.alpha = 0.01;            % regularizer weight
mainSuper.beta = 0.2;              % flow field complexity
mainSuper.kappa = 0.25;            % regularization pendulum
mainSuper.flowDirection = 'forward'; % flow field direction
% Operator details
mainSuper.interpMethod = 'average';           % Downsampling operator D
mainSuper.k = fspecial('gaussian',7,sqrt(0.6));% Blur operator B
%% Init flow field and solvers
tic
mainSuper.init;
toc
%% Solve super resolution problem
tic
mainSuper.run;
toc
%% Show error margin
% Crop a 20-pixel border before computing the metrics on the central slice.
outImage = mainSuper.result1(20:end-20,20:end-20,:,ceil(numFrames/2));
psnrErr = round(psnr(outImage,imageSequenceLarge(20:end-20,20:end-20,:,ceil(numFrames/2))),2);
ssimErr = round(ssim(outImage,imageSequenceLarge(20:end-20,20:end-20,:,ceil(numFrames/2))),3);
disp(['PSNR (central patch, central slice): ',num2str(psnrErr),' dB']);
disp(['SSIM (central patch, central slice): ',num2str(ssimErr),' ']);
%% Visualize either central image or full video
if mainSuper.verbose > 0
    vid = implay(mainSuper.result1,2);
    set(vid.Parent, 'Position',get(0, 'Screensize'));
else
    figure(1), imshow(outImage); title(['PSNR: ', num2str(psnrErr)]); axis image
end
%%
disp('---------------------------------------------------------------------')
|
function boxes = detect(input, model, thresh)
% DETECT Run a tree-structured part model over an image and return all
% detections whose root score reaches thresh.
%
% Inputs:
%   input  - image to search
%   model  - part model (components, filters, defs, interval, ...)
%   thresh - minimum root score for a detection to be kept
% Output:
%   boxes  - struct array with fields s (score), c (component index),
%            xy (per-part boxes), level (pyramid level)
% Keep track of detected boxes and features
BOXCACHESIZE = 100000;
cnt = 0;
% Preallocate the struct array by assigning to the last element.
boxes.s = 0;
boxes.c = 0;
boxes.xy = 0;
boxes.level = 0;
boxes(BOXCACHESIZE) = boxes;
% Compute the feature pyramid and prepare filters
pyra = featpyramid(input,model);
[components,filters,resp] = modelcomponents(model,pyra);
% Components and levels are visited in random order; resp{} caches filter
% responses so each pyramid level is convolved at most once.
for c = randperm(length(components)),
    minlevel = model.interval+1;
    levels = minlevel:length(pyra.feat);
    for rlevel = levels(randperm(length(levels))),
        parts = components{c};
        numparts = length(parts);
        % Local part scores
        for k = 1:numparts,
            f = parts(k).filterid;
            level = rlevel-parts(k).scale*model.interval;
            if isempty(resp{level}),
                resp{level} = fconv(pyra.feat{level},filters,1,length(filters));
            end
            parts(k).score = resp{level}{f};
            parts(k).level = level;
        end
        % Walk from leaves to root of tree, passing message to parent
        % Given a 2D array of filter scores 'child', shiftdt() does the following:
        % (1) Apply distance transform
        % (2) Shift by anchor position (child.startxy) of part wrt parent
        % (3) Downsample by child.step
        for k = numparts:-1:2,
            child = parts(k);
            par = child.parent;
            [Ny,Nx,foo] = size(parts(par).score);
            [msg,parts(k).Ix,parts(k).Iy] = shiftdt(child.score, child.w(1),child.w(2),child.w(3),child.w(4), ...
                child.startx, child.starty, Nx, Ny, child.step);
            parts(par).score = parts(par).score + msg;
        end
        % Add bias to root score
        rscore = parts(1).score + parts(1).w;
        [Y,X] = find(rscore >= thresh);
        if ~isempty(X)
            XY = backtrack( X, Y, parts, pyra);
        end
        % Walk back down tree following pointers
        for i = 1:length(X)
            x = X(i);
            y = Y(i);
            % When the cache fills up, run non-max suppression to shrink it
            % and continue appending after the survivors.
            if cnt == BOXCACHESIZE
                b0 = nms_face(boxes,0.3);
                clear boxes;
                boxes.s = 0;
                boxes.c = 0;
                boxes.xy = 0;
                boxes.level = 0;
                boxes(BOXCACHESIZE) = boxes;
                cnt = length(b0);
                boxes(1:cnt) = b0;
            end
            cnt = cnt + 1;
            boxes(cnt).c = c;
            boxes(cnt).s = rscore(y,x);
            boxes(cnt).level = rlevel;
            boxes(cnt).xy = XY(:,:,i);
        end
    end
end
% Trim the preallocated cache down to the detections actually stored.
boxes = boxes(1:cnt);
% Backtrack through dynamic programming messages to estimate part locations
% and the associated feature vector
function box = backtrack(x,y,parts,pyra)
% For each root location (x,y), follow the argmax pointers (Ix, Iy) stored
% by shiftdt down the part tree and convert every part's grid location to
% image coordinates.
numparts = length(parts);
ptr = zeros(numparts,2,length(x));
box = zeros(numparts,4,length(x));
k = 1;
p = parts(k);
ptr(k,1,:) = x;
ptr(k,2,:) = y;
% image coordinates of root
scale = pyra.scale(p.level);
padx = pyra.padx;
pady = pyra.pady;
box(k,1,:) = (x-1-padx)*scale + 1;
box(k,2,:) = (y-1-pady)*scale + 1;
box(k,3,:) = box(k,1,:) + p.sizx*scale - 1;
box(k,4,:) = box(k,2,:) + p.sizy*scale - 1;
for k = 2:numparts,
    p = parts(k);
    par = p.parent;
    % Parent location selects this part's best child location via Ix/Iy.
    x = ptr(par,1,:);
    y = ptr(par,2,:);
    inds = sub2ind(size(p.Ix), y, x);
    ptr(k,1,:) = p.Ix(inds);
    ptr(k,2,:) = p.Iy(inds);
    % image coordinates of part k
    scale = pyra.scale(p.level);
    box(k,1,:) = (ptr(k,1,:)-1-padx)*scale + 1;
    box(k,2,:) = (ptr(k,2,:)-1-pady)*scale + 1;
    box(k,3,:) = box(k,1,:) + p.sizx*scale - 1;
    box(k,4,:) = box(k,2,:) + p.sizy*scale - 1;
end
% Cache various statistics from the model data structure for later use
function [components,filters,resp] = modelcomponents(model,pyra)
% Flattens per-part data (filter size, deformation weights, anchor-derived
% start positions, scale relative to the component root) into
% components{c}(k), and collects the raw filter weights and an empty
% per-level response cache.
components = cell(length(model.components),1);
for c = 1:length(model.components),
    for k = 1:length(model.components{c}),
        p = model.components{c}(k);
        x = model.filters(p.filterid);
        [p.sizy p.sizx foo] = size(x.w);
        p.filterI = x.i;
        x = model.defs(p.defid);
        p.defI = x.i;
        p.w = x.w;
        % store the scale of each part relative to the component root
        par = p.parent;
        % Parts must be topologically ordered: parent before child.
        assert(par < k);
        ax = x.anchor(1);
        ay = x.anchor(2);
        ds = x.anchor(3);
        if par > 0,
            p.scale = ds + components{c}(par).scale;
        else
            assert(k == 1);
            p.scale = 0;
        end
        % amount of (virtual) padding to hallucinate
        step = 2^ds;
        virtpady = (step-1)*pyra.pady;
        virtpadx = (step-1)*pyra.padx;
        % starting points (simulates additional padding at finer scales)
        p.starty = ay-virtpady;
        p.startx = ax-virtpadx;
        p.step = step;
        % Placeholders filled in later by detect().
        p.level = 0;
        p.score = 0;
        p.Ix = 0;
        p.Iy = 0;
        components{c}(k) = p;
    end
end
resp = cell(length(pyra.feat),1);
filters = cell(length(model.filters),1);
for i = 1:length(filters),
    filters{i} = model.filters(i).w;
end
|
% clc, clear
% Engineering-economics worksheet: a library of time-value-of-money
% formulas (as anonymous functions taking percentages for i, r, g) followed
% by a brute-force internal-rate-of-return search. All interest arguments
% are percentages (divided by 100 inside each formula).
format LONGG
%%Ch 3 Formulas
PV = @(FV, i, n) FV./((1+i./100).^n);
%Calculates the Present Value from a Future Value
FV = @(PV, i, n) PV.*((1+i./100).^n);
%Calculates the Future Value from a present value
EIR = @(r, m) (((1 + r./(100*m)).^m) - 1).*100;
%EIR is calculating effective interest rate
%r = nominal annual interest rate
%m = number of compounding periods per years
Im = @(EIR, m) (((1 + EIR./100).^(1./m)) - 1).*100;
%Find compound interest given Effective Interest Rate
%%Ch4 Formulas
%Uniform Series Compound Amount Factor (FV)
USFA = @(A, i, n) A.*(((1+(i./100)).^n - 1)./(i./100));
%Uniform Series Sinking Fund Factor (FV)
USAF = @(F, i, n) F.*((i./100)./((1+(i./100)).^n - 1));
%Uniform Series Capital Recovery Factor (PV)
USAP = @(P, i, n) P.*((i./100).*(1+(i./100)).^n)/((1+(i./100)).^n - 1);
%Uniform Series Present Worth Factor (PV)
USPA = @(A, i, n) A.*(((1+(i./100)).^n)-1)./((i./100).*(1+(i./100)).^n);
%Solve for n value, given P, A, and i
USNPA = @(P, A, i) log(1/(1 - P.*(i./100)./A))./log(1 + (i./100));
%Solve for Present Value/Worth given Gradiant, i, n
PG = @(G, i, n) G.*(1 - (1 + n.*(i./100)).*(1 + (i./100)).^(-n))./((i./100).^2);
%Solve for Uniform Series Given Gradient, i, n
AG = @(G, i, n) G.*((1 + (i./100)).^n - (1 + n.*(i./100)))./((i./100).*((1 + (i./100)).^n - 1));
%Solve for Future Value/Worth given Gradiant, i, n
FG = @(G, i, n) G.*((1 + (i./100)).^n - (1 + n.*(i./100)))./((i./100).^2);
%Solve for Present Worth/Value for Uniform Series A, g (geometric gradient), n, i
PAgG = @(A, g, i, n) A.*(1 - ((1 + (g./100)).^n).*(1 + (i./100)).^(-n))./((i./100) - (g./100));
% IRR search: sweep candidate rates from 0 to 100% in 1e-5 steps and report
% every rate whose net present value is within +-tolerance of zero.
% (i has 1e7+1 elements; the loop visits the first 1e7 of them, matching
% the zeros(1, 100e5) preallocations below.)
i = [0:0.00001:100];
zeroed = zeros(1, 100e5);
irr = zeros(1, 100e5);
count = 0;
compare = 0;
tolerance = 0.1;
for j = 1:10e6
    % NPV of the cash-flow stream at candidate rate i(j).
    zeroed(j) = -550000 + PV(117189, i(j), 1) + PV(150119.60, i(j), 2) + PV(144708.16, i(j), 3) + PV(147212.50, i(j), 4) + PV(161590.50, i(j), 5) + PV(167063.25, i(j), 6);
    if(zeroed(j) <= (compare+tolerance) && zeroed(j) >= (compare-tolerance))
        count = count + 1;
        fprintf('The i values that are within %.5f and %.5f are %f\n', compare+tolerance, compare-tolerance, i(j))
        irr(count) = i(j);
        if(count > 1)
            % Running average of all near-zero rates found so far
            % (intentionally unsuppressed so it prints).
            iavg = sum(irr)/(count)
        end
    end
end
% i = [0:0.1:100];
% A = zeros(1, 1000);
% B = zeros(1, 1000);
% C = zeros(1, 1000);
% D = zeros(1, 1000);
%
% for j = 1:1001
% A(j) = -8000 + USPA(1750, i(j), 10);
% B(j) = -6000 + USPA(1300, i(j), 10);
% C(j) = -6000 + USPA(1425, i(j), 10);
% D(j) = -9500 + USPA(1900, i(j), 10);
% if(A(j) > B(j) && A(j) > C(j) && A(j) > D(j) && A(j) >= 0)
% fprintf('At %3.2f A is best\n', i(j))
% elseif(B(j) > A(j) && B(j) > C(j) && B(j) > D(j) && B(j) >= 0)
% fprintf('At %3.2f B is best\n', i(j))
% elseif(C(j) > A(j) && C(j) > B(j) && C(j) > D(j) && C(j) >= 0)
% fprintf('At %3.2f C is best\n', i(j))
% elseif(D(j) > A(j) && D(j) > B(j) && D(j) > C(j) && D(j) >= 0)
% fprintf('At %3.2f D is best\n', i(j))
% else
% fprintf('At %3.2f Do Nothing is best\n', i(j));
% end
% end
% plot(i, A)
% hold
% plot(i, B)
% plot(i, C)
% plot(i, D)
% title('A vs. B vs. C vs. D');
% xlabel('Interest Rate (i)');
% ylabel('A, B, C, D ($)');
% legend;
% n = [0:0.00001:100];
% zeroed = zeros(1, 800e3);
% nfind = zeros(1, 800e3);
% compare = 140e3;
% tolerance = 0.1;
% count = 0;
% for j = 1:100e5
% zeroed(j) = 100E3 + PV(120e3, 8, n(j));
% if(zeroed(j) <= (compare+tolerance) && zeroed(j) >= (compare-tolerance))
% count = count + 1;
% fprintf('The n values that are within %.6f and %.6f are %.6f\n', compare+tolerance, compare-tolerance, n(j))
% nfind(count) = n(j);
% if(count > 1)
% navg = sum(nfind)/(count)
% end
% end
% end
|
% Switch to the imaging-session folder on the lab's network share.
cd \\sonas-hs.cshl.edu\churchland\data\fni17\imaging\151028
%%
% Collect the names of all motion-corrected movies (*_MCM.TIF) here.
a = dir('*_MCM.TIF');
aa = {a.name};
showcell(aa')
%%
% Rename each <name>_MCM.TIF to <name>_MCM_eachSess.TIF (the '.TIF'
% extension is stripped via (1:end-4) before appending the new suffix).
for i=1:length(aa)
    o = aa{i};
    n = [aa{i}(1:end-4),'_eachSess.TIF'];
    movefile(o, n)
    % pause
end
function [auth, sigmat, sigind] = GenSig(share1,share2)
%GENSIG Recover an authentication signature from two image shares.
%   [AUTH, SIGMAT, SIGIND] = GENSIG(SHARE1, SHARE2) XORs the lower-right
%   quadrant of SHARE1 with the upper-left quadrant of SHARE2 to rebuild
%   the authentication matrix AUTH (half the share size in each
%   dimension). AUTH is flattened row by row, shuffled with a random
%   permutation SIGIND, and the shuffled bit sequence SIGMAT is written to
%   'gensig.txt' as a string of digits.
%
%   Inputs:
%     share1, share2 - numeric matrices of equal size; dimensions are
%                      assumed to be even (each is halved below)
%   Outputs:
%     auth   - reconstructed authentication matrix (size(share1)/2)
%     sigmat - shuffled, flattened copy of auth
%     sigind - permutation used to shuffle sigmat
halfSize = size(share1);
halfSize(1) = halfSize(1)/2;
halfSize(2) = halfSize(2)/2;
auth = zeros(halfSize(1),halfSize(2));
fileID = fopen('gensig.txt','w');
nBits = halfSize(1)*halfSize(2);
sigmat = zeros(1,nBits);
k=1;
for i=1:halfSize(1)
    for j=1:halfSize(2)
        % XOR lower-right quadrant of share1 against upper-left of share2.
        auth(i,j)=bitxor(share1(i+halfSize(1),j+halfSize(2)),share2((i),(j)));
        sigmat(k)=auth(i,j);
        k=k+1;
    end
end
%auth=~auth;
% Shuffle the signature bits with a random permutation.
sigind = randperm(numel(sigmat));
sigmat = sigmat(sigind);
for i=1:nBits
    fprintf(fileID,'%d',sigmat(i));
end
% BUGFIX: the original leaked this file handle; close it explicitly.
fclose(fileID);
end
% Fish eats tank
function [thisFish, tank] = cannibalism(thisFish, tank)
% thisFish attempts to eat one randomly chosen, living, sufficiently
% small fish from tank.fish; the victim is marked STATUS.DIED and its
% size is added to thisFish.nutrition. If no fish is small enough,
% nothing happens.
% Maximum size of target fish
sizeLimit = thisFish.size * thisFish.cannibalismSizeCoefficient;
% Filter with fish of specific size
targetCounter = 0;
[~, n] = size(tank.fish);
% Preallocate a 1-by-n index list (-1 marks unused slots).
% BUGFIX: ones(n)*-1 created an n-by-n matrix, wasting O(n^2) memory;
% only a row vector of candidate indices is needed.
targetList = -ones(1, n);
for i = 1:n
    % Alive + small -> add to list
    % If a fish has a sizeCoefficient bigger or equal to 1 it could
    % eat itself in the end
    if (tank.fish(i).status == STATUS.ALIVE && tank.fish(i).size <= sizeLimit)
        targetCounter = targetCounter + 1;
        targetList(targetCounter) = i;
    end
end
% If no target -> no target to eat
if (targetCounter > 0)
    % Choose target randomly among the first targetCounter entries
    tIndex = ceil(targetCounter * rand());
    % Eat fish at specific index
    tank.fish(targetList(tIndex)).status = STATUS.DIED;
    % Eating makes the fish bigger
    % Adds up to nutrition as well
    thisFish.nutrition = thisFish.nutrition + ...
        tank.fish(targetList(tIndex)).size;
end
end
%% PSET4
% CPNS 34231
% The file mtNeuron.mat contains the responses of a single directionally tuned
% MT neuron to random dot stimuli moving coherently in directions varying from
% -90 to 90 degrees relative to the previously measured preferred direction of
% the neuron. The thirteen stimuli were each presented 184 times. Each stimulus
% began at time 0 and continued for 256 ms. Recordings continued until 512 ms
% after the beginning of the stimulus. The array has the dimensions 256 x 13 x 184.
% The first dimension is time in 2 ms bins, the second dimension motion direction,
% the third dimension the repeated presentations.
%% Problem 1
% Create a raster of all the spike trains in one plot, sorted by stimulus
% direction. Rasters corresponding to different directions should be
% represented in different colors.
clear all; close all;
load('mtNeuron.mat');
% NOTE(review): getfield is legacy style; mtNeuron.data / mtNeuron.time
% would be the modern equivalent.
spikes = getfield(mtNeuron,'data');
times = getfield(mtNeuron,'time');
figure;
RasterPlotDir(spikes,times,13,184);
xlabel('Time (ms)'); ylabel('Trial (color represents direction)');
title('Raster Plot (direction -90 to 90 degrees)');
%% Problem 2
% Compute and plot the mutual information between cumulative spike count
% and motion direction as a function of time.
% parameters
[dur,ndir,ntrials] = size(spikes);
p_dir = 1.0/ndir; % probability of a given direction (uniform prior)
max_spikes = dur; % the maximum possible number of spikes (at most one per time bin)
cum_spikes = cumsum(spikes,1); % the cumulative spikes
% variables
mutual_information = zeros(1,dur);
pt_n_dir = zeros(dur,ndir,max_spikes); % P(count = n-1 | direction) at time t
pt_n = zeros(dur,max_spikes); % P(count = n-1) at time t, marginalized over direction
for t = 1:dur
% first, compute pt_n_dir for all n and all dir
% (empirical probability over the ntrials repeats; index n corresponds
% to a cumulative count of n-1)
for n = 1:max_spikes
for dir = 1:ndir
pt_n_dir(t,dir,n) = length(find(cum_spikes(t,dir,:) == (n-1)))/ntrials;
end
end
% next, compute pt_n for all n by marginalizing over direction
for n = 1:max_spikes
pt_n(t,n) = sum(p_dir*pt_n_dir(t,:,n));
end
% use these values to compute the mutual information:
% I(t) = sum_dir p(dir) * sum_n p(n|dir) log2( p(n|dir) / p(n) ),
% skipping terms where either probability is zero (their limit is 0)
total_result = 0;
for dir = 1:ndir
direction_result = 0;
for n = 1:max_spikes
if(pt_n(t,n)*pt_n_dir(t,dir,n) ~= 0)
direction_result = direction_result + pt_n_dir(t,dir,n)*log2(pt_n_dir(t,dir,n)/pt_n(t,n));
end
end
total_result = total_result + p_dir*direction_result;
end
mutual_information(t) = total_result;
end
figure;
plot(times*1000,mutual_information);
xlabel('Time (ms)'); ylabel('Mutual Information (bits)');
title('Mutual Information between Cumulative Spike Count and Motion Direction');
% Note: This method for computing the mutual information is outlined in
% Osborne et al, 2004.
%% Problem 3
% Determine the latency of this neuron in a principled way. What proportion
% of the mutual information is available within the first 50 ms of the neural
% response? Within the first 100 ms?
% determine the latency of the neuron (in ms): the first time bin whose
% total spike count exceeds the running mean of all earlier bins by more
% than 6 standard deviations of those earlier counts
latency = 0;
counts = zeros(1,dur); % total spike count per time point
for t = 1:dur
for dir = 1:ndir
counts(t) = counts(t) + sum(spikes(t,dir,:));
end
if t ~= 1
previous_mean = mean(counts(1:t-1));
if counts(t) - previous_mean > 6*std(counts(1:t-1))
latency = 1000*times(t); % convert seconds to ms
break
end
end
end
display(latency);
% compute the proportion of mutual information available after 50 ms and 100 ms
binsize = 0.002; % bin size of the recordings (2 ms, in seconds)
max_info = max(mutual_information); % the maximum mutual information
% NOTE(review): these indices are only integers when (latency+50)/2 and
% (latency+100)/2 are whole numbers (true for the 94 ms latency quoted
% below); rounding the index would make this robust — TODO confirm.
proportion50 = mutual_information(((latency+50)/1000)/binsize)/max_info;
proportion100 = mutual_information(((latency+100)/1000)/binsize)/max_info;
display(proportion50); display(proportion100);
% Based on the raster plot, the latency of the neuron appears to be around
% 90 milliseconds as this tends to be where firing picks up significantly for
% directions focused around the preferred direction. A principled way to
% determine this latency is to find the time point where the difference
% between the current spike count (summed across all directions and all
% trials) and the mean spike count of all previous time points has a value
% greater than 6 times the standard deviation of the spike counts from the
% previous time points. This resulted in a latency of 94 milliseconds.
% Based on this latency, a greater proportion of the mutual information is
% available within 100 ms of the response than within 50 ms of the response.
% The exact proportions are shown below. This means more "information" can
% be obtained about the neuron from the spike counts within 100 ms of the
% response.
|
% Build a PSOM pipeline that runs the 'scores' brick on each rest run of
% the test-retest (trt) dataset, one job per subject/session pair.
clear
path_data = '/home/pbellec/database/stability_surf/';
%in.part = [path_data 'basc_cambridge_sc10.nii.gz'];
in.part = [path_data 'preproc' filesep 'trt' filesep 'part_sc100_resampled.nii.gz'];
% Sliding-window sampling, 30 volumes per window.
opt.sampling.type = 'window';
opt.sampling.opt.length = 30;
% Scale 10
list_subject = {'sub90179','sub94293'};
pipe = struct();
for iSubj = 1:numel(list_subject)
subject = list_subject{iSubj};
for iSess = 1:3
% Job tag doubles as the per-job output sub-folder name.
tag = [subject '_sess' num2str(iSess)];
in.fmri = [path_data 'preproc' filesep 'trt' filesep 'fmri_' subject '_session' num2str(iSess) '_rest.mnc.gz'];
opt.folder_out = [path_data 'xp_pb_trt_2014_06_08b' filesep tag filesep];
pipe = psom_add_job(pipe,tag,'niak_brick_scores_fmri_v2',in,struct(),opt);
end
end
opt_p.path_logs = [path_data 'xp_pb_trt_2014_06_08b' filesep 'logs'];
% mricron /home/pbellec/database/template.nii.gz -c -0 -o stability_maps.nii.gz -c 5redyell -l 0.05 -h 1&
% niak_brick_mnc2nii('/home/pbellec/database/stability_surf/xp_pb_trt_2014_06_08')
|
% Compute word-level convergence/divergence scores from GMM similarity
% scores, compare each session against a fake-conversation null
% distribution, and plot/save the per-group results.
% NOTE(review): 'fake_conversaion' and 'orginal_conversation' below are
% misspelled, but the names must match the variables stored in
% original_fake_convergence.mat — do not rename without re-saving.
load([project.paths.processedData '/original_fake_convergence.mat']);
gmm = load([project.paths.processedData '/GMM_scores.mat']);
load([project.paths.processedData '/processed_data_word_level.mat']);
% project.paths.figures = 'C:/Users/SMukherjee/Desktop/data/figures/convergence';
%% merge into only event distribution
ALL=[];
for g=1:length(project.subjects.group)
% fake: interleave the two column pairs into one null distribution
fake_event_dist_diff = fake_conversaion{g,1};
temp =[];
for i=1:size(fake_event_dist_diff,1)
temp = [temp; fake_event_dist_diff(i,1:2);fake_event_dist_diff(i,3:4)];
end
fake_event_dist_diff = cell2mat(temp);
% original: absolute column-1 vs column-2 difference per session
temp = orginal_conversation(g,:);
orginal_event_dist_diff = cell(1,4);
for i=1:length(orginal_event_dist_diff)
orginal_event_dist_diff{i} = abs(temp{1,i}(:,1) - temp{1,i}(:,2))';
end
orginal_event_dist = orginal_conversation(g,:);
% Score each of the 4 sessions; ALL{g,session,k} holds, in order:
% convergence, divergence, convergence_A, convergence_B, rare_event.
for session= 1:4
session_data = orginal_event_dist{session};
[convergence_score,divergence,convergence_A,convergence_B,rare_event] = convergence_condition2(session_data(:,1),session_data(:,2),fake_event_dist_diff,D,gmm.gmmScores(:,g),...
session,project,g,project.convergence.use_both_cond);
ALL{g,session,1} = convergence_score';
ALL{g,session,2} = divergence';
ALL{g,session,3} = convergence_A';
ALL{g,session,4} = convergence_B';
ALL{g,session,5} = rare_event';
end
end
% Collapse the per-group/per-session cells into matrices and save.
GMM_conv.convergence = cell2mat(ALL(:,:,1));
GMM_conv.divergence = cell2mat(ALL(:,:,2));
GMM_conv.convergence_A = cell2mat(ALL(:,:,3));
GMM_conv.convergence_B = cell2mat(ALL(:,:,4));
GMM_conv.rare_event = cell2mat(ALL(:,:,5));
save([project.paths.processedData '/GMM_speech_conv_spy.mat'],'GMM_conv');
%%
% show=1;
% b = [51 102 153 204];
% a=0;
% for i=1:4
% a = a+(cell2mat(ALL(:,:,i))*b(i));
% end
% % a= cell2mat(ALL(:,:,show));
% ALL
% Combine categories into one coded map (1..4), masked by rare events.
a = GMM_conv.convergence + 2*GMM_conv.convergence_A + 3*GMM_conv.convergence_B + 4*GMM_conv.divergence;
a = a .* GMM_conv.rare_event;
% convergence only (NOTE(review): this overwrites the combined map above)
a = GMM_conv.convergence .* GMM_conv.rare_event;
% Pad with a zero row because pcolor drops the last row/column.
% NOTE(review): 396 columns = 4 blocks of 99 (see separators below) —
% confirm this matches the data width.
a = [a;zeros(1,396)];
FigHandle1 = figure('Position', [100, 100, 800, 700]);
pcolor(a);
hold on;
% White separators between the 4 session blocks (99 columns each).
plot( [99 99],get(gca,'ylim'),'w','LineWidth',1)
plot( [198 198],get(gca,'ylim'),'w','LineWidth',1)
plot( [297 297],get(gca,'ylim'),'w','LineWidth',1)
set(get(gca,'YLabel'),'String','Groups');
set(get(gca,'XLabel'),'String','Sessions')
set(gca,'YTick',[1.5 2.5 3.5 4.5 5.5 6.5 7.5 8.5]);
set(gca,'YTickLabel',[project.subjects.group]);
set(gca,'XTick',[50 150 250 350]);
set(gca,'XTickLabel',[1:4]);
% Dummy patches so the legend has colored entries.
h1 = area(NaN,NaN,'Facecolor','k');
h2 = area(NaN,NaN,'Facecolor','g');
h3 = area(NaN,NaN,'Facecolor','b');
h4 = area(NaN,NaN,'Facecolor','r');
h5 = area(NaN,NaN,'Facecolor','w');
% NOTE(review): the second legend call replaces the first, so only the
% two-entry legend is actually shown.
hL = legend([h2 h1 h3 h4 h5],{'Convergence','divergence','convergence_A', 'convergence_B', 'No change'},'Orientation','horizontal','FontSize',9);
hL = legend([h2 h3],{'Convergence', 'No change'},'Orientation','horizontal','FontSize',9);
set(hL,'Position', [0.5 0.025 0.005 0.0009]);
saveas(FigHandle1,[project.paths.figures '/convergence_points.fig']);
saveas(FigHandle1,[project.paths.figures '/convergence_points.tif']);
%% an example pair
group = 8;session=3;diff_flag=0;
plot_session_convergence(D,orginal_conversation,gmm.gmmScores(:,group),ALL,group,session,project,diff_flag);
%%
% Words whose summed score across the interleaved session pairs is positive.
[event event_idx event_common_idx] = get_event_wordPair();
temp =[];
for i=1:size(ALL,1)
temp = [temp; ALL(i,1:2);ALL(i,3:4)];
end
a = cell2mat(temp);
b = sum(a);
idx = find(b > 0);
words = event(idx,:);
%%
convergence_data = convert2originalDataStruct(ALL,D,project);
%% get gmm score diff
% Per-subject score difference relative to the session-1 (pretest)
% distribution: odd-numbered subjects are measured against the lower
% bound, even-numbered subjects against the upper bound.
convergence_score_diff = zeros(size(D,1),1);
gmmScores = sum(gmm.gmmScores,2);
for sub=1:length(project.subjects.list)
temp = find(D(:,1)== sub & D(:,2) == 1);
Score = gmmScores(temp);
% compute the upper and lower of the pretest distribution
UP = nanmean(Score)+project.convergence.fakestd*nanstd(Score);
LW = nanmean(Score)-project.convergence.fakestd*nanstd(Score);
%
for session= 2:5
idx = find(D(:,1)== sub & D(:,2) == session);
session_data = gmmScores(idx);
if (mod(sub,2))
convergence_score_diff(idx) = LW - session_data;
else
convergence_score_diff(idx) = session_data - UP;
end
end
end
save([project.paths.processedData '/convergence.mat'],'convergence_data', 'convergence_score_diff');
|
function nop(varargin)
%NOP Do nothing.
%
%   NOP( ... ) accepts any number of arguments, ignores them all, and
%   returns nothing.
%
%   A do-nothing function for use as a placeholder when working with
%   callbacks or function handles.
% Intentionally does nothing
end
|
function [M_sort_r,numParticles] = fluor_sizing(M_sort_cut,pixeldistance,~,ROIsize,check)
%FLUOR_SIZING Measure the radius of fluorescent particles by Gaussian fitting.
%   [M_SORT_R,NUMPARTICLES] = FLUOR_SIZING(M_SORT_CUT,PIXELDISTANCE,~,ROISIZE,CHECK)
%   averages the first 20 frames of each tracked particle's region of
%   interest (ROI), fits a single 2D Gaussian to the averaged intensity
%   with fmincon, and stores the fitted radius (meters) and peak intensity
%   back into a copy of the tracking array.
%
%   Inputs:
%     M_sort_cut    - tracking array; columns used here: 3 = spot ID,
%                     4 = frame number, 5 = x, 6 = y, 7 = r (from VST)
%     pixeldistance - pixel-to-micron conversion factor (pixels per micron
%                     assumed — TODO confirm units)
%     ~             - unused placeholder argument
%     ROIsize       - side length (pixels) of the square ROI
%     check         - 0 to prompt the user for the first image file,
%                     1 if the image series was already located (globals set)
%   Outputs:
%     M_sort_r      - M_sort_cut with image numbers (col 2), fitted radius
%                     in meters (col 7) and fitted max intensity (col 8)
%     numParticles  - number of tracked particles found
%
%   Uses globals FolderName, FileNameImage, ImageNumber to remember the
%   image series across calls.
global FolderName FileNameImage ImageNumber
%Measures the radius of fluorescent particles
%Nested function: myfun5
clc;
close all;
% Allocate, initialize M_sort_r
M_sort_r = M_sort_cut;
% Generate 2D arrays to represent regions of interest around particles
xROI = linspace(round(-ROIsize/2),round(ROIsize/2),ROIsize+1);
yROI = xROI;
[XXROI,YYROI] = meshgrid(xROI,yROI); % xy space for ROI
%Bring in images, if haven't previously
if check == 0;
fprintf('Select first image in series');
pause(2);
[filename,FolderName] = uigetfile('*.tif','Select first image in series');
% Image number is the 4-digit run in the selected file name.
imagenum = char(regexp(filename,'\d{4}','match'));
ImageNumber = str2double(imagenum);
ImageNumber;
% Base file name is everything before the 4-digit number.
FileNameImage = char(regexp(filename,'\w+(?=\d{4}\.tif)','match'));
FileNameImage;
elseif check == 1;
fprintf('Sizing fluorescent particles in images\n')
pause(1);
end;
for m = ImageNumber:ImageNumber+max(M_sort_cut(:,4))
%Add image numbers to 2nd column of M_sort_r
M_sort_r((M_sort_r(:,4) == m - ImageNumber),2)= m;
end;
% Find number of particles
numParticles = max(M_sort_cut(:,3))+1 %number of tracked particles
if numParticles == 0
fprintf('Check M_sort array');
end
for i=1:numParticles % number of tracked particles
close all
clc
fprintf('********** Inside loop over Particles ****************************\n');
i
%Designate columns within tracking data
N = M_sort_cut((M_sort_cut(:,3) == i-1),:);
A = N(:,4); %framenumber
B = N(:,3); %spotID
C = N(:,5); %x
D = N(:,6); %y
E = N(:,7); %r set in VST
C_offset = 2; % move center of ROI to center of particle; was 3.5??? check this
D_offset = 0; % ";was -2.0
C = C+C_offset; % "
D = D+D_offset; % "
%Average the intensities of the first 20 frames
% For each particle, loop over all frames to average z at each pixel
% within the ROI that is centered on moving particle
%Set z_sum at 0, set count at 1
% (assigning the bottom-right element zero-expands z_sum back to a full
% (ROIsize+1)x(ROIsize+1) zero array after the scalar reset at the end
% of each particle iteration)
z_sum(ROIsize+1,ROIsize+1)=0;
count = 1; % reset at start of each loop over j
for j = ImageNumber:ImageNumber+19;
count
PICTNAME = sprintf('%s%s%04d.tif',FolderName,FileNameImage,j);
img = imread(PICTNAME); % first image in series
if j == ImageNumber;
img1 = img; % set first image aside for later display
figure (1) % ********************************************** Fig 1
I_particle0 = img1(round(D(1)-ROIsize/2):round(D(1)+ROIsize/2),...
round(C(1)-ROIsize/2):round(C(1)+ROIsize/2));
imshow(I_particle0,[0 round(1.1*max(max(I_particle0)))])
titletext2 = sprintf('Fig 1 ROI for particleID = %s', num2str(B(1)));
title(titletext2);
xlabel('x');
ylabel('y');
pause(1);
end % end of if j == ImageNumber
cc = round(C(j-ImageNumber+1)); % x at center (columns) of box
dd = round(D(j-ImageNumber+1)); % y at center (rows) of box
z_sum = z_sum + double(img(dd-ROIsize/2:dd+ROIsize/2,cc-ROIsize/2:cc+ROIsize/2));
count = count + 1;
end % end of for loop, "j = ImageNumber:ImageNumber + 19" over first 20 frames.
% divide sum of z's by number of frames to get averaged image of vesicle
% NOTE(review): the frame count 20 is hard-coded here; 'count' is
% incremented above but never used for the average.
z_ave = z_sum/20;
% scale z values into appropriate range for fitting; we will scale back later
scalingfactor = max(max(z_ave))- min(min(z_ave));
z_scaled = z_ave/scalingfactor;
figure(2)% ******************************************************** Fig 2
% mesh plot of ROI for averaged raw vesicle intensities
% (0,0) at upper left of image;
mesh(xROI,yROI,z_ave)
set(gca,'YDir','reverse'); % flip y axis so 00 is at upper left.
title('Fig 2. Mesh of averaged z');
xlabel('x (0,0)topleft');
ylabel('y ');
pause(1);
% * * * * * * * finished with raw data * * * * * * *
% % ************* Use fmincon to fit single Gaussian to raw data *********
% % %initial guesses for the 10 variables which define 1d Gaussian
x0 = [ max(max(z_scaled))-min(min(z_scaled)); 0.22; 0; 0; -min(min(z_scaled))];
% x0 = [ -.25; 0.22; 0; 0; 0.1]
% constraints
% NOTE(review): A is reused here as fmincon's (empty) linear-constraint
% matrix, clobbering the frame-number column extracted above.
A = [];
b = [];
Aeq = [];
beq = [];
% set upper and lower bounds for variables in x
lb = [-3; 0;-5;-5; -2]; %[-3; 0; -5;-5;-1]
ub = [ 3; 6; 5; 5; 0]; %[3; 6 5; 5; 1]
myfunflag = 0; % set flag to print x on first use of myfun
% set options for fmincon
options = optimset('largescale','off','hessian','off',...
'Display','iter','tolFun',1e-8,'maxfunEvals',15000);
% % *************** ***************** ************* ************
% % PRIMARY USE OF fmincon ******************************************* L289
[x,fvalfunction] = fmincon(@myfun5,x0,A,b,Aeq,beq,lb,ub)
% fprintf('*************finished fmincon*********************\n')
figure(3)% *********************************************************** Fig 3
% z_fit was set inside nested function myfun5 during the optimization,
% so it holds the fit from the last objective evaluation.
contour(xROI,yROI,z_fit,10);
% mesh(xROI,yROI,z_fit);
set(gca,'YDir','reverse'); % flip y axis so 00 is at upper left.
set(gca,'XGrid','on','YGrid','on');
title('Fig 3. zfit');
xlabel('xROI (col)');
ylabel('yROI (row)');
axis([-ROIsize/2 ROIsize/2 -ROIsize/2 ROIsize/2 0 2000]);%was -0.3 0.3
pause(1);
figure (4) % ******************************************************* Fig 4
% mesh plot of ROI for fitted, unscaled particle
% (0,0) at upper left of image;
z_fit_unscaled = (x(1)*scalingfactor)*exp(-((x(2)*(XXROI-x(3)).^2)+(x(2) *(YYROI-x(4)).^2)))-((x(5)*scalingfactor));
mesh(z_fit_unscaled)
set(gca,'YDir','reverse'); % flip y axis so 00 is at upper left.
title('Fig 4. Mesh of fitted, unscaled z');
xlabel('x (0,0)top_left');
ylabel('y ');
%find fitted max
fprintf('fitted max= \n');
max_int = max(max(z_fit_unscaled))
% Convert the fitted width parameter x(2) = 1/(2*sigma^2) back to sigma
% (pixels), then to microns via pixeldistance.
distance_sigma = ((1/(2 * x(2)))^0.5)/(pixeldistance);
radius = distance_sigma;
radius % distance across half a single gaussian,in microns
pause(2);
z_sum = 0;
fprintf('********** end of this particle ***************\n');
fprintf('*************************************************\n');
% Pick lines in M_sort for particle i
M_sort_r((M_sort_cut(:,3) == i-1),7) = radius/(10^6); % converts from microns to meters for GSE calculations
M_sort_r((M_sort_cut(:,3) == i-1),8) = max_int;
end % end of outermost loop over i=1:numberMatFiles
% write data to Excel spreadsheet
% OUTPUTPATH2 = strcat(FolderName,'sizedata.xls')
% xlswrite(OUTPUTPATH2,data)
% % *******************************************************************
% % ************************** Nested Functions ************
function f = myfun5(x)% % x is a 1-d array of 4 parameters (for 1 2d Gaussian)
% % x is optimized by fmincon
% % f is chi-squared, the measure of the goodness of fit to the data
% % parameters in x are
% % Gaussian 1 definition
% % x(1)=A1
% % x(2)=1/(2sigmax1^2)
% % x(3)=x_center
% % x(4)=y_center
% % x(5)=baseline
% Nested scope: reads XXROI, YYROI, z_scaled, x0, myfunflag from the
% parent workspace and writes z_fit back into it.
f = 0;
if (myfunflag == 0)
fprintf('first use of myfun5. x0, x ,x');
x0
x
myfunflag = 1
end
% integral = 0;
% construct z_fit, the 2D array of the FITTED SINGLE Gaussian
z_fit =x(1)*exp(-((x(2)*(XXROI-x(3)).^2)+(x(2) *(YYROI-x(4)).^2))) - x(5);
% compute f, the sumsquare of the deviations between fitted and observed, scaled z
f = sum(sum((z_fit-z_scaled).^2));
end % end of nested function myfun5
end % end of fluor_sizing
|
% snesim_set_resim_data : Select and set a region to resimulate.
%
% Call
%   [S,pos]=snesim_set_resim_data(S,D,lim,resim_type)
%
%   S   : SNESIM or VISIM structure
%   D   : complete conditional data set (e.g. S.D(:,:,1));
%   lim : lim(1) : horizontal radius (in meter)
%         lim(2) : vertical radius (in meter)
%         (missing entries default from the previous one)
%
%   resim_type : 1 --> resimulate a box around a random position (default)
%                2 --> resimulate a random subset of model parameters;
%                      lim(1) gives the count, or the fraction of the
%                      grid if lim(1)<=1
%
%   pos : randomly drawn center position [x y z] of the resimulation box
%
function [S pos]=snesim_set_resim_data(S,D,lim,resim_type)
% Default conditional data: first realization stored in S.
if nargin<2
    D=S.D(:,:,1);
end
if isempty(D)
    D=S.D(:,:,1);
end
% Default region radii.
if nargin<3
    lim(1)=3;
    lim(2)=3;
    lim(3)=3;
end
if nargin<4
    resim_type=1; % SQUARE AREA
    %resim_type=2; % NUMBER OF DATA
end
% Draw a uniformly random center position within the grid extent.
pos(1)=min(S.x)+rand(1)*(max(S.x)-min(S.x));
pos(2)=min(S.y)+rand(1)*(max(S.y)-min(S.y));
pos(3)=min(S.z)+rand(1)*(max(S.z)-min(S.z));
% Pad lim so all three radii are defined.
if length(lim)<2, lim(2:3)=lim(1);end
if length(lim)<3, lim(3)=lim(2);end
[xx,yy,zz]=meshgrid(S.x,S.y,S.z);
if resim_type==2
    % RANDOM SELECTION OF MODEL PARAMETERS FOR RESIM
    N=numel(xx);
    n_resim=lim(1);
    if n_resim<=1
        % n_resim at most one: it defines the fraction of N to use
        n_resim=ceil(n_resim.*N);
    end
    % Keep all remaining cells as conditioning data.
    n_cond=N-n_resim;
    ih=randomsample(N,n_cond);
    d_cond=[xx(ih(:)) yy(ih(:)) zz(ih(:)) D(ih(:))];
else
    % BOX TYPE SELECTION OF MODEL PARAMETERS FOR RESIM
    % Everything outside the box around pos stays as conditioning data.
    % NOTE(review): only the x and y limits are applied here; lim(3)/z is
    % ignored, which looks intended for 2D grids — confirm for 3D use.
    used=xx.*0+1;
    used(find( (abs(xx-pos(1))<lim(1)) & (abs(yy-pos(2))<lim(2)) ))=0;
    ih=find(used);
    d_cond=[xx(ih) yy(ih) zz(ih) D(ih)];
end
% Write the conditioning data to the file referenced by the structure.
write_eas(S.fconddata.fname,d_cond);
|
% Load raw stimulus and spike data, set global parameters, and build the
% design matrices used for GLM fitting (stimulus filter, post-spike
% history, and coupling components).
clear all;
load( 'NonRepeatStim');
load('NonRepeatstimtimes');
load('SpTimes');
addpath('preProcessTools');
defineGlobalParameters();
load('globalParams');
% Build general data for training(Spike train, stimulus, stimulus design
% matrix)
% NOTE(review): Stim, stimtimes, SpTimes and the *SampFactor /
% stimulusFilterParamsSize parameters are assumed to come from the .mat
% files loaded above — confirm the stored variable names.
[spikes, stimulus, stimulusDesignMatrix, postSpikeBaseVectors, spikeHistoryData , couplingData, refreactoryPeriodArr] = BuildGeneralDataForLearning(Stim, stimtimes, SpTimes,stimulusFilterParamsSize,...
spikesWantedSampFactor, stimulusWantedSampleFactor);
|
% compare localization
% Compare two TDoA-based footstep localization approaches on vibration
% traces from 4 floor sensors: (1) first-peak arrival-time differences and
% (2) cross-correlation lags, each fed into an fsolve-based solver.
clear all
close all
clc
load('RushilGabriel_filtered_50_90.mat');
Fs = 6500; % sampling rate (Hz)
% Sensor x,y coordinates — units presumably meters; TODO confirm.
SensorLocations = [0,0;0,3;5,0;3.5,0];
options = optimoptions('fsolve','Algorithm','levenberg-marquardt','Display','none');
sensorID = 1; % reference sensor used for step detection
sensorNum = 4;
for personID = [1,3]
personID
for traceID = 1:10
traceID
% Denoise the trace and its noise profile before step detection.
signal.index = traceSig{personID, traceID, sensorID}.index;
signal.signal = signalDenoise(filteredSig{personID, traceID, sensorID},50);
noiseSig{sensorID}.signal = signalDenoise(noiseSig{sensorID}.signal,50);
% extract steps and localize
[ stepEventsSig, stepEventsIdx, stepEventsVal, ...
stepStartIdxArray, stepStopIdxArray, ...
windowEnergyArray, noiseMu, noiseSigma, ...
noiseRange ] = SEDetection( signal.signal, noiseSig{sensorID}.signal,16,Fs );
% Keep only the 5 largest-amplitude step events, restored to time order.
[~, maxSigIdx] = sort(abs(stepEventsVal), 'descend');
maxSigIdx = maxSigIdx(1:5);
maxSigIdx = sort(maxSigIdx,'ascend');
stepEventsIdx = stepEventsIdx(maxSigIdx);
stepEventsVal = stepEventsVal(maxSigIdx);
stepStartIdxArray = stepStartIdxArray(maxSigIdx);
stepStopIdxArray = stepStopIdxArray(maxSigIdx);
% Plot detected steps with start (red) / stop (green) boundary lines.
figure;
plot(signal.index,signal.signal);hold on;
scatter(signal.index(stepEventsIdx), signal.signal(stepEventsIdx));hold on;
stepNum = length(stepEventsIdx);
for stepIdx = 1 : stepNum
tempIdx = stepStartIdxArray(stepIdx);
plot([signal.index(tempIdx),signal.index(tempIdx)],[-550, 500],'r');
tempIdx = stepStopIdxArray(stepIdx);
plot([signal.index(tempIdx),signal.index(tempIdx)],[-550, 500],'g');
end
hold off;
% extract steps within one trace from 4 sensors
figure;
peakTimestamp = zeros(stepNum,sensorNum);
firstPeakTDoA = zeros(stepNum,sensorNum);
xcorrTDoA = zeros(stepNum,sensorNum);
calcLoc = zeros(stepNum,4); % 1,2 for first peak, 3,4 for xcorr
for stepIdx = 1 : stepNum
subplot(1,stepNum,stepIdx);
startTimestamp = signal.index(stepStartIdxArray(stepIdx));
stopTimestamp = signal.index(stepStopIdxArray(stepIdx));
for sensorIdx = 1 : sensorNum
% Cut the same time window out of every sensor's trace.
tempSigIdx = traceSig{personID, traceID, sensorIdx}.index;
tempSigSig = filteredSig{personID, traceID, sensorIdx};
tempIdx = find(tempSigIdx > startTimestamp & ...
tempSigIdx < stopTimestamp);
tempSig = tempSigSig(tempIdx);
plot(tempIdx, tempSig); hold on;
% First prominent peak = arrival-time estimate for this sensor.
[peaks, locs,w,p] = findpeaks(tempSig,'MinPeakDistance',Fs/500,'MinPeakHeight',max(tempSig)/2,'Annotate','extents');
scatter(tempIdx(locs(1)),peaks(1),'rv');
peakTimestamp(stepIdx, sensorIdx) = tempIdx(locs(1));
stepSigSet{personID, traceID, stepIdx, sensorIdx} = [tempIdx';tempSig];
if sensorIdx == 1
referenceStep = stepSignalNormalization( tempSig );
else
% Cross-correlation lag of this sensor vs. the reference sensor.
compareStep = stepSignalNormalization( tempSig );
[v,sh] = max(abs(xcorr(referenceStep, compareStep)));
xcorrTDoA(stepIdx, sensorIdx) = sh -length(referenceStep);
end
end
hold off;
%% obtaining TDoA
% use first peak
firstPeakTDoA(stepIdx,:) = peakTimestamp(stepIdx,:) - peakTimestamp(stepIdx,1);
% NOTE(review): 'tic' and 'pi' below are used as plain variables,
% shadowing MATLAB's tic timer and the pi constant for the rest of
% this script — renaming them would be safer.
for v = [0.05]
x0 = [0;0];
pc = SensorLocations(1,:);
pi = SensorLocations(2,:);
pj = SensorLocations(3,:);
% Propagation speeds per sensor pair; v13 = 1.5*v12 — TODO confirm.
v12 = v;
v13 = v*1.5;
tic = -firstPeakTDoA(stepIdx,2);
tjc = -firstPeakTDoA(stepIdx,3);
[x_firstPeak, fval, exitflag] = fsolve(@(x) localizationEquations( x, pi, pj, pc, tic, tjc, v12, v13 ),x0,options);
if validateLocation( SensorLocations, x_firstPeak ) == 1
calcLoc(stepIdx,1) = x_firstPeak(1);
calcLoc(stepIdx,2) = x_firstPeak(2);
end
tic = xcorrTDoA(stepIdx,2);
tjc = xcorrTDoA(stepIdx,3);
[x_xcorr, fval, exitflag] = fsolve(@(x) localizationEquations( x, pi, pj, pc, tic, tjc, v12, v13 ),x0,options);
if validateLocation( SensorLocations, x_xcorr ) == 1
calcLoc(stepIdx,3) = x_xcorr(1);
calcLoc(stepIdx,4) = x_xcorr(2);
end
end
end
figure;
subplot(2,1,1);
plot(firstPeakTDoA);
subplot(2,1,2);
plot(xcorrTDoA);
%% trilateration with these two
% first peak
figure;
subplot(2,1,1);
scatter(SensorLocations(:,1),SensorLocations(:,2),'ro');hold on;
scatter(calcLoc(:,1),calcLoc(:,2),'b*'); hold off;
subplot(2,1,2);
scatter(SensorLocations(:,1),SensorLocations(:,2),'ro');hold on;
scatter(calcLoc(:,3),calcLoc(:,4),'b*'); hold off;
end
end
|
% Compare pixel statistics between left/right texture pools for each
% size-ratio condition, plotting |t| per statistic with reference lines.
stat = 'pixelStats';
stimfname = 'allStimuli123_6.mat'; % if not on path, it's in equneq/
load(stimfname,'sizes','sTr','dTr'); % some trials and parameters
% Psychophysical axis: log2 of the per-trial size ratio.
sizeRatio = sizes(:,1)./sizes(:,2);
logRatioAxis = log2(sizeRatio);
nTicks = numel(logRatioAxis);
conds = {'6vs6', '12vs12', '6vs12', '12vs6'};
nConds = numel(conds);
figure();
for iCond = 1:nConds
condName = conds{iCond};
conddat{iCond} = load(['textureStats_1pool_' condName '.mat']);
[tt{iCond}, tresults{iCond}, statL{iCond}, statR{iCond}] = compareStats(conddat{iCond}.tsL, conddat{iCond}.tsR, stat);
% One subplot row per condition: |t| versus log size ratio.
subplot(nConds, 1, iCond)
plot(logRatioAxis, abs(tt{iCond}),'o-')
title(condName)
set(gca, 'FontSize',14)
ylabel('|t_{value}|')
hold on
ylim([0 10])
% Horizontal reference lines at |t| = 2.2 and 3.1.
plot(logRatioAxis, 2.2*ones(nTicks,1),'k:')
plot(logRatioAxis, 3.1*ones(nTicks,1),'k--')
end % for cond num
xlabel('log_2 (mean ratio)')
subplot(nConds,1,1)
legend('mean','var','skew','kurt', 'Location','NorthOutside','Orientation','Horizontal')
suptitle('Paired t-test (df=11) for each Pixel Statistic')
% Echo the results to the console.
tt, tresults, statL, statR
|
function figure2_massGap
% figure2_massGap  Plot the companion-mass vs. inner-BBH-mass plane for
% hierarchical triple mergers: the (pulsational) pair-instability mass
% gap, population-weighted shading from the CHE and non-CHE channels, and
% the 90% credible contours of GW170729 and GW190521. Masses are in solar
% units (see axis labels). The figure is saved to
% ../figures/figure2_massGap.png.
tic;
%
% Mass limits and gap boundaries (solar masses).
minBHmass = 2.5;
maxBHmass = 300;
lowTotalMassStevenson = 80;
lowGap = 43;
uppGap = 124;
massRatioLimit = 0.9;
% Loading LIGO posteriors
% GW170729
GW170729_90 = importdata('../data/GW170729_mirroredM1M2_90percent_contour.dat');
GW170729_90_m1 = GW170729_90(:,1);
GW170729_90_m2 = GW170729_90(:,2);
% GW190521
GW190521_90 = importdata('../data/GW190521_mirroredM1M2_90percent_contour.dat');
GW190521_90_m1 = GW190521_90(:,1);
GW190521_90_m2 = GW190521_90(:,2);
% Creating histogram
%
% Radiated-mass fraction as a function of mass ratio (chi = 0 table).
M=importdata('../data/Mrad_fraction_chi0.dat');
massRatioRad = M.data(:,1);
fRad = M.data(:,2);
%
CHE = importdata('../data/totalMassMergingBBHsCHE.mat');
nonCHE = importdata('../data/totalMassMergingBBHsNonCHE.mat');
% Calculating post-merger mass
% CHE channel
% Match each binary's mass ratio to the nearest tabulated value to look
% up the radiated fraction, then reduce the total mass by that fraction.
[minVal, idx] = min(abs(CHE.massRatioMergingBBHsCHE - massRatioRad));
totalMass_CHE = CHE.totalMassMergingBBHsCHE;
massRatioRad_CHE = massRatioRad(idx)';
fRad_CHE = fRad(idx)';
totalMassBBHin_CHE = totalMass_CHE.*(1.-fRad_CHE);
clear minVal
clear idx
% Non-CHE channel
[minVal, idx] = min(abs(nonCHE.massRatioMergingBBHsNonCHE - massRatioRad));
totalMass_nonCHE = nonCHE.totalMassMergingBBHsNonCHE;
massRatioRad_nonCHE = massRatioRad(idx)';
fRad_nonCHE = fRad(idx)';
totalMassBBHin_nonCHE = totalMass_nonCHE.*(1.-fRad_nonCHE);
%
% Keep only near-equal-mass binaries and histogram their post-merger mass.
totalMass_temp = [totalMassBBHin_CHE, totalMassBBHin_nonCHE];
massRatio_temp = [CHE.massRatioMergingBBHsCHE, nonCHE.massRatioMergingBBHsNonCHE];
totalMass = totalMass_temp(massRatio_temp >= massRatioLimit);
xBins = linspace(0,90,19);
[Ntotal,edgesTotal] = histcounts(totalMass,xBins);
NtotalNorm = Ntotal./max(Ntotal);
% Shift bin edges to bin centers.
edgesTotal = edgesTotal(2:end) - (edgesTotal(2)-edgesTotal(1))/2;
%
% Log-spaced grid: X = triple-companion mass, Y = inner-BBH total mass.
massTripleCompanion = logspace(log10(minBHmass),log10(maxBHmass),2000);
totalMassInnerBBH = logspace(log10(2*minBHmass),log10(2*maxBHmass),2000);
[X,Y] = meshgrid(massTripleCompanion,totalMassInnerBBH);
totalMassTripleMerger = X+Y;
minAlpha = 0.2;
% Alpha (transparency) map: weight each grid cell by the normalized
% histogram value inside the allowed total-mass band.
colorTotalMassTripleMerger = zeros(size(totalMassTripleMerger));
for num=1:length(edgesTotal)-1;
colorTotalMassTripleMerger(find(Y >= edgesTotal(num) & Y < edgesTotal(num+1) & totalMassTripleMerger<=2*uppGap & totalMassTripleMerger>=lowTotalMassStevenson)) = NtotalNorm(num)+minAlpha;
end
% colorTotalMassTripleMerger(find(totalMassTripleMerger>100 & totalMassTripleMerger<2*uppGap))=1;
% colorTotalMassTripleMerger(find(Y<uppGap & X<lowGap & totalMassTripleMerger>100 & totalMassTripleMerger<2*uppGap))=minAlpha;
% colorTotalMassTripleMerger(find(totalMassTripleMerger>80 & totalMassTripleMerger<=100))=medAlpha;
colorTotalMassTripleMerger(find(Y>uppGap & Y<2*uppGap & totalMassTripleMerger>100 & totalMassTripleMerger<2*uppGap))=minAlpha;
% colorTotalMassTripleMerger(find(X>uppGap & X<2*uppGap & Y<lowGap & totalMassTripleMerger>100 & totalMassTripleMerger<2*uppGap))=minAlpha;
% Blank inside the mass gap
colorTotalMassTripleMerger(find(Y>lowGap*2 & Y<uppGap+minBHmass))=0.0;
colorTotalMassTripleMerger(find(X>lowGap & X<uppGap))=0.0;
% Region index used as surface color (rendered via colormap(lines(5))).
regionColor = ones(size(totalMassTripleMerger));
regionColor(find(X>Y))=2;
regionColor(find(X<lowGap & Y>uppGap))=3;
regionColor(find(X<Y & X>uppGap))=5;
% Visually delimitate regions
colorTotalMassTripleMerger(find(totalMassTripleMerger>99 & totalMassTripleMerger<101))=0;
% colorTotalMassTripleMerger(find(Y>lowGap-0.5 & Y<lowGap+0.5 & totalMassTripleMerger > uppGap))=0;
% Plot
% Styling parameters: marker size, font size, line width, fill alphas.
sz=10;
fs=18;
lw=2.0;
alphaNum0=0.7;
alphaNum1=0.2;
stringX = '$\rm \textit{M}_{BH,3}\ [M_{\odot}]$';
stringY = '$\rm \textit{M}_{BBH,in}\ [M_{\odot}]$';
% Based on Du Buisson + 2020
% BH masses 43 < M/Msol < 124
N = 100;
xPISNGap = linspace(lowGap,uppGap+0.4,N); % Based on Du Buisson+2020
Xfull = linspace(minBHmass,maxBHmass,N);
Y1 = repmat([2*minBHmass;2*maxBHmass],1,N); % Based on Stevenson+2019
Y2 = repmat([2*lowGap;uppGap+minBHmass],1,N);
X2=massTripleCompanion;
Z2=ones(size(X2));
totalMass2 = Z2.*2*uppGap;
totalMass3 = Z2.*lowGap;
maxGapFarmer=56;
totalMassFarmer = Z2.*maxGapFarmer;
color=lines(7);
% Plot
numTop = 3; % z-height at which annotations are drawn above the surface
clf
hold on
s=surf(Y,X,regionColor,'HandleVisibility','Off');
set(s, 'EdgeColor', 'none');
set(s, 'AlphaData', colorTotalMassTripleMerger, 'AlphaDataMapping', 'none');
set(s, 'FaceAlpha', 'flat');
indexBot = find(X2 <= lowGap);
indexTop = find(X2 >= uppGap);
plot3(totalMass2(indexBot),X2(indexBot),Z2(indexBot),'--k','Linewidth',lw,'HandleVisibility','Off')
plot3(totalMass2(indexTop),X2(indexTop),Z2(indexTop),'--k','Linewidth',lw)
% plot3(totalMass3(indexBot),X2(indexBot),2.*Z2(indexBot),'-.k','Linewidth',lw)
% plot3(totalMass3(indexTop),X2(indexTop),2.*Z2(indexTop),'-.k','Linewidth',lw,'HandleVisibility','Off')
% NOTE(review): despite its name, this RGB triplet is a green hue.
purpleColor = [0.4660 0.6740 0.1880];
plot3(totalMassFarmer(indexBot),X2(indexBot),2.*Z2(indexBot),'-.k','Linewidth',lw)
plot3(totalMassFarmer(indexTop),X2(indexTop),2.*Z2(indexTop),'-.k','Linewidth',lw,'HandleVisibility','Off')
plot3(GW170729_90_m1,GW170729_90_m2,numTop.*ones(size(GW170729_90_m2)),'Color',purpleColor,'Linewidth',lw)
plot3(GW190521_90_m1,GW190521_90_m2,numTop.*ones(size(GW190521_90_m2)),'--','Color',purpleColor,'Linewidth',lw)
% Shaded bands marking the gap region on each axis.
h1=fill([Y1(1,:) flip(Y1(2,:))], [xPISNGap flip(xPISNGap)],'k','EdgeColor','none','HandleVisibility','Off');
set(h1,'FaceAlpha',alphaNum0)% if size(y0,1)==2 %plot shaded area
h2=fill([Y2(1,:) flip(Y2(2,:))],[Xfull flip(Xfull)],'k','EdgeColor','none','HandleVisibility','Off');
set(h2,'FaceAlpha',alphaNum0)% if size(y0,1)==2 %plot shaded area
% Region labels.
text(10,140,numTop,'\bf IMRI','FontSize',fs,'Color','k','Interpreter','Latex')
text(65,140,numTop,'\bf A','FontSize',fs,'Color','k','Interpreter','Latex')
text(72,35,numTop,'\bf B','FontSize',fs,'Color','k','Interpreter','Latex')
text(63,18,numTop,'\bf U','FontSize',fs,'Color','k','Interpreter','Latex')
text(140,20,numTop,'\bf C','FontSize',fs,'Color','k','Interpreter','Latex')
ht=text(150,10,numTop,'\bf IMRI','FontSize',fs,'Color','k','Interpreter','Latex');
set(ht,'Rotation',270);
text(6,75,numTop,'\bf Mass gap','FontSize',fs,'Color','w','Interpreter','Latex')
hgap=text(105,10,numTop,'\bf Mass gap','FontSize',fs,'Color','w','Interpreter','Latex');
set(hgap,'Rotation',270);
text(300,10,numTop,'\bf CHE','FontSize',fs,'Color','k','Interpreter','Latex')
text(300,200,numTop,'\bf CHE','FontSize',fs,'Color','k','Interpreter','Latex')
plot3(49.5,33.5,numTop,'dk','MarkerFaceColor','k','MarkerSize',sz)
% blueString = '$\rm \textit{M}_{BH,out} < \textit{M}_{BBH,in}$';
% redString = '$\rm \textit{M}_{BH,out} > \textit{M}_{BBH,in}$';
% yellowString = '$\rm \textit{M}_{BBH,in} \gg \textit{M}_{BH,out}$';
%
% text(10,5,numTop,blueString,'FontSize',fs,'Color','k','Interpreter','Latex')
% text(6,30,numTop,redString,'FontSize',fs,'Color','k','Interpreter','Latex')
% ht2=text(400,30,numTop,yellowString,'FontSize',fs,'Color','k','Interpreter','Latex')
% set(ht2,'Rotation',270);
% GW170729MBBHin = 49.5;
% GW170729MBBH3 = 33.5;
% plot3(GW170729MBBHin,GW170729MBBH3,numTop,'*k','MarkerSize',10)
legend( 'CHE limit',...
'Single BH limit',...
'GW170729',...
'GW190521',...
'Triple',...
'Interpreter','Latex',...
'Location','SouthWest',...
'FontSize',fs,...
'Box','Off');
xlabel(stringY,'Interpreter','Latex','FontSize',fs)
ylabel(stringX,'Interpreter','Latex','FontSize',fs)
ylim([minBHmass maxBHmass])
xlim([2*minBHmass 2*maxBHmass])
yticks([2.5 10 43 124 200 300]);
xticks([5 10 43 86 124+minBHmass 248 600]);
set(gca, 'XScale', 'log')
set(gca, 'YScale', 'log')
set(gca,'Fontsize',fs)
box on
hold off
colormap(lines(5))
print(gcf,'../figures/figure2_massGap.png','-dpng','-r400');
toc;
end
% ADPCM encode/decode round-trip test on a synthetic 80 Hz sine wave
% sampled at 8000 SPS. Plots the input, intermediate encoder/decoder
% signals, and the percentage reconstruction error.
clear all
%[Y,Fs] = audioread('test.wav');
n = [1:100];
f = 80;        % test-tone frequency (Hz)
Fs = 8000;     % sampling rate (samples/sec)
Ns = Fs/f;     % samples per period
N = 1:100;
% 12-bit style signals centered at 2048 with different amplitudes
% (Y2/Y3 kept for alternative experiments).
Y1 = 2048+ 2048* sin(2*pi*N/Ns-pi/4);
Y2 = 2048+ 2048* .1*sin(2*pi*N/Ns-pi/4);
Y3 = 2048+ 2048* .3*sin(2*pi*N/Ns-pi/4);
Y= [Y1 Y1 Y1]; % three identical periods of the full-scale tone
Y = int16(Y);
% Encode then decode; 're'/'dre' carry per-sample diagnostic rows from
% the codec (row meanings defined in adpcm_encoder_mod/adpcm_decoder_mod).
[re,en1] = adpcm_encoder_mod(Y,Y(1));
[dre,YY] = adpcm_decoder_mod(en1,Y(1));
L = length( re(3,:) )
N= 1:L;
t= N/Fs; % time axis in seconds
figure(1);
plot(t,Y)
title('Input Signal before encoding Fs= 8000 SPS ')
xlabel('Time(sec)')
ylabel('Input Signal')
grid on;
figure(2);
plot(t,re(4,:))
title('Signal before quantization ')
xlabel('Time(sec)')
ylabel('IP Signal')
grid on;
figure(3);
plot(t,re(10,:))
title('Signal after quantization ')
xlabel('Time(sec)')
ylabel('encoded value')
grid on;
figure(4);
plot(t,dre(1,:))
title('Signal after dequantization ')
xlabel('Time(sec)')
ylabel('dequantization Signal')
grid on;
figure(5);
plot(t,dre(2,:))
title('decoded samples ')
xlabel('Time(sec)')
ylabel('Output Signal')
grid on;
% Percentage error between the encoder input row and the decoded output
% (vectorized; replaces the former un-preallocated element loop).
inp = re(1,:) ;
err = inp(1:L) - YY(1:L);
% NOTE(review): max(Y) is int16, so this division follows MATLAB's
% integer-arithmetic rules (result class int16) exactly as the original
% scalar loop did -- confirm whether a double percentage was intended.
err = err*100/max(Y)
figure(6);
plot(t,err);
title('IP-OP Error ')
xlabel('Time(sec)')
ylabel('% Error')
grid on;
%% Stelling 21 (Statement 21)
%
% Claim (translated from Dutch): "An element-wise logical operator always
% consists of 2 symbols: || and the operator &&."
%
% Answer: 0 (false). In MATLAB the element-wise logical operators are the
% single-symbol & and |; the two-symbol && and || are short-circuiting
% scalar operators.
Antwoord = 0;
% Monte Carlo summary for a simulation study loaded from disk: compares
% the mean parameter estimates (b2) and two standard-error estimates
% (SE1, SE2) against the true values, computes nominal-5% rejection
% rates, and plots sampling distributions with fitted normal overlays.
clear all
load('test_13-Feb-2020_2500_0.6.mat')
% True data-generating parameters (note beta1(1) = 0.5 + 0.5 = 1).
beta1 = [0.5 + 0.5; 0; 1; -1];
rho1 = 0.6;
rho2 = rho1;
disp('The mean estimates are')
disp(mean(b2(:, [1, 2, 3, 4, end-2, end-1]) - [beta1', rho1, rho2]))
disp(mean(SE1(:, [1, 2, 3, 4, end-1, end])))
disp(mean(SE2(:, [1, 2, 3, 4, end-1, end])))
disp('The standard error calculated from the simulation')
disp(std(b2(:, [1, 2, 3, 4, end-2, end-1])))
% Percentage of replications rejecting the true value at the nominal 5%
% level (|t| > 1.96) under each SE estimate (test1, test2), and relative
% to the empirical mean/spread of the estimates themselves (test3).
% NOTE(review): the denominators use size(b1, 1) while the statistics
% come from b2 -- confirm b1 and b2 have the same number of rows.
test1 = sum(abs(b2(:, [1, 2, 3, 4, end-2, end-1]) - [beta1', rho1, rho2])...
./SE1(:, [1, 2, 3, 4, end-1, end]) > 1.96)./size(b1, 1).*100;
test2 = sum(abs(b2(:, [1, 2, 3, 4, end-2, end-1]) - [beta1', rho1, rho2])...
./SE2(:, [1, 2, 3, 4, end-1, end]) > 1.96)./size(b1, 1).*100;
test3 = sum(abs(b2(:, [1, 2, 3, 4, end-2, end-1]) - mean(b2(:, [1, 2, 3, 4, end-2, end-1])))...
./std(b2(:, [1, 2, 3, 4, end-2, end-1])) > 1.96)./size(b1, 1).*100;
% Histogram of estimate column k with a fitted normal density overlay.
k = 6;
figure()
histogram(b2(:, k), 'Normalization','pdf')
hold on
mu = mean(b2(:, k));
sigma = std(b2(:, k));
y = mu - 4*sigma:0.01:mu + 4*sigma;
f = exp(-(y-mu).^2./(2*sigma^2))./(sigma*sqrt(2*pi));
plot(y,f,'LineWidth',1.5)
% Same plot for the corresponding standard-error estimate SE1(:, k).
figure()
histogram(SE1(:, k), 'Normalization','pdf')
hold on
mu = mean(SE1(:, k));
sigma = std(SE1(:, k));
y = mu - 4*sigma:0.001:mu + 4*sigma;
f = exp(-(y-mu).^2./(2*sigma^2))./(sigma*sqrt(2*pi));
plot(y,f,'LineWidth',1.5)
%% Opdracht 7 (Assignment 7)
% Function 'opdracht_7' with a single input named 'vector'.
% Returns two outputs: respectively the number of elements of the input
% and the value of the input's last element.
function [totaalElementen, laatsteWaarde] = opdracht_7(vector)
    laatsteWaarde = vector(end);     % value of the final element
    totaalElementen = numel(vector); % total number of elements
end
|
function win=biorwin(wins,dM,dN);
% biorwin : Find Biorthogonal analysis Window for STFT
% ***************************************************************@
% Inputs:
% wins, synthesis window;
% dM, sampling step in Time;
% dN, sampling step in Frequency;
% Output:
% win, analysis window;
% Usage:
% win=biorwin(wins,dM,dN);
% Defaults:
% noverlap=length(wins)/2;
% Copyright (c) 2000. Dr Israel Cohen.
% All rights reserved. Created 5/12/00.
% ***************************************************************@
wins=wins(:);
L=length(wins);
N=L/dN; % shift length per frequency step (assumes dN divides L -- TODO confirm)
win=zeros(L,1);
% Right-hand side of the biorthogonality constraints: a delta sequence.
mu=zeros(2*dN-1,1);
mu(1)=1;
%mu(1)=1/N;
% For each of the dM time offsets, build the matrix H from circularly
% shifted copies of the synthesis window (shiftcir presumably performs a
% circular shift -- confirm) sampled on that offset's comb, and solve the
% least-squares system H * win(k:dM:L) = mu via the pseudoinverse.
for k=1:dM
H=zeros(2*dN-1,ceil(L/dM));
for q=0:2*dN-2
h=shiftcir(wins,q*N);
H(q+1,:)=h(k:dM:L)';
end
win(k:dM:L)=pinv(H)*mu;
end
%win=win/max(win);
function Ko = jacfun(theta,varargin)
% K=jacfun(theta,varargin)
% Evaluate the Jacobian of the modeled (convolved, wavenumber-shifted,
% error-whitened) spectrum with respect to the retrieval parameters.
% theta(1:ninvgas) scale the layer densities of the retrieved gases; the
% remaining entries are polynomial/offset terms (handled by fetch_params).
% varargin is unpacked by extract_varargin into the retrieval context.
[wn,gasvec,cros,refe,invgas,sol,wn_shift,noise,L,geo,err,offset,ncut] = extract_varargin(varargin);
ninvgas = length(invgas);
[p1,p2,p3,offset] = fetch_params(theta,invgas,offset);
% scale densities:
dens = geo.layer_dens;
for n = 1:ninvgas
dens.(char(invgas(n))) = dens.(char(invgas(n)))*theta(n);
end
% evaluate Jacobian
[~,K] = calc_direct_radiance(dens,geo.los_lens,gasvec,cros,sol,p1,p2,p3,offset,L);
% Retrieved gases: convolve each Jacobian column to instrument
% resolution, resample on the shifted wavenumber grid, and divide by the
% error vector. NOTE(review): Ko grows column-by-column without
% preallocation.
for n = 1:ninvgas
ind = find(ismember(gasvec,invgas(n))==1);
Ko(:,n) = conv_spectrum(wn,K(:,ind));
Ko(:,n) = interp1(wn,Ko(:,n),wn+wn_shift,'linear','extrap');
Ko(:,n) = Ko(:,n)./err;
end
% polynomial terms and possibly the offset term (same processing)
for n = ninvgas+1:length(theta)
ind = length(gasvec)+n-ninvgas;
Ko(:,n) = conv_spectrum(wn,K(:,ind));
Ko(:,n) = interp1(wn,Ko(:,n),wn + wn_shift, 'linear', 'extrap');
Ko(:,n) = Ko(:,n)./err;
end
% ignore edges
Ko = Ko(ncut:end-ncut,:);
function solution = AV_gu(n,xi,beta,k)
% AV_gu  Set up and solve a pricing/flow optimization over an n-node
% network using YALMIP with the Gurobi solver.
%   n    - number of nodes; A = S2C_matrix(n,xi) (see S2C_matrix for xi)
%   beta - weight of the routed-flow term in the x-balance constraint
%   k    - penalty weight on sum(z) in the objective
% Returns struct 'solution' with fields profit, price, delta, z, r, y, x
% on solver success; otherwise prints diagnostics and returns nothing.
A = S2C_matrix(n,xi);
% A = [0,0.2,0.8;0.4,0,0.6;0,1,0];
% Decision variables.
p= sdpvar(n,1) ;
delta= sdpvar(n,1);
y = sdpvar(n,n,'full');
r = sdpvar(n,n,'full');
x = sdpvar(n,1);
z = sdpvar(n,1);
d = sdpvar(n,1);
% NOTE(review): the constraints x==0 and y(:)==0 pin those variables to
% zero, which makes several of the other equalities degenerate -- these
% look like temporary debugging restrictions; confirm they are intended.
constraints=[d==min(x+z,(1-p)),...
x'==beta*((min(x,1-p))'*A+ones(1,n)*y)+delta',...
y*ones(n,1) == max((x-(1-p)),0),...
z'==min(z,max(1-p-x,0))'*A+ones(1,n)*r,...
r*ones(n,1)==max(z-max(((1-p)-x),0),0),...
r(1,2) == r(1,3),...
y(1,2) == y(1,3),...
delta(2) == delta(3),...
delta>=0,...
y(:)==0,...
r(:)>=0,...
p>=0,...
x==0,...
z>=0,...
x+z>=(1-p),... %This needs to hold for the assumption in the cost to be correct
];
% Objective: profit minus slack and z penalties; optimize() minimizes, so
% -obj is passed to maximize obj.
obj=sum(p.*(1-p))-sum(delta)-k*sum(z);
options = sdpsettings('verbose',0,'solver','gurobi');
sol = optimize(constraints,-obj,options);
% Analyze error flags
if sol.problem == 0
% Extract and display value
solution.profit = value(obj);
solution.price = value(p);
solution.delta = value(delta);
solution.z = value(z);
solution.r = value(r);
solution.y = value(y);
solution.x = value(x);
else
display('Hmm, something went wrong!');
sol.info
yalmiperror(sol.problem);
end
end
function [ output_args ] = visualizeTrajectory( trial, left, sampling)
% visualizeTrajectory  Plot a (sub)sampled 3-D trajectory from a trial.
%   trial    - cell array; trial{left} is a matrix whose first three
%              columns are X, Y, Z positions in cm
%   left     - index selecting which trajectory in 'trial' to plot
%   sampling - optional fraction of points to plot; defaults to 1 (all)
%   output_args - line handle returned by plot3
if nargin == 2
    sampling = 1; % plot every point by default
end
data = trial{left};
n = size(data, 1);
% Random subset of row indices, sorted to preserve temporal order.
subset = sort(randsample(1:n, round(sampling*n)));
% Semicolon added: the original statement printed the handle to the
% console on every call.
output_args = plot3(data(subset,1), data(subset,2), data(subset,3), 'k');
xlabel('X (cm)');
ylabel('Y (cm)');
zlabel('Z (cm)');
end
|
% Compute the spatial resolution of the LIDAR point cloud captured around
% the checkerboard, evaluate ranging accuracy against a fitted plane, and
% render figures of the point cloud and plane.
Filename = 'data\evaluation\LIDAR\2021-08-03-21-02-32_Velodyne-HDL-32-Data_thesiscand.pcap';
velodyne = velodyneFileReader(Filename,'HDL32E');
checkerRanges = [-0.55, -0.4;
0.6, 0.8;
-0.1, 0.1];% XYZ bounds of the region containing the checkerboard
totalptcnt = 0;
% Count points inside the checkerboard region in every frame.
for i = 1:velodyne.NumberOfFrames
pcobj = readFrame(velodyne, i);
id = pcobj.Location(:,:,1) > checkerRanges(1,1) & pcobj.Location(:,:,1) < checkerRanges(1,2) ...
& pcobj.Location(:,:,2) > checkerRanges(2,1) & pcobj.Location(:,:,2) < checkerRanges(2,2) ...
& pcobj.Location(:,:,3) > checkerRanges(3,1) & pcobj.Location(:,:,3) < checkerRanges(3,2);
ptcnt = sum(sum(id));
totalptcnt = totalptcnt + ptcnt;
end
% Mean number of points inside the checkerboard region over all frames.
meanptcnt = totalptcnt / velodyne.NumberOfFrames;
% Spatial resolution: points per unit board area (the /10000 presumably
% converts from per-m^2 to per-cm^2 -- TODO confirm).
spatioresol = meanptcnt/((checkerRanges(2,2)-checkerRanges(2,1)) * (checkerRanges(3,2)-checkerRanges(3,1))) / 10000;
%% Measurement-accuracy evaluation
frameid = randperm(velodyne.NumberOfFrames);
Checker_LIDARPoints = [];
% Gather checkerboard points (converted to mm) from all frames.
for readid = frameid
pcobj = readFrame(velodyne, readid);
X = pcobj.Location(:,:,1);
Y = pcobj.Location(:,:,2);
Z = pcobj.Location(:,:,3);
id = pcobj.Location(:,:,1) > checkerRanges(1,1) & pcobj.Location(:,:,1) < checkerRanges(1,2) & ...
pcobj.Location(:,:,2) > checkerRanges(2,1) & pcobj.Location(:,:,2) < checkerRanges(2,2) & ...
pcobj.Location(:,:,3) > checkerRanges(3,1) & pcobj.Location(:,:,3) < checkerRanges(3,2);
Checker_LIDARPoints = [Checker_LIDARPoints; double([X(id), Y(id),Z(id)] * 1000)];
end
% Plane fitting (use at most 10000 points).
if size(Checker_LIDARPoints,1)<10000
psize = size(Checker_LIDARPoints,1);
else
psize = 10000;
end
opt_planeparams = planefitting_func(Checker_LIDARPoints(1:psize,:), 0);
% Signed distance of each point to the fitted plane [mm]
% (plane model: a*x + b*y + c*z = 1).
dists = (opt_planeparams(1).*Checker_LIDARPoints(:,1) + opt_planeparams(2).*Checker_LIDARPoints(:,2) ...
+ opt_planeparams(3).*Checker_LIDARPoints(:,3) - 1)./norm(opt_planeparams);
mean(dists)
std(dists)
spatioresol
%% Render the LIDAR point cloud and the fitted plane
f = figure;
colormap jet;
scatter3(Checker_LIDARPoints(:,1),Checker_LIDARPoints(:,2),Checker_LIDARPoints(:,3),[], dists);
hold on
graphX = linspace(min(Checker_LIDARPoints(:,1)),max(Checker_LIDARPoints(:,1)),50);
graphY = linspace(min(Checker_LIDARPoints(:,2)),max(Checker_LIDARPoints(:,2)),50);
[gX,gY] = meshgrid(graphX,graphY);
gZ = -opt_planeparams(1)/opt_planeparams(3)*gX-opt_planeparams(2)/opt_planeparams(3)*gY+1/opt_planeparams(3);
s = mesh(gX,gY,gZ);
daspect([1 1 1]);
xlim([checkerRanges(1,1) checkerRanges(1,2)]*1000);
zlim([checkerRanges(3,1) checkerRanges(3,2)]*1000);
%% Figure for the MDPI paper
% Choose the region of points to display.
showRanges = [-0.55, -0.4;
0.63, 0.77;
-0.1, 0.1];% XYZ bounds of the region containing the checkerboard
% Rotate coordinates so the fitted plane's normal becomes the X axis.
planenorm = opt_planeparams/norm(opt_planeparams);
inplane_z0 = [planenorm(2) -planenorm(1) 0]/norm(planenorm(1:2));
inplane_other = cross(planenorm,inplane_z0);
R_orig2view = inv([planenorm;inplane_z0;inplane_other]);
% Render the point clouds of 'scannum' randomly chosen scans.
scannum = 20;
scanid = randperm(velodyne.NumberOfFrames,scannum);
f = figure;
colormap jet;
Clim = [-30 30];
for j = scanid
pcobj = readFrame(velodyne, j);
X = pcobj.Location(:,:,1);
Y = pcobj.Location(:,:,2);
Z = pcobj.Location(:,:,3);
id = pcobj.Location(:,:,1) > showRanges(1,1) & pcobj.Location(:,:,1) < showRanges(1,2) & ...
pcobj.Location(:,:,2) > showRanges(2,1) & pcobj.Location(:,:,2) < showRanges(2,2) & ...
pcobj.Location(:,:,3) > showRanges(3,1) & pcobj.Location(:,:,3) < showRanges(3,2);
Checker_LIDARPoint_oneframe = double([X(id), Y(id),Z(id)] * 1000) * R_orig2view;
transplanenorm = opt_planeparams * R_orig2view;
dist_oneframe = -(transplanenorm(1).*Checker_LIDARPoint_oneframe(:,1) + transplanenorm(2).*Checker_LIDARPoint_oneframe(:,2) ...
+ transplanenorm(3).*Checker_LIDARPoint_oneframe(:,3) - 1)./norm(transplanenorm);
scatter3(Checker_LIDARPoint_oneframe(:,1), Checker_LIDARPoint_oneframe(:,2), Checker_LIDARPoint_oneframe(:,3), ...
[], dist_oneframe,'o','filled');
hold on
% if Clim(1)>min(dist_oneframe)
% Clim(1) = min(dist_oneframe);
% end
% if Clim(2)<max(dist_oneframe)
% Clim(2) = max(dist_oneframe);
% end
end
daspect([1 1 1]);
graphZ = linspace(-200,400,13);
graphY = linspace(250,850,13);
[gZ,gY] = meshgrid(graphZ,graphY);
gX = -transplanenorm(2)/transplanenorm(1)*gY-transplanenorm(3)/transplanenorm(1)*gZ+1/transplanenorm(1);
s = mesh(gX,gY,gZ,'EdgeColor',[0 0 0], 'FaceColor', 'none', 'FaceAlpha', 1);
daspect([1 1 1]);
grid off;
ax = gca;
ax.CLim = Clim;
ax.XAxis.Visible = 'off';
ax.YAxis.Visible = 'off';
ax.ZAxis.Visible = 'off';
margin = 20;
ylim([450-margin 650+margin]);
zlim([-75-margin 175+margin]);
view(-135,20);
%% Export the figure as a vector PDF
print(gcf,'-painters','a','-dpdf');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Bounded Data Uncertainty Filter (BDU) %%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% * Description: %
% - Class that implements the Robust Bounded Data Uncertainty Filter %
% (BDU). %
% - A.H. Sayed. A Framework for State-Space Estimation with Uncertain %
% Models. IEEE Trans. Automat. Control. 46 (7) (2001). %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
classdef BDU < Filter
% =================================================================== %
properties
Pp; % Predicted estimation error weighting matrix
end
% =================================================================== %
% =================================================================== %
methods
% --------------------------------------------------------------- %
% Constructor
% --------------------------------------------------------------- %
% N, T, n are forwarded unchanged to the Filter base class
% constructor (see Filter for their semantics).
function f = BDU(N,T,n)
% Base class constructor
f@Filter(N,T,n);
% Filter identification
f.id = "BDU (Sayed, 2001)";
end
% --------------------------------------------------------------- %
% Initialization (at each new experiment)
% --------------------------------------------------------------- %
% init_params must provide:
%   xp0 - initial predicted state estimate
%   P0  - initial predicted error weighting matrix
function initialize(f,init_params)
% Initialize predicted estimation
f.xp = init_params.xp0;
% Initialize predicted estimation error weighting matrix
f.Pp = init_params.P0;
end
% --------------------------------------------------------------- %
% Update State Estimate
% --------------------------------------------------------------- %
% One step of the robust BDU recursion (Sayed, 2001).
%   e         - experiment index (3rd dimension of f.xf)
%   k         - time-step index
%   u_k       - control input at step k (unused by this filter)
%   y_k       - measurement at step k
%   sys_model - nominal matrices F,H,Q,C,D,R and uncertainty
%               structure matrices M1, EF, EH
%   f_params  - struct with scalar ksi (margin for lambda below)
function update_estimate(f,e,k,u_k,y_k,sys_model,f_params)
% Start timer
t_start = tic;
% Unpack system model matrices
F = sys_model.F;
H = sys_model.H;
Q = sys_model.Q;
C = sys_model.C;
D = sys_model.D;
R = sys_model.R;
M1 = sys_model.M1;
EF = sys_model.EF;
EH = sys_model.EH;
% Unpack filter parameters
ksi = f_params.ksi;
% Dimensions
n = size(F,1);
t1 = size(EF,1);
% Lambda approximation: (1+ksi) inflates the norm-based lower
% bound for the regularization parameter lambda.
lamb = (1+ksi) * norm(M1'*(C'/R*C)*M1);
% Modified sensing model matrices
Rhat = R - 1/lamb * C*(M1*M1')*C';
Re = Rhat + C*f.Pp*C';
% Filtered estimation error matrix
Pf = f.Pp - f.Pp*C'/Re*C*f.Pp;
% Modified system model matrices
Qhat = inv(Q) + ...
lamb * EH'/(eye(t1) + lamb*EF*Pf*EF')*EH;
Phat = Pf - ...
Pf*EF'/(1/lamb*eye(t1) + EF*Pf*EF')*EF*Pf;
Hhat = H - lamb*F*Phat*EF'*EH;
Fhat = (F - lamb*Hhat/Qhat*EH'*EF) * ...
(eye(n) - lamb*Phat*(EF'*EF));
% New filtered state estimate
xf_new = f.xp + Pf*C'/Rhat*(y_k - C*f.xp);
f.xf(:,k,e) = xf_new;
% New predicted estimation error matrix
f.Pp = F*Phat*F' + Hhat/Qhat*Hhat';
% New predicted state estimate
f.xp = Fhat*xf_new;
% Stop timer and update total time
t_iter = toc(t_start);
f.T_ex = f.T_ex + t_iter;
end
% --------------------------------------------------------------- %
end
% =================================================================== %
end
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
function ptest(results)
% ptest  For each benchmark row in 'results', plot the measured power
% trace against kernel-regression (ksrlin) extrapolations fitted over
% several "future window" sizes. Samples are spaced 5 seconds apart.
%   results - cell array; columns used: {i,1} benchmark name, {i,3}
%             error, {i,5} percent error, {i,9} power trace, {i,10}
%             predicted power, {i,12} actual power trace.
numbm = size(results,1);
for i = 1:numbm
bmname = results{i,1};
bmerror = results{i,3};
pcnterr = results{i,5};
power = results{i,9};
pactual = results{i,12};
% NOTE(review): 'size(power)' in colon context uses only its first
% dimension; length(power) was probably intended here and below.
thalf = 5*(1:size(power));
t = 5*(1:size(pactual));
maxtime = max(t);
phat = results{i,10};
winsize = length(power)
maxfuture = 15;
in = 1;
% Fit a local-linear kernel regression per candidate future window.
% NOTE(review): uhatsave is indexed by benchmark index i, so every
% futurepts iteration overwrites the previous fit; the plotting loop
% below reads uhatsave{in} with in = 1,2,... -- likely an index bug.
for futurepts=100:25:winsize
newN = winsize+futurepts;
tnew = 5*(futurepts:newN);
p = pactual(futurepts:newN);
% Rule-of-thumb bandwidths from the median absolute deviation.
hx=median(abs(tnew-median(tnew)))/0.6745*(4/3/winsize)^0.2;
hy=median(abs(p-median(p)))/0.6745*(4/3/winsize)^0.2;
h=sqrt(hy*hx);
r = ksrlin(tnew,p,h,winsize);
uhatsave{i} = r.f(winsize-futurepts:winsize);
in = in + 1;
end
% Now we draw the results.
minpower = min(min(p),min(phat)) - 2;
maxpower = max(max(p),max(phat)) + 2;
hf1 = figure('Name',bmname,'NumberTitle','off');
hold on;
% hfill = jbfill(t,upper,lower,'b','w',1,0.7);
tact = 1:(length(power)+maxfuture);
hl0 = line(5*tact,pactual(tact),...
'LineWidth',2, ...
'Color','b',...
'LineStyle','-');
hl1 = line(thalf,power,'LineWidth',2, ...
'Color','k',...
'LineStyle','-');
in = 1;
% NOTE(review): this range is hard-coded (100:25:250) while the fitting
% loop above runs 100:25:winsize -- confirm they are meant to match.
for index = 100:25:250
hl{in} = line(5*((1:(index+1))+winsize) ,uhatsave{in}, ...
'LineWidth',2, ...
'Color','r',...
'LineStyle',':');
in = in + 1;
end
% legend([hl1 hl2 hfill],{'Actual','Predicted','Error'})
%legend([hl0,hl1 hl2],{'Actual','Actual', 'Predicted'})
%legend boxoff;
%plot(t,p,'-g',t,phat,'-r','LineWidth',2);
axis([0 maxtime minpower maxpower]);
xlabel('Time (in sec.)', 'fontsize', 12, 'fontweight','b');
ylabel('Power (watts)', 'fontsize',12,'fontweight','b');
%title(bmname);
%applyhatch_pluscolor(hf1,'/',1);
hold off;
clear lower upper;
end
|
function [] = mesh(ref_area, subspace)
% mesh  Build the global triangulation of the domain and the pivot
% numbering used by the FEM solver.
%   ref_area - maximum triangle area used to refine the triangulation
%   subspace - finite-element space: 'P2' enables quadratic elements
%              (Ndof = 6); any other value keeps linear P1 (Ndof = 3)
% NOTE(review): this function shadows MATLAB's built-in mesh() plotting
% function; consider renaming if both are needed in the same session.
% Import global parameters needed for domain construction.
global vertices;
global Ndof;
global boundaries;
global inputs;
Domain.InputVertex = vertices;
% ---------------------------------------------
% Domain definition from the input vertices
% ---------------------------------------------
% Boundary edges delimiting the domain:
Domain.Boundary.Values = 1:4;
% boundary edge 1 from node 1 to node 2
% boundary edge 2 from node 2 to node 3
% boundary edge 3 from node 3 to node 4
% boundary edge 4 from node 4 to node 1
Domain.Holes.Hole = [];       % the domain has no holes
Domain.Segments.Segment = []; % the domain has no forced edges
% --------------------------------------------------
% Boundary conditions from vertices and boundary edges
% --------------------------------------------------
% numerical (constant) values of BCs (useless since BCs are set in expand function)
BC.Values = [0.0 12.0 0.0 14.0 0.0 16.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0];
% assign BCs markers to borders
BC.Boundary.Values = boundaries;
% assign BCs markers to (input) vertices
BC.InputVertexValues = inputs;
BC.Holes.Hole = [];
BC.Segments.Segment = [];
% --------------------------------------------
% Triangulation parameters
% --------------------------------------------
RefiningOptions.CheckArea = 'Y';
RefiningOptions.CheckAngle = 'N';
RefiningOptions.AreaValue = ref_area;
RefiningOptions.AngleValue = [];
RefiningOptions.Subregions = [];
% Construct triangulation
global geom;
[geom] = bbtr30(Domain,BC,RefiningOptions);
% --------------------------------------------------
% Trim auxiliary trailing entries produced by the mesher.
% --------------------------------------------------
geom.elements.coordinates = geom.elements.coordinates(...
    1:geom.nelements.nVertexes,:);
geom.elements.triangles = geom.elements.triangles(...
    1:geom.nelements.nTriangles,:);
geom.elements.borders = geom.elements.borders(...
    1:geom.nelements.nBorders,:);
geom.elements.neighbourhood = geom.elements.neighbourhood(...
    1:geom.nelements.nTriangles,:);
% --------------------------------------------------
% Pivot numbering: free nodes (marker 0) get positive indices,
% Dirichlet nodes get negative indices.
% BUGFIX: use length() -- '1:size(v)' only uses the first dimension of
% the size vector, which is wrong for row-vector node lists.
j = 1;
Dj = 1;
for i=1:length(geom.pivot.nodelist)
    if geom.pivot.nodelist(i)==0
        geom.pivot.pivot(i)=j;
        j = j+1;
    else
        geom.pivot.pivot(i)=-Dj;
        Dj = Dj + 1;
    end
end
% --------------------------------------------------
geom.pivot.pivot = transpose(geom.pivot.pivot);
% --------------------------------------------------
% After sorting, geom.pivot.Di holds the Dirichlet node indices with the
% corresponding boundary marker.
[X,I] = sort(geom.pivot.Di(:,1));
geom.pivot.Di = geom.pivot.Di(I,:);
% BUGFIX: 'subspace == ''P2''' is an element-wise char comparison that
% errors whenever subspace has a length other than 2; strcmp performs
% the intended whole-string comparison.
if strcmp(subspace, 'P2')
    P2;
    Ndof = 6;
else
    Ndof = 3;
end
clear X I;
end
function val = g(x)
% g  Activation function: Heaviside unit step.
%   Returns 0 for x < 0, 1 for x > 0, and 0.5 at x == 0 (MATLAB's
%   heaviside convention). Applied element-wise for array inputs.
% val = 1./(1+exp(-x));  % alternative: logistic sigmoid (disabled)
val = heaviside(x);
end
|
function [ leftMat, rightVec, paramsUsage ] = loadEnsemble( currentResidue, conNames, header )
% loadEnsemble  Build the sparse indicator (design) matrix and response
% vector for one residue position from matching *.pdb / *.seq file pairs
% in the current directory.
%   currentResidue - residue id; matches files '*_<res>.pdb' and '*_<res>_*.pdb'
%   conNames       - ids of contact residues; each defines one 400-column
%                    (20x20) pair block in the design matrix
%   header         - prefix of the sequence files: '<header>_<pdbname>.seq'
% Returns:
%   leftMat     - sparse 0/1 matrix, 'self' rows per sequence per file
%   rightVec    - per-row [indicator, file index, number of contacts]
%   paramsUsage - per-column usage counts over rows with indicator == 1
self = 20;  % number of amino-acid types (width of the "self" block)
pair = 400; % 20*20 columns per contact pair block
files1 = dir(sprintf('*_%s.pdb', currentResidue));
files2 = dir(sprintf('*_%s_*.pdb', currentResidue));
files = {files1.name, files2.name};
%% lay out the residues covered by a pdb file
residueLists = {};
for i = 1: length(files)
fid = fopen(files{i});
pdbInfo = textscan(fid, '%s', 'Delimiter', '', 'Headerlines', 1);
fclose(fid);
pdbInfo = pdbInfo{1};
residueNums = {};
for j = 1:length(pdbInfo)
% Columns 22-26 of a PDB record hold the chain id + residue number.
resinum = strrep(pdbInfo{j}(22:26), ' ', '');
residueNums = [residueNums, resinum];
end
residues = unique(residueNums, 'stable');
residueLists{i} = residues;
end
AA = {'ALA', 'CYS', 'ASP', 'GLU', 'PHE', 'GLY', 'HIS', 'ILE', 'LYS', 'LEU', 'MET', 'ASN', 'PRO', 'GLN', 'ARG', 'SER', 'THR', 'VAL', 'TRP', 'TYR', 'MSE'};
%% iterate over seq files
Sparse = [];
rightVec = [];
% this part should be in general same
for i = 1: length(files)
seqf = strcat(header, '_', strrep(files{i}, 'pdb', 'seq'));
if exist(seqf, 'file') == 0
continue;
end
fileSplit = regexp( strrep(files{i}, '.pdb', ''), '_', 'split');
% Field index of the central residue in this seq file (+1 because the
% first field of each seq line is not a residue column).
cencolid = find(strcmp(residueLists{i}, currentResidue)) + 1;
conInThisFile = [];
if length(fileSplit) > 2
% Contacts are encoded in the filename; keep only those in conNames.
importantResidue = fileSplit(2:end);
conInThisFile = importantResidue(~strcmp(importantResidue, currentResidue));
conInThisFile = conInThisFile(ismember(conInThisFile, conNames));
concolid = zeros(size(conInThisFile));
for c = 1: length(conInThisFile)
concolid(c) = find(strcmp(residueLists{i}, conInThisFile(c))) + 1;
end
end
% test: only use top N sequences;
% TopN = 20000;
% read the seq files
nfield = length(residueLists{i}) + 1;
fid = fopen(seqf);
seqResults = textscan(fid, repmat('%s ' , [1, nfield]));
fclose(fid);
nm = length(seqResults{1}); % number of sequences in this file
nc = length(conInThisFile); % number of contacts present in this file
% create left sparse matrix
% Each sequence contributes 'self' rows; each row has (nc+1) nonzeros:
% one in the self block and one per contact pair block.
dim1 = reshape( repmat(1:nm*self, nc+1, 1), nm*(nc+1)*self, 1);
dim2 = zeros(size(dim1));
%% read sequence data
seqid = 1:nm;
seqid = seqid';
cenaa = seqResults{cencolid}(seqid);
cenaaind = aaIndex(cenaa);
% fills right vector
rmat = zeros(nm*self, 3);
rmat(self*(seqid - 1) + cenaaind, 1) = 1;
rmat(1:nm*self, 2) = ones(nm*self, 1) * i;
rmat(1:nm*self, 3) = ones(nm*self, 1) * nc;
% fills the self/column part of left matrix
dim2( 1:(nc+1):end ) = repmat((1:self)', nm, 1);
% contact positions
if nc > 0
for d2 = 1:nc
conaa = seqResults{concolid(d2)}(seqid);
conaaind = aaIndex(conaa);
conid = find(strcmp(conNames, conInThisFile(d2)));
try
% First column of the pair block for this contact/amino-acid combo.
seeds = self + pair * (conid -1) + self * (conaaind -1) + 1;
catch
disp(files(i));
end
dim2( (d2+1):(nc+1):end ) = reshape(repmat(seeds, 1, self)' + repmat(0:self-1, nm, 1)', nm*self, 1);
end
end
smat = sparse(dim1, dim2, ones(size(dim1, 1), 1), nm*self, size(conNames, 1)*pair + self);
% concatenate to existing data
Sparse = [Sparse; smat];
rightVec = [rightVec; rmat];
end
leftMat = [];
paramsUsage = [];
%% check the parameters usage, and remove the not used columns
if ~isempty(Sparse)
paramsUsage = sum(Sparse(logical(rightVec(:,1)), :), 1);
leftMat = Sparse;
end
%% a side function to deal with non-canonical amino acid
function [aainds] = aaIndex(names)
% Map three-letter codes to 1..20; MSE maps to MET (11), unknown to ALA (1).
[~, aainds] = ismember(names, AA);
aainds(aainds == 21) = 11; % MSE
aainds(aainds == 0) = 1; % other made ALA
end
end
|
function [trial_list] = randomized_trials_lds(not_outlier_IDs,outlier_IDs,nOutliers,nTests,rep_limit)
% randomized_trials_lds  Build a randomized list of nTests stimulus IDs.
%   not_outlier_IDs - IDs of the regular stimuli (cycled to fill the list)
%   outlier_IDs     - IDs of the outlier stimuli (cycled to nOutliers items)
%   nOutliers       - number of outlier trials to include
%   nTests          - total number of trials
%   rep_limit       - minimum spacing (in trials) between two occurrences
%                     of the same outlier ID
% Returns a permutation in which no ID appears in consecutive trials and
% outlier repetitions are at least rep_limit trials apart, or [] (after a
% warning dialog) if no valid ordering is found within 10000 attempts.

% Fill the outlier portion by cycling through outlier_IDs.
outlier_list = [];
nOutlierTypes = length(outlier_IDs);
if nOutliers > 0
    for i1 = 1:ceil(nOutliers/nOutlierTypes)
        outlier_list((i1-1)*nOutlierTypes+1:i1*nOutlierTypes) = outlier_IDs;
    end
    outlier_list = outlier_list(1:nOutliers);
end
% Fill the remaining trials by cycling through not_outlier_IDs.
not_outlier_list = [];
nNotOutlierTypes = length(not_outlier_IDs);
for i2 = 1:ceil((nTests-nOutliers)/nNotOutlierTypes)
    not_outlier_list((i2-1)*nNotOutlierTypes+1:i2*nNotOutlierTypes) = not_outlier_IDs;
end
not_outlier_list = not_outlier_list(1:(nTests-nOutliers));
total_list = [outlier_list not_outlier_list];

% Rejection sampling: reshuffle until the spacing constraints hold.
chk2 = 0;
cnt = 0;
IDs = unique(total_list); % unique() already returns sorted values
while ~chk2
    chk2 = 1;
    trial_list = total_list(randperm(length(total_list)));
    % Outlier occurrences must be at least rep_limit trials apart.
    for i2 = outlier_IDs
        df_list = diff(find(trial_list == i2));
        if any(df_list < rep_limit)
            chk2 = 0;
        end
    end
    % No ID (speaker) may appear in two consecutive trials.
    for i2 = IDs
        df_list = diff(find(trial_list == i2));
        if any(df_list == 1)
            chk2 = 0;
        end
    end
    cnt = cnt + 1;
    if cnt > 10000
        hWarn = warndlg('Cannot find a randomized trial list with these parameters!');
        uiwait(hWarn);
        trial_list = [];
        return;
    end
end
function C = contract(tensors, indices, kwargs)
% Compute a tensor network contraction.
%
% Usage
% -----
% :code:`C = contract(t1, idx1, t2, idx2, ...)`
%
% :code:`C = contract(..., 'Conj', conjlist)`
%
% :code:`C = contract(..., 'Rank', r)`
%
% Repeating Arguments
% -------------------
% tensors : :class:`Tensor`
% list of tensors that constitute the vertices of the network.
%
% indices : int
% list of indices that define the links and contraction order, using ncon-like syntax.
%
% Keyword Arguments
% -----------------
% Conj : (1, :) logical
% optional list to flag that tensors should be conjugated.
%
% Rank : (1, 2) int
% optionally specify the rank of the resulting tensor.
%
% Returns
% -------
% C : :class:`Tensor` or numeric
% result of the tensor network contraction.
% TODO contraction order checker, order specifier.
arguments (Repeating)
tensors
indices (1, :) {mustBeInteger}
end
arguments
kwargs.Conj (1, :) logical = false(size(tensors))
kwargs.Rank = []
end
% One conjugation flag per tensor is required.
assert(length(kwargs.Conj) == length(tensors));
% Repeated labels on a single tensor would denote a partial trace, which is
% not supported here.
for i = 1:length(tensors)
if length(indices{i}) > 1
assert(length(unique(indices{i})) == length(indices{i}), ...
'Tensors:TBA', 'Traces not implemented.');
end
end
% Special case for single input tensor
% (only a permutation into descending open-index order, plus optional
% conjugation, is needed).
if nargin == 2
[~, order] = sort(indices{1}, 'descend');
C = tensors{1};
if isnumeric(C)
C = permute(C, order);
if kwargs.Conj
C = conj(C);
end
else
if kwargs.Conj
% Conjugation via ' reverses the index order, hence the flipped order.
C = permute(C', order(length(order):-1:1), kwargs.Rank);
else
C = permute(C, order, kwargs.Rank);
end
end
return
end
% Generate trees
% Positive labels are contracted (pairwise links); negative labels stay open.
contractindices = cellfun(@(x) x(x > 0), indices, 'UniformOutput', false);
partialtrees = num2cell(1:length(tensors));
tree = generatetree(partialtrees, contractindices);
% contract all subtrees
[A, ia, ca] = contracttree(tensors, indices, kwargs.Conj, tree{1});
[B, ib, cb] = contracttree(tensors, indices, kwargs.Conj, tree{2});
% contract last pair
[dimA, dimB] = contractinds(ia, ib);
if Options.Debug
contractcheck(A, ia, ca, B, ib, cb);
end
C = tensorprod(A, B, dimA, dimB, ca, cb, 'NumDimensionsA', length(ia));
ia(dimA) = []; ib(dimB) = [];
ic = [ia ib];
% permute last tensor
% Remaining (negative) labels are sorted descending so -1 comes out last.
if ~isempty(ic) && length(ic) > 1
[~, order] = sort(ic, 'descend');
if isnumeric(C)
C = permute(C, order);
else
if isempty(kwargs.Rank)
kwargs.Rank = [length(order) 0];
end
C = permute(C, order, kwargs.Rank);
end
end
end
function tree = generatetree(partialtrees, contractindices)
% Build a nested-cell binary contraction tree for a tensor network.
% partialtrees   - cell array of subtrees (initially one leaf per tensor)
% contractindices - cell array with the positive (contracted) labels of each subtree
% Subtrees are merged pairwise until a single tree remains; each merge picks
% the smallest remaining contraction label, or pairs the last two subtrees
% when no labels are left (disconnected network, i.e. an outer product).
while length(partialtrees) > 1
    if all(cellfun('isempty', contractindices)) % disconnected network
        % No shared labels anywhere: join the final two subtrees directly.
        partialtrees{end - 1} = partialtrees(end - 1:end);
        partialtrees(end) = [];
        contractindices(end) = [];
    else
        % Contract along the smallest label; it must appear on exactly two subtrees.
        nextlabel = min(horzcat(contractindices{:}));
        pair = find(cellfun(@(lbls) any(nextlabel == lbls), contractindices));
        assert(length(pair) == 2);
        % Merge the pair into the first slot and drop the second.
        partialtrees{pair(1)} = partialtrees(pair);
        partialtrees(pair(2)) = [];
        contractindices{pair(1)} = unique1(horzcat(contractindices{pair}));
        contractindices(pair(2)) = [];
    end
end
tree = partialtrees{1};
end
function [C, ic, cc] = contracttree(tensors, indices, conjlist, tree)
% Recursively contract the subnetwork described by a contraction tree.
% A numeric tree node is a leaf: return that tensor, its index labels and
% its conjugation flag. A cell node has two children which are contracted
% first, then combined via tensorprod.
if isnumeric(tree)
    C = tensors{tree};
    ic = indices{tree};
    cc = conjlist(tree);
    return
end
% Contract both children of this node.
[lhs, il, cl] = contracttree(tensors, indices, conjlist, tree{1});
[rhs, ir, cr] = contracttree(tensors, indices, conjlist, tree{2});
% Find which dimensions carry matching labels between the two results.
[diml, dimr] = contractinds(il, ir);
if Options.Debug
    contractcheck(lhs, il, cl, rhs, ir, cr);
end
C = tensorprod(lhs, rhs, diml, dimr, cl, cr, 'NumDimensionsA', length(il));
% Remaining labels are the uncontracted ones of both children, in order.
il(diml) = [];
ir(dimr) = [];
ic = [il ir];
% Any pending conjugation has been absorbed by tensorprod.
cc = false;
end
function contractcheck(A, ia, ca, B, ib, cb)
% Debug check: assert that every pair of contracted vector spaces between
% tensors A and B is compatible (each space must equal the conjugate of its
% partner). Raises 'tensors:SpaceMismatch' on the first offending label.
spacesA = space(A);
spacesB = space(B);
% Apply pending conjugation flags before comparing.
if ca, spacesA = conj(spacesA); end
if cb, spacesB = conj(spacesB); end
[dimA, dimB] = contractinds(ia, ib);
for k = 1:length(dimA)
    assert(spacesA(dimA(k)) == conj(spacesB(dimB(k))), 'tensors:SpaceMismatch', ...
        'Invalid index %d:\n\t%s\n\tis incompatible with\n\t%s', ...
        ia(dimA(k)), string(spacesA(dimA(k))), string(spacesB(dimB(k))));
end
end
|
function y = grdf_smrt(a,H,x)
% GRDF_SMRT  Affine map: returns a + H*x (offset a plus linear term H*x).
% Works for scalars as well as conforming matrices/vectors.
y = a + H*x;
end
%% Assignment 4
% Tabular Q-learning on a gridworld (uses course-provided helpers gwinit,
% gwstate, gwaction, gwdraw, gwplotarrow).
% Actions are encoded 1..4; edge moves are forbidden by initializing the
% corresponding Q entries to -inf so they are never selected.
close all;
clear all;
tic
% initialization
nbrActions = 4;
map = 1 ;
% gives random start position
gwinit(map);
state = gwstate;
gamma = 0.9;   % discount factor
alpha = 1;     % learning rate (full replacement update)
% epsilon=0.9;
% Look-up table
Q = rand(state.xsize, state.ysize, nbrActions);
%edges
% Block actions that would leave the grid (action indices 1..4).
Q(1,:,2)=-inf;
Q(state.xsize,:,1)=-inf;
Q(:,1,4)=-inf;
Q(:,state.ysize,3)=-inf;
gwdraw
decisionVector = [];
numIterations = 1000;
% numstate(e) counts the steps taken in episode e (convergence measure).
numstate=zeros(numIterations,1);
for episodes = 1:numIterations
episodes
% gives random start position
gwinit(map);
state = gwstate;
% epsilon=epsilon-(epsilon*0.9)*episodes/numIterations;
% NOTE(review): here epsilon is the probability of the GREEDY action; it is
% annealed upward (0.3 -> 0.9) so exploration decreases over episodes.
epsilon=0.3+0.6*episodes/numIterations;
% repeat until goal found
while (state.isterminal ~=1 )
% chose an action
if(rand(1)>epsilon)
% Explore: pick a random legal action (re-draw while blocked by -inf).
action = ceil(rand(1)*4);
while Q(state.pos(1), state.pos(2), action) == -inf
action = ceil(rand(1)*4);
end
else
% Exploit: pick the current best action.
[~,action]=max(Q(state.pos(1), state.pos(2),:));
end
oldstate=gwstate;
state=gwaction(action);
numstate(episodes)=numstate(episodes)+1;
reward=oldstate.feedback;
% Standard Q-learning update: Q <- (1-alpha)Q + alpha(r + gamma*max_a' Q(s',a')).
Q(oldstate.pos(1),oldstate.pos(2), action)=...
(1-alpha)*Q(oldstate.pos(1),oldstate.pos(2),action)+...
alpha*(reward+gamma*max(Q(state.pos(1), state.pos(2),:)));
end
% Terminal state has zero future value.
Q(state.pos(1),state.pos(2),:) = 0;
end
%% plot decisions
[~,bestdec]=max(Q,[],3);
gwdraw
for x=1:state.xsize
for y=1:state.ysize
gwplotarrow([x,y]',bestdec(x,y));
end
end
figure(5)
imagesc(max(Q,[],3))
title('max(Q) for map 1')
colorbar
%% plot convergence
figure(6)
plot(numstate)
title('Plot for convergation, map 1')
toc
|
function bdytde=btide(recllh,cdate)
%
% Function btide
% ==============
%
% Computes the radial, s->n and w->e displacements due to body tides
% for given time and position.
%
% Syntax
% ======
%
% bdytde=btide(recllh,cdate)
%
% Input
% =====
%
% recllh -> receiver geodetic coordinates
%           (recllh(1,1)=latitude, recllh(2,1)=longitude; presumably in
%           radians given the direct use in sin/cos -- TODO confirm)
% cdate -> current date (cdate=[Y;M;D;H;M;S])
%
% Output
% ======
%
% bdytde -> 3x1 vector with 3D displacements (m)
% bdytde=[U;N;E]
% U -> radial displacement
% N -> S->N displacement
% E -> W->E displacement
%
% Created/Modified
% ================
%
% When Who What
% ---- --- ----
% 2006/07/06 Rodrigo Leandro Function created
%
%
% Comments
% ========
%
% Depends on external helpers date2mjd, mjd2sdt, suncrd, mooncrd for the
% time conversion and the Sun/Moon coordinates.
%
% ==============================
% Copyright 2006 Rodrigo Leandro
% ==============================
d=0.0174532925; % degrees -> radians factor
% love numbers
first = 0.609;  % Love number h (radial)
shida = 0.085;  % Shida number l (horizontal)
dlat=recllh(1,1);
dlon=recllh(2,1);
% Geocentric latitude approximation from geodetic latitude.
f=dlat-0.192424*sin(2*dlat)*d;
w=dlon;
dsinf=sin(f);
dcosf=cos(f);
dcw=cos(w);
dsw=sin(w);
% input & use xmoon & xsun positions
dmjd=date2mjd(cdate);
sidt=mjd2sdt(dmjd,cdate);
xsun=suncrd(dmjd,sidt);
xmoon=mooncrd(dmjd,sidt);
% Geocentric distances and equatorial projections of Moon and Sun.
rmoon=sqrt(xmoon(1)^2+xmoon(2)^2+xmoon(3)^2);
rmoonc=sqrt(xmoon(1)^2+xmoon(2)^2);
rsun=sqrt(xsun(1)^2+xsun(2)^2+xsun(3)^2);
rsunc=sqrt(xsun(1)^2+xsun(2)^2);
% Sines/cosines of declination and local hour angle (m = Moon, s = Sun).
decms=xmoon(3)/rmoon;
decmc=rmoonc/rmoon;
hourms=-((xmoon(2)*dcw-xmoon(1)*dsw)/rmoonc);
hourmc=(xmoon(1)*dcw+xmoon(2)*dsw)/rmoonc;
decss=xsun(3)/rsun;
decsc=rsunc/rsun;
hourss=-((xsun(2)*dcw-xsun(1)*dsw)/rsunc);
hoursc=(xsun(1)*dcw+xsun(2)*dsw)/rsunc;
% z1, z2: cosine of the zenith angle of Sun and Moon at the site.
z11=dsinf*decss;
z12=dcosf*decsc*hoursc;
z1=z11+z12;
z21=dsinf*decms;
z22=dcosf*decmc*hourmc;
z2=z21+z22;
% Parallax ratios: (mean distance)/(true distance), cubed below.
sunr=1.49597870691e11/rsun;
c9=384401e3/rmoon;
c93=c9^3;
sunr3=sunr^3;
% Horizontal (latitude/longitude) tide terms, Moon (argm*) and Sun (args*).
argm1=decms^2-(decmc^2)*(hourmc^2);
argm2=2 * decms * decmc * hourmc * cos(2* f);
argm3=2 * dcosf^2 * decmc^2 * hourms * hourmc;
argm4=2 * dsinf * dcosf * decms * decmc * hourms;
args1=decss^2 - decsc^2 * hoursc^2;
args2=2 * decss * decsc * hoursc * cos(2*f);
args3=2 * dcosf^2 * decsc^2 * hourss * hoursc;
args4=2 * dsinf * dcosf * decss * decsc * hourss;
vmlat=53.625 * (sin(2*f)*argm1+argm2)*c93;
vmlon=53.625 * (argm3+argm4)*c93 /dcosf;
vslat=24.625 * (sin(2*f)*args1 + args2)*sunr3;
vslon=24.625 * (args3+args4)*sunr3 /dcosf;
% Radial displacement (m): degree-2 tide scaled by Love number h.
tup=first*(53.625*(z2*z2-1/3)*c93+ ...
24.625*(z1*z1-1/3)*sunr3)/100;
% iers89 conventions correction
tup =tup -0.025*sin(f)*cos(f)*sin(sidt+ w);
% Horizontal displacements (m): scaled by Shida number l.
tnorth= (vmlat + vslat) * shida/100;
teast=-((vmlon + vslon) * shida/100);
bdytde=[tup;tnorth;teast];
function [ret] = pglobals_set(name, value)
%% pglobals_set
%
% File: pglobals_set.m
% Directory: 2_demonstrations/lib/matlab
% Author: Peter Polcz (ppolcz@gmail.com)
%
% Created on 2018. June 09.
%
% Set or restore a global variable by name, keeping a per-name stack of
% previous values in the persistent struct Old_Values.
%
%   pglobals_set(name, value)  pushes the current value of global <name>
%                              onto the stack, then assigns <value>
%                              (only if <value> is a numeric scalar).
%   pglobals_set(name)         pops the most recent saved value and
%                              restores it (only if that value is a
%                              numeric/logical scalar).
%
% NOTE(review): the declared output `ret` is never assigned, so requesting
% an output argument from this function will error -- presumably always
% called without outputs.
%%
persistent Old_Values
% Bring the named global into this function's scope (name is only known at
% run time, hence the eval).
eval(['global ' name ' ; ']);
if nargin > 1
% Store old variables
if ~isfield(Old_Values, name)
Old_Values.(name) = { eval(name) };
else
Old_Values.(name) = [ Old_Values.(name) eval(name) ];
end
% Set variable
if isnumeric(value) && isscalar(value)
eval([name ' = ' num2str(value) ';']);
end
else
% Restore: pop the last saved value for this name, if any.
if isfield(Old_Values, name) && ~isempty(Old_Values.(name))
values = Old_Values.(name);
value = values{end};
% Set variable
if (isnumeric(value) || islogical(value)) && isscalar(value)
eval([name ' = ' num2str(value) ';']);
Old_Values.(name) = values(1:end-1);
end
end
end
% display(Old_Values)
end
% Interactive pipeline: load a FreeSurfer/mat brain model, load AC/PC/mid-sag
% alignment points, project the model (and electrodes) into a standard space
% (talairach/mni), and save intermediate results into a workspace folder.
% Results of each stage are cached in the workspace folder as .mat files so
% the script can be re-run without repeating the dialogs.
addpath(genpath('./activeBrain'));
addpath(genpath('./alignmentTool'));
addpath('/Applications/freesurfer/matlab/');
%% Load files
clc;
clearvars;
%% Settings
pathToFsInstallDirectory='/Applications';
%%
% --- Stage 1: pick the workspace folder (output/cache directory) ---------
disp('Specify Subject Output Directory (input data will be copied to this folder)');
workspace_path=uigetdir('','Workspace Directory');
wspace_dir=dir(workspace_path);
disp(['Workspace selected: ' workspace_path]);
% numel > 2 means entries besides '.' and '..', i.e. the folder is not empty.
if(numel(wspace_dir) > 2)
warning ('Workspace directory already contains data, to rerun complete script please delete folder content or create a new folder!');
end
% --- Stage 2: load the brain model (cached as orig_brain_model.mat) ------
if(~any(contains({wspace_dir.name},'orig_brain_model.mat')))
answer=questdlg('Do you want to load the brain model from a complete .mat or from the freesurfer folder?','What brain model do you want to use?','Load from .mat','Load from Freesurfer','Load from .mat');
switch(answer)
case 'Load from Freesurfer'
[cortex,cmapstruct,ix,tala,vcontribs,viewstruct,brainmodel_path,brainmodel_file]=loadBrainModelfromFreeSurfer(workspace_path,pathToFsInstallDirectory);
case 'Load from .mat'
disp('Load brain model .mat file');
[brainmodel_file,brainmodel_path ]= uigetfile('*.mat','Original Brain Model');
disp(['Loading brain from ' fullfile(brainmodel_path,brainmodel_file)]);
load(fullfile(brainmodel_path,brainmodel_file));
if(~(exist('cmapstruct','var') && exist('cortex','var') && exist('ix','var')&& exist('tala','var') && exist('vcontribs','var') && exist('viewstruct','var')))
error('Unexpected brain model file content!');
end
otherwise
error('Stopped importing brain model!');
end
save(fullfile(workspace_path,'orig_brain_model.mat'),'cortex','cmapstruct','ix','tala','vcontribs','viewstruct','brainmodel_path','brainmodel_file');
else
disp('Found brain model file in Workspace folder!');
load(fullfile(workspace_path,'orig_brain_model.mat'));
end
disp('...done');
disp(' ');
disp(' ');
% --- Plot the original model (subplot 1 of 3) -----------------------------
viewstruct.what2view={'brain','electrodes'};
figure
subplot(1,3,1)
activateBrain(cortex,vcontribs,tala,ix,cmapstruct,viewstruct);
light('Position', -viewstruct.lightpos, 'Style', 'infinite');
title(['Original brain (' fullfile(brainmodel_path,brainmodel_file) ')']);
axis on;
xlabel('x');
ylabel('y');
zlabel('z');
% --- Stage 3: load AC/PC/mid-sag points (cached as alignment_points.mat) --
if(~any(contains({wspace_dir.name},'alignment_points.mat')))
disp('Load alignment points folder (contains AC,PC, mid-sag). Alignment folder needs to contain 3 files with one point each');
alignment_folder= uigetdir(brainmodel_path,'Folder to AC, PC, mid-sag files');
files = dir(alignment_folder);
%TODO add the possiblity to load .mat files with correctly projected
%coordinates
% Fall back to a manual file picker if the expected .dat files are missing.
if(any(contains({files.name},'AC.dat')))
ac_dat_file=fullfile(alignment_folder,'AC.dat');
else
disp('Couldnt find AC.dat, please specify file');
[fname,path ]= uigetfile('*.*','Load AC point');
ac_dat_file=fullfile(path,fname);
end
if(any(contains({files.name},'PC.dat')))
pc_dat_file=fullfile(alignment_folder,'PC.dat');
else
disp('Couldnt find PC.dat, please specify file');
[fname,path ]= uigetfile('*.*','Load PC point');
pc_dat_file=fullfile(path,fname);
end
if(any(contains({files.name},'mid-sag.dat')))
mid_sag_dat_file=fullfile(alignment_folder,'mid-sag.dat');
else
disp('Couldnt find mid-sag.dat, please specify file');
[fname,path ]= uigetfile('*.*','Load mig-sag point');
mid_sag_dat_file=fullfile(path,fname);
end
ac_point=importelectrodes(ac_dat_file);
pc_point=importelectrodes(pc_dat_file);
mid_sag_point=importelectrodes(mid_sag_dat_file);
disp('Successfully loaded alignment points');
xfrm_matrices = loadXFRMMatrix(workspace_path,pathToFsInstallDirectory);
% move the transform matrices into their own variables
Norig = xfrm_matrices(1:4, :);
Torig = xfrm_matrices(5:8, :);
% Map points via Torig*inv(Norig) in homogeneous coordinates.
ac_point = (Torig*inv(Norig)*[ ac_point(:, 1), ac_point(:, 2), ac_point(:, 3), ones(size(ac_point, 1), 1)]')';
pc_point = (Torig*inv(Norig)*[ pc_point(:, 1), pc_point(:, 2), pc_point(:, 3), ones(size(pc_point, 1), 1)]')';
mid_sag_point = (Torig*inv(Norig)*[mid_sag_point(:, 1), mid_sag_point(:, 2), mid_sag_point(:, 3), ones(size(mid_sag_point, 1), 1)]')';
% Drop the homogeneous coordinate again.
ac_point=ac_point(:,1:3);
pc_point=pc_point(:,1:3);
mid_sag_point=mid_sag_point(:,1:3);
save(fullfile(workspace_path,'alignment_points.mat'),'ac_point','pc_point','mid_sag_point');
else
disp('Found alignment points file in Workspace folder!');
load(fullfile(workspace_path,'alignment_points.mat'))
end
disp('...done');
disp(' ');
disp(' ');
% --- Stage 4: project model + electrodes into the chosen standard space ---
disp('Projecting model into talairach space!');
disp(['Initial AC position (x y z): ' num2str(round(ac_point,2))])
disp(['Initial PC position (x y z): ' num2str(round(pc_point,2))])
disp(['Initial mid-sag position (x y z): ' num2str(round(mid_sag_point,2))])
disp('aligning model ...');
answer=input('Which alignment should be performed \n ''none'' = rotation and translation only, \n ''talairach'' = resized according to talairach, \n ''mni'' = projection into talaraich space and transformation from talairach to mni?');
[newcortex,newelectrodes,new_alignment_pos]= projectToStandard(cortex,tala.electrodes,[ac_point;pc_point;mid_sag_point],answer);
disp(['AC position is now at (x y z): ' num2str(round(new_alignment_pos(1,:),2))])
disp(['PC position is now at (x y z): ' num2str(round(new_alignment_pos(2,:),2))])
disp(['mid-sag position is now at (x y z): ' num2str(round(new_alignment_pos(3,:),2))])
disp('...done');
disp(' ');
disp(' ');
newtala=tala;
newtala.electrodes=newelectrodes;
% --- Plot the realigned model (subplot 2 of 3) and save it ----------------
subplot(1,3,2)
cortex=newcortex;
tala=newtala;
activateBrain(newcortex,vcontribs,tala,ix,cmapstruct,viewstruct);
light('Position', -viewstruct.lightpos, 'Style', 'infinite');
save(fullfile(workspace_path,[answer '_aligned_brain_model']),'cortex','tala','ix','cmapstruct','viewstruct');
axis on;
xlabel('x');
ylabel('y');
zlabel('z');
title('Realigned and resized brain');
% --- Plot electrodes on the standard talairach brain (subplot 3 of 3) -----
% NOTE(review): this load overwrites cortex/related variables with the
% standard talairach pial surface.
load('tal_brain/pial_talairach.mat')
subplot(1,3,3)
activateBrain(cortex,vcontribs,newtala,ix,cmapstruct,viewstruct);
light('Position', -viewstruct.lightpos, 'Style', 'infinite');
title('Electrode positions on standard talaraich brain');
axis on;
xlabel('x');
ylabel('y');
zlabel('z');
save(fullfile(workspace_path,[answer '_tal_brain_model']),'cortex','tala','ix','cmapstruct','viewstruct');
disp('Projected electrodes and stored mat files in workspace folder!');
|
function [ u ] = deconvolution_FA( f, nit, tau, lambda, epsilon, sigma )
%DECONVOLUTION_FA Deconvolution by an approximated (regularized) functional.
% Gradient-descent minimization of a TV-regularized deconvolution energy:
% at each iteration the data term K*(K*u - f) (K = Gaussian convolution)
% and a smoothed total-variation term div(grad(u)/(epsilon^2+|grad(u)|^2))
% are combined into one descent step. Each iterate is displayed in figure 1.
%
% Inputs:
%   f       : the blurred/noisy image
%   nit     : number of iterations
%   tau     : time step (descent step size)
%   lambda  : regularization weight of the TV term
%   epsilon : parameter preventing instability of the division by |grad(u)|
%   sigma   : sigma^2 is the variance of the Gaussian kernel
%
% Output:
%   u : the deconvolved image (same size as f)
%
% Depends on the external helper Convolution_Gaussian(img, sigma).
u = f;
for i=1:nit
    % Data term: K*(K*u - f), where K is the Gaussian convolution operator
    % (applied twice since the adjoint of a Gaussian blur is itself).
    convol1 = Convolution_Gaussian(u,sigma);
    convol = Convolution_Gaussian(convol1-f, sigma);
    % Regularization term of eq. 39: div( grad(u) / (epsilon^2 + |grad(u)|^2) ).
    [gradu_x, gradu_y] = gradient(u);
    dist2 = (gradu_x.^2 + gradu_y.^2);
    gu_x = gradu_x./(epsilon^2 + dist2);
    gu_y = gradu_y./(epsilon^2 + dist2);
    div_nu = divergence(gu_x, gu_y);
    % Gradient-descent step.
    u = u - tau*(convol - lambda*div_nu);
    % Display the current iterate.
    figure(1); colormap(gray);imagesc(u);axis equal
    title(sprintf('Deconvolution desc grad : Iteration %i/%i',i-1,nit));
end
end
|
% Copyright (c) 2009, Kyle Scott
% All rights reserved.
%
% Redistribution and use in source and binary forms, with or without
% modification, are permitted provided that the following conditions are
% met:
%
% * Redistributions of source code must retain the above copyright
% notice, this list of conditions and the following disclaimer.
% * Redistributions in binary form must reproduce the above copyright
% notice, this list of conditions and the following disclaimer in
% the documentation and/or other materials provided with the distribution
%
% THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
% AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
% IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
% ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
% LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
% CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
% SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
% INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
% CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
% ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
% POSSIBILITY OF SUCH DAMAGE.
% 07/15/2009, bw2rgb
%
% bw2rgb reformats a binary (black and white) image into a true
% color RGB image
%
% Note: requires Image Processing Toolbox
%
% Color Conversion
% Binary RGB
% 0 [0 0 0]
% 1 [255 255 255]
%
% bw2rgb(bwPic) converts a binary image to a black and white
% image in true color RGB format
%
% bwPic --> binary image (e.g. bwImage)
%
% example: rgbImage = bw2rgb(bwImage);
function rgbPic = bw2rgb(bwPic)
% BW2RGB  Convert a binary (black and white) image to a true-color RGB image.
%
% Color Conversion:
%   Binary 0 -> [0 0 0]
%   Binary 1 -> [255 255 255]
%
% Input:
%   bwPic  - 2-D binary image (logical or numeric with 0/1 entries)
% Output:
%   rgbPic - m x n x 3 uint8 image, 0 or 255 in every channel
%
% example: rgbImage = bw2rgb(bwImage);
%
% Fix: the original built a double array of 0/255 and passed it through
% im2uint8 (Image Processing Toolbox), which expects doubles in [0,1] and
% only produced the right answer via saturation. Building the uint8 result
% directly gives the same output without the toolbox dependency.
gray = uint8(bwPic == 1) * 255;   % single-channel 0/255 plane
rgbPic = repmat(gray, [1 1 3]);   % replicate into R, G and B channels
end
function [A] = denavit_hartenberg(r, a, d, teta)
% DENAVIT_HARTENBERG  Homogeneous transform from Denavit-Hartenberg parameters.
%
% Inputs (classic DH convention):
%   r    - link length (often written a_i; distance along the common normal)
%   a    - link twist alpha (rotation about the x axis)
%   d    - link offset (translation along the previous z axis)
%   teta - joint angle theta (rotation about the previous z axis)
% Output:
%   A - 4x4 homogeneous transformation matrix
%
% (Removed the redundant zeros(4,4) preallocation: A was immediately
% overwritten by the full matrix literal.)
A = [cos(teta) -sin(teta)*cos(a) sin(teta)*sin(a) r*cos(teta);
     sin(teta) cos(teta)*cos(a) -cos(teta)*sin(a) r*sin(teta);
     0 sin(a) cos(a) d;
     0 0 0 1];
end
% Compare ln(1+x) against its series approximation naturalLog(x, k) for
% increasing numbers of terms k (external helper naturalLog).
% x starts at -0.9 to stay inside the domain of ln(1+x) (x > -1).
x = -0.9: 0.1: 1;
% Plotting for Task 1
plot(x, log(1+x), 'r', 'LineWidth', 3);
hold on;
title('Graph for ln(1+x)');
xlabel('values of x');
ylabel('values of ln(1+x)');
grid;
% Plotting for Task 2
% Overlay approximations with 1, 3, 5 and 20 terms in distinct line styles.
plot(x, naturalLog(x, 1), 'b--', 'LineWidth', 3);
hold on;
plot(x, naturalLog(x, 3), 'c:', 'LineWidth', 3);
hold on;
plot(x, naturalLog(x, 5), 'k-.', 'LineWidth', 3);
hold on;
plot(x, naturalLog(x, 20), 'g', 'LineWidth', 3);
hold off;
legend('y = ln(1+x)', 'y = nLog(x, 1)', 'y = nLog(x, 3)', 'y = nLog(x, 5)', 'y = nLog(x, 20)', 'location', 'southeast');
function [fn]=mapwindow()
% MAPWINDOW  Let the user pick an image file via a dialog.
% Returns the full path to the chosen file, or 0 if the dialog was canceled.
[filename, pathname]=uigetfile('*.*','Select a image');
canceled = isequal(filename,0) || isequal(pathname,0);
if canceled
    fn=0;
else
    fn = fullfile(pathname, filename);
end
|
function [S_out,S_in,MSE]= Th_LMMSE_Simu_Sort_De(K,N,H,snRdB,snrNo,modType,Q_StepSize,B_Bit1,B_Bit2,B_Bit3,S1,S2,S3)
% TH_LMMSE_SIMU_SORT_DE  Simulate LMMSE detection with mixed-resolution
% quantization of the received signal after sorting antennas by channel gain.
%
% Inputs:
%   K          - number of transmit streams/users
%   N          - number of receive antennas
%   H          - N x K channel matrix
%   snRdB      - SNR in dB
%   snrNo      - column index (1..7) selecting the optimal quantizer step size
%   modType    - modulation type passed to Source_Gen
%   Q_StepSize - unused here (step sizes are taken from the table below)
%   B_Bit1/2/3 - quantizer resolutions (bits) for the three antenna groups
%   S1, S2, S3 - group sizes: first S1 antennas use B_Bit1, next S2 use
%                B_Bit2, remaining use B_Bit3 (S3 implied, unused directly)
% Outputs:
%   S_out - demodulated symbols after LMMSE detection
%   S_in  - demodulated transmitted symbols (ground truth labels)
%   MSE   - mean squared error of the real-valued LMMSE estimate
%
% Depends on external helpers Source_Gen and Quan, plus qamdemod
% (Communications Toolbox).
%
% Lookup table of optimal quantizer step sizes per resolution (rows) and
% SNR point (columns); row labels per the original comments.
Q(1,:) = [2.638, 1.925, 1.519, 1.277, 1.131, 1.043, 0.990]; % optimal step size of 1-bit
Q(2,:) = [0.992, 0.874, 0.801, 0.759, 0.735, 0.721, 0.713]; % optimal step size of 2-bit
Q(3,:) = [0.583, 0.514, 0.475, 0.448, 0.432, 0.423, 0.419]; % optimal step size of 3-bit
Q(4,:) = [0.187, 0.165, 0.152, 0.145, 0.145, 0.145, 0.145]; % optimal step size of 5-bit
Q(5,:) = [0.103, 0.092, 0.084, 0.079, 0.077, 0.075, 0.075]; % optimal step size of 6-bit
Q(6,:) = [0.039, 0.036, 0.034, 0.006, 0.004, 0.003, 0.003]; % optimal step size of 9-bit
Q_StepSize1 = Q(1,snrNo); % chooose the optimal step for 1-bit or 2-bit
Q_StepSize2 = Q(5,snrNo); % chooose the optimal step for 7-bit
Q_StepSize3 = Q(3,snrNo); % chooose the optimal step for 9-bit
% Noise variance from SNR (signal power normalized to 1).
sigma2=10^(-snRdB/10);
W=(randn(N,1)+1j*randn(N,1))*1/sqrt(2)*sqrt(sigma2);
[X,M]=Source_Gen(K,modType);
% Sort antennas (rows of H) by descending channel gain using a temporary
% extra column holding the per-row squared norms.
H(:,K+1)=sum(abs(H').^2)';
H=sortrows(H,-(K+1));
H=H(:,1:K);
Y= H*X+W;
% save sortY Y;
% Stack real and imaginary parts for real-valued quantization.
YY=[real(Y);imag(Y)];
% save YY YY;
% YY_hat1=Quan(YY,B_Bit1,Q_StepSize);
% YY_hat2=Quan(YY,B_Bit2,Q_StepSize);
% YY_hat = [YY_hat1(1:S1);YY_hat2(S1+1:N);YY_hat1(N+1:N+S1);YY_hat2(N+S1+1:2*N)];
% Quantize with three resolutions, then splice per antenna group (applied
% identically to the real and imaginary halves).
YY_hat1=Quan(YY,B_Bit1,Q_StepSize1);
YY_hat2=Quan(YY,B_Bit2,Q_StepSize2);
YY_hat3=Quan(YY,B_Bit3,Q_StepSize3);
YY_hat = [YY_hat1(1:S1);YY_hat2(S1+1:S1+S2);YY_hat3(S1+S2+1:N);...
YY_hat1(N+1:N+S1);YY_hat2(N+S1+1:N+S1+S2);YY_hat3(N+S1+S2+1:2*N)];
% save YY_hat1 YY_hat1;
% save YY_hat2 YY_hat2;
% save YY_hat3 YY_hat3;
% save YY_hat YY_hat;
% Reassemble the complex received vector from the quantized halves.
Y_hat=YY_hat(1:N)+1j*YY_hat(N+1:end);
S_in=qamdemod(X,M,0);
% LMMSE estimate: (H'H + sigma2*I)^-1 H' y_hat.
X_LMMSE=(H'*H+sigma2*eye(K))\H'*Y_hat;
MSE = norm([real(X);imag(X)]-[real(X_LMMSE);imag(X_LMMSE)],2)^2/(2*K);
S_out=qamdemod(X_LMMSE,M,0); % MMSE
end
|
function [V1,V2,Phase,RatioPfMd]=Vaccination_over_Time_without_Kids(UpTake, Date)
% VACCINATION_OVER_TIME_WITHOUT_KIDS  First/second COVID vaccine doses over
% time per region and age group, using observed data up to Date and a
% phased-rollout forecast afterwards (under-15s excluded from the forecast).
%
% Inputs:
%   UpTake - per-age-group uptake fractions (21 age groups)
%   Date   - day index (from 1 Jan 2020) after which observed data is
%            discarded and the forecast approximation is used instead
% Outputs:
%   V1, V2    - 11 x time x 21 arrays of first/second doses per day
%               (region x day x age group), shifted by a 14-day delay to effect
%   Phase     - rollout phase active on each forecast day
%   RatioPfMd - not computed in this version; returned empty (see fix note)
%
% Requires Surrogate_Vacc_Data.mat (Vacc1, Vacc2) and Regional_PP.mat
% (Region_PP) on the path.
%
% Fix: RatioPfMd was declared as an output but never assigned, so any call
% requesting all four outputs errored; it is now returned empty.
RatioPfMd=[];
Separation=11*7;  % dose separation, initially 11 weeks
Delay=14;         % days from dose to effect
Inf_to_Symp=5;
load Surrogate_Vacc_Data.mat
load Regional_PP.mat
% Remove the later data and use approximation instead
Vacc1(:,Date:end,:)=[];
Vacc2(:,Date:end,:)=[];
% Age-group index sets: a80 = 80+, aW = working-age (HCW proxy).
a80=[17:21]; p80=Region_PP(1:11,a80)/sum(Region_PP(2:11,a80),'all'); %Note average for UK data !!
aW=[4:13]; pW=Region_PP(1:11,aW)/sum(Region_PP(2:11,aW),'all'); %Note average for UK data !!
T=[datenum(2021,1,4):datenum(2022,11,1)] +1-datenum(2020,1,1);
V1=zeros(11,2000,21); V2=zeros(11,2000,21);
% Approximations to the amount of vaccine going forwards if we don't have
% the data (or if we didn't have the data at the time).
% v(t) = national daily dose supply.
v(350:370)=5e5/7;
v(370:440)=2.5e6/7;
v(440:540)=3.2e6/7;
v(540:600)=1.3e6/7;
v(600:2000)=2.5e5/7;
% Using TRUE early data to date
T=300:size(Vacc1,2);
V1(2:8,T,:)=Vacc1(2:8,T,:);
V2(2:8,T,:)=Vacc2(2:8,T,:);
Vacc1(1,:,:)=sum(Vacc1(2:8,:,:),1);
Vacc2(1,:,:)=sum(Vacc2(2:8,:,:),1);
%FORECASTING FORWARDS
T=(size(Vacc1,2)+1):(datenum(2023,7,1) +1-datenum(2020,1,1));
RPPall=sum(Region_PP(1:11,:),2)/sum(Region_PP(2:11,:),'all'); % RPPall = proportion of population in UK
for R=2:11
RPP(R,1:21)=Region_PP(R,:)./sum(Region_PP(2:11,:),1);
end
Keep_UpTake=UpTake;
UpTake(1:3)=0; % remove 0-14 year olds.
for R=2:11
for t=T
% Shrink the dose separation over time, down to a floor of 8 weeks.
if t>500
Separation=Separation-1; %reduce until at 8 weeks.
Separation(Separation<8*7)=8*7;
end
V1(R,t,:)=0;
% Second doses: everyone first-dosed at least Separation days ago and not
% yet second-dosed, capped by the region's share of today's supply.
V2(R,t,:)=sum(V1(R,1:(t-Separation),:),2)-sum(V2(R,1:(t-1),:),2);
V2(R,t,1:3)=0;
V2(R,t,(V2(R,t,:)<0))=0;
if sum(V2(R,t,:),3)>RPPall(R)*v(t)
V2(R,t,:)=V2(R,t,:)*RPPall(R)*v(t)/sum(V2(R,t,:),3);
end
v1=v(t)*RPPall(R)-sum(V2(R,t,:),3); % Amount of vaccine left to distribute.
%v1=v(t)*RPPall(R);
v1(v1<0)=0;
% Phased priority cascade: each level checks whether its target group has
% reached its uptake target; if so, move on to the next priority group.
Phase(t)=2; TbV=a80;
if sum(V1(R,1:t,aW),'all')>UpTake*(0.7e6+1.6e6)*sum(Region_PP(R,aW),2)/sum(Region_PP(2:11,aW),'all') % Done HCW
VHCW=0;
else
VHCW=v1/2;
end
Vold=v1-VHCW;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % over 80s & CareHomes
Phase(t)=3; TbV=(75/5)+1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 75-79
Phase(t)=4; TbV=(70/5)+1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 70-74
Phase(t)=4.5; TbV=[5:14];
if sum(V1(R,1:t,TbV),'all')>mean(UpTake(TbV))*(2.3e6+2.2e6)*sum(Region_PP(R,TbV),2)/sum(Region_PP(2:11,TbV),'all') % Extremely vulnerable
Phase(t)=5; TbV=(65/5)+1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 65-69
Phase(t)=6; TbV=[5:12];
if sum(V1(R,1:t,TbV),'all')>mean(UpTake(TbV))*(2.3e6+2.2e6+8.5e6)*sum(Region_PP(R,TbV),2)/sum(Region_PP(2:11,TbV),'all') % Health Conditions <65
Phase(t)=7; TbV=(60/5)+1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 60-64
Phase(t)=8; TbV=(55/5)+1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 55-59
Phase(t)=9; TbV=(50/5)+1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 50-54
Phase(t)=10; TbV=[9:10];
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 40-49
Phase(t)=10; TbV=[7:8];
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 30-39
Phase(t)=10; TbV=[5:6];
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 20-29
Phase(t)=10; TbV=[4];
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 15-19
Phase(t)=10; TbV=3;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 10-14 not used in this code
Phase(t)=10; TbV=2;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 5-9 not used in this code
Phase(t)=10; TbV=1;
if sum(V1(R,1:t,TbV),'all')>sum(UpTake(TbV).*Region_PP(R,TbV),'all') % 0-5 not used in this code
Phase(t)=0; TbV=aW; Vold=0;
end
end
end
end
end
end
end
end
end
end
end
end
end
end
end
end
% Distribute today's first doses: half to HCW (if still due), rest to the
% current priority group, proportional to regional age populations.
V1(R,t,aW)=pW(R,:)*(VHCW);
V1(R,t,TbV)=Vold*Region_PP(R,TbV)/sum(Region_PP(R,TbV),'all');
end
end
% Remove any impossible values
% (scale down any series that would exceed the group's population).
for R=2:11
for A=1:21
if squeeze(sum(V1(R,:,A),2))>Region_PP(R,A)
V1(R,:,A)=V1(R,:,A)*0.99*Region_PP(R,A)/squeeze(sum(V1(R,:,A),2));
end
if squeeze(sum(V2(R,:,A),2))>Region_PP(R,A)
V2(R,:,A)=V2(R,:,A)*0.99*Region_PP(R,A)/squeeze(sum(V2(R,:,A),2));
end
end
end
% Put in the delay to effect
V1(:,[1:end]+Delay,:)=V1; V1(:,1:Delay,:)=0;
V2(:,[1:end]+Delay,:)=V2; V2(:,1:Delay,:)=0;
|
%function [I, phan] = loadCTPhantomFromTiff( dataPath, dataName, geom )
% Script: load a CT phantom volume from a multi-page TIFF, using a DICOM
% header file for voxel spacing, shift intensities to Hounsfield units if
% needed, and write the result out as a MetaImage (.mhd) volume.
% Requires dicominfo (Image Processing Toolbox) and the external helper
% writeMetaImage.
dataPath = '\data\prostate-patient-ct-fiducial\';
dataName = 'prostateScans';
noSlices = 149;
% Voxel spacing and in-plane dimensions come from the reference DICOM header.
info = dicominfo([ dataPath 'reference'] );
dx = info.PixelSpacing(1);
dy = info.PixelSpacing(2);
dz = info.SliceThickness;
nx = info.Rows;
ny = info.Columns;
nz = noSlices;
I = zeros( [nx ny nz], 'single' );
% Read the volume slice by slice from the multi-page TIFF.
for i = 1:nz
if mod(i,20) == 1
fprintf('\tslice %d \n', i);
end
slice = imread([dataPath dataName '.tif'], 'Index', i);
I(:,:,i) = slice;
end
% Heuristic: if no value is below -100, intensities are presumably stored
% with a +1000 offset; shift to Hounsfield units (air ~ -1000).
if min( I(:) ) > -100
I = I - 1000;
end
% MetaImage header describing the 3-D volume and its voxel spacing.
meta.NDims = 3;
meta.DimSize = size(I);
meta.ElementSpacing = [ dx, dy, dz];
writeMetaImage(I, [dataName '.mhd'], meta);
|
function [model, gap_vec_heuristic, num_passes, progress] = solver_multiLambda_BCFW_hybrid( param, options )
% [model, gap_vec_heuristic, num_passes, progress] = solver_multiLambda_BCFW_hybrid( param, options )
%
% solver_multiLambda_BCFW_hybrid solves the SSVM problem for multiple values of regularization parameter lambda.
% Supported regimes: grid search with/without warm start and eps-approximate/heuristic regularization path.
%
% The description of the methods is provided in the paper
%
% [A] Anton Osokin, Jean-Baptiste Alayrac, Isabella Lukasewitz, Puneet K. Dokania, Simon Lacoste-Julien
% Minding the Gaps for Block Frank-Wolfe Optimization of Structured SVMs,
% International Conference on Machine Learning, 2016
%
% Please, cite the paper in any resulting publications.
%
% Function solver_multiLambda_BCFW_hybrid relies on solver_BCFW_hybrid.m as an optimization routine for one lambda.
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% INPUTS: param, options
%
% param: a structure describing the problem. Identical to the one of solver_BCFW_hybrid.m
% options: (an optional) structure with some of the following fields to customize the optimization algorithm:
%
% Fields sample, gapDegree, gap_check, stepType, useCache, cacheNu, cacheFactor, maxCacheSize,
% doCacheConsistencyCheck, rand_seed, quit_passes_heuristic_gap, quit_passes_heuristic_gap_eps_multiplyer,
% logging_level, do_batch_step
% control the solver for one value of lambda: solver_BCFW_hybrid.m
%
% regularization_path --controls whether to compute the regularization path or do the grid search
% (default: false) - do grid search
%
% true_gap_when_converged -- flag saying whether to demand computation of the true gap at convergence
% (default: true) - eps-approximate path or grid search with guarantees
%
% check_lambda_change --flag saying whether to do appropriate checks when lambda is changed, significantly slows down the code
% (default: false)
%
%%%%%%%%%% GRID SEARCH
%
% lambda_values --vector of lambdas defining the grid
% (default: 10.^(4: -1: -3))
%
% gap_threshold --accuracy, until which to optimize for each lambda
% (default: 0.1)
%
% warm_start_type -- type of warm start to use: 'keep_primal' or 'keep_dual' or 'none';
% (default: 'keep_primal')
%
%%%%%%%%%% REGULARIZATION PATH
%
% regularization_path_min_lambda -- minimum value of lambda determining the last breakpoint
% The method will quit earlier if the stopping criterion is hit.
% (default: 1e-5)
%
% regularization_path_eps --eps parameter for eps-approximate path
% (default: 1e-1)
%
% regularization_path_a --kappa parameter from the paper. The internal SSVM solver is
% run with kappa*eps gap stopping criterion
% (default: 0.9)
%
% regularization_path_b --when doing the induction step to get the new breakpoint we can be
% less conservative or more conservative.
% (default: 1 - options.regularization_path_a)
%
%%%%%%%%%% COMPUTATIONAL BUDGET (whole process)
%
% num_passes -- max number of passes through data
% (default: 200000) - effectively unlimited
% time_budget -- max running time
% (default: 60*24) - 24 hours
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% OUTPUTS: model, gap_vec_heuristic, num_passes, progress, cache, exact_gap_flag
%
% model --model.w contains the obtained parameters w (column vector)
% model.ell contains b'*alpha which is useful to compute duality gap, etc.
% model.wMat contains one parameter vector w_i per training object
% model.ellMat contains one value \ell_i per training object
% model.v is present only when there are some positivity constraints (untruncated version of model.w)
%
% gap_vec_heuristic -- the estimates of the block gaps obtained at the end of the method.
% If the method has converged and options.true_gap_when_converged == true
% the estimates equal the exact block gaps.
%
% num_passes -- number of effective passes over the dataset performed by the method
%
% progress --logged information about the run of the method. The amount of information is determined by options.logging_level
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Authors: Anton Osokin, Jean-Baptiste Alayrac, Simon Lacoste-Julien
% Project web page: http://www.di.ens.fr/sierra/research/gapBCFW
% Code: https://github.com/aosokin/gapBCFW
%% parse the options
options_default = defaultOptions;
if exist('options', 'var')
options = processOptions(options, options_default);
else
options = options_default;
end
fprintf('Running %s on %d examples.\n', mfilename, numel(param.patterns));
% map the scalar stepType code onto the three step-kind flags of the solver
switch options.stepType
case 0
options.use_FW_steps = true;
options.use_away_steps = true;
options.use_pairwise_steps = true;
fprintf('Launching the combined block coordinate method: FW, away and pairwise steps\n');
case 1
options.use_FW_steps = true;
options.use_away_steps = false;
options.use_pairwise_steps = false;
fprintf('Launching block coordinate FW (BCFW)\n');
case 2
options.use_FW_steps = false;
options.use_away_steps = false;
options.use_pairwise_steps = true;
fprintf('Launching block coordinate pairwise FW (BC-P-FW)\n');
case 3
options.use_FW_steps = true;
options.use_away_steps = true;
options.use_pairwise_steps = false;
fprintf('Launching block coordinate FW with away steps (BC-A-FW)\n');
otherwise
error([mfilename, ': unknown execution mode'])
end
% if cache is used or we are going to do pairwise or away steps we will need to create and maintain dual variables
options.update_dual_vars = options.useCache || options.use_away_steps || options.use_pairwise_steps;
% values of lambda need to be decreasing
options.lambda_values = sort(options.lambda_values, 'descend');
% the following flag must always be false when steps with away corners are used
% otherwise solver_multiLambda_BCFW_hybrid.m will be outputting dual variables inconsistent with the primal
options.output_model_before_batch_step = false;
% when we do regularization path we optimize at each lambda up to A*eps gap
if options.regularization_path
options.gap_threshold = options.regularization_path_a * options.regularization_path_eps;
end
% the regularization path mode
% when options.regularization_path == true parameters options.lambda_values and options.gap_threshold are ignored
% type of warm start; has to be 'keep_primal' for regularization path
if options.regularization_path
options.warm_start_type = 'keep_primal';
end
fprintf('The options are as follows:\n');
options
%% start the timer
tStart = tic();
%% fix the seed
init_rand_seed = options.rand_seed;
rng('default');
rng(init_rand_seed);
%% check that the loss on ground truth itself is zero:
% param.lossFn(param, label_gt, label_gt) == 0
% otherwise some of the equations below are incorrect
for i_object = 1 : param.n
label_gt = param.labels{i_object};
loss_gt = param.lossFn(param, label_gt, label_gt);
if loss_gt>1e-12
error(['On object ', num2str(i_object), ' loss netween GT and GT is non zero. This case is currently not supported.']);
end
end
%% initialization
if ~options.regularization_path
% without the reg. path we do regular initialization with zeros
if options.update_dual_vars
% create cache for zero-initialized model
[cache, model] = cache_initilize_model( param, options.maxCacheSize );
else
cache = [];
if ~exist('model', 'var')
model = initialize_model_zeros(param);
end
end
change_lambda_flag = false;
lambda_previous = inf;
exact_gap_flag = false;
if ~exist('gap_vec_heuristic', 'var')
gap_vec_heuristic = inf(param.n,1);
end
else
% in case of the regularization path we need to find lambda, primal and dual variables, such that gap is small enough
fprintf('Initializing the regularizarion path\n');
[options.lambda_values, model, gap_vec_heuristic, cache] = initialize_regularization_path( param, options.gap_threshold, options.update_dual_vars, options.maxCacheSize );
change_lambda_flag = true;
% NOTE(review): options.lambda_values is presumably a scalar here
% (the first breakpoint returned by initialize_regularization_path) -- verify
lambda_previous = options.lambda_values;
exact_gap_flag = false;
options.init_gap_value_for_cache = sum(gap_vec_heuristic);
end
%% do the main loop
num_lambdas = numel(options.lambda_values);
progress = cell(0, 1);
i_lambda = 0;
time_budget_full = options.time_budget;
num_passes = 0;
while (~options.regularization_path && i_lambda < numel(options.lambda_values)) || ...
(options.regularization_path && (i_lambda==0 || options.lambda_values( i_lambda ) > options.regularization_path_min_lambda))
%% init pass for the new value of lambda
i_lambda = i_lambda + 1;
options.rand_seed = init_rand_seed + i_lambda - 1;
tStartLambda = tic();
%% get the new value of lambda
% compute the vector update for the gaps, it is used for the reg. path and later to update the gap estimate
gap_vec_heuristic_update = model.ellMat - lambda_previous*(model.wMat'*model.w(:));
if ~options.regularization_path
options.lambda = options.lambda_values( i_lambda );
fprintf('Processing lambda %f: %d of %d\n', options.lambda, i_lambda, num_lambdas);
else
% current gap estimate is supposed to be smaller than options.regularization_path_a * options.regularization_path_eps
gap_heuristic = sum( gap_vec_heuristic );
if gap_heuristic > options.regularization_path_a * options.regularization_path_eps + 1e-12
fprintf('CAUTION (%s): current gap estimate=%f is greater than A*eps= %f, something probably went wrong.\n', mfilename, gap_heuristic, options.regularization_path_a*options.regularization_path_eps);
end
gap_heuristic_update = sum( gap_vec_heuristic_update );
% we can do larger steps taking into account the current estimates of the gaps
max_allowed_gap_update = (options.regularization_path_b + options.regularization_path_a) * options.regularization_path_eps - gap_heuristic;
% small update stopping criterion
if gap_heuristic_update <= max_allowed_gap_update
% this case happens when gap_heuristic_update <= B * eps
% it means that for any lambdas smaller than the current one new gap will be smaller than (A+B)*eps
fprintf('Gap update is smaller than B*eps. Terminating.\n');
break;
end
% computing the maximum change of lambda that we can do
lambda_ratio = 1 - max_allowed_gap_update / gap_heuristic_update;
if lambda_ratio > 1
error('CAUTION: next lambda is going to be larger than the previous one. Heuristic gap estimate is negative! Something is wrong!');
end
if lambda_ratio <= 0
error('CAUTION: next lambda is going to be negative. Something is wrong!');
end
options.lambda = lambda_ratio * lambda_previous;
options.lambda_values = [options.lambda_values; options.lambda];
fprintf('Processing lambda %f: %d of the regularization path, min lambda: %f\n', options.lambda, i_lambda, options.regularization_path_min_lambda);
end
%% update model and cache when changing lambda
can_do_warm_start = true;
if isequal( options.warm_start_type, 'keep_primal' )
if change_lambda_flag
% update the model when changing lambda
lambda_ratio = options.lambda / lambda_previous;
% to keep result of the oracle the same, we keep primal variables model.w, model.wMat
% update the gap values
% if the old gap value was exact then the new gap value will be exact as well
if ~exact_gap_flag
% if the old gap estimates were not correct we do not decrease the gap to be conservative
gap_vec_heuristic_update( gap_vec_heuristic_update < 0 ) = 0;
end
gap_vec_heuristic = gap_vec_heuristic + (1-lambda_ratio)*gap_vec_heuristic_update;
% update values model.ell, model.ellMat
model.ell = model.ell * lambda_ratio;
model.ellMat = model.ellMat * lambda_ratio;
% the dual variables need to be changed if maintained
if options.update_dual_vars
for i_object = 1 : param.n
% find index of the GT label
label_gt = param.labels{i_object};
yhash_i_gt = param.hashFn( label_gt );
[cache{i_object}, cache_gt_index ] = cache_get_entry_by_hash( cache{i_object}, yhash_i_gt, zeros(param.d, 1), param.lossFn(param, label_gt, label_gt), ...
[], options.maxCacheSize, false );
alphas = cache_get_dual_vars( cache{i_object} );
if ~cache{i_object}.alpha_sum_to_one
fprintf('CAUTION (%s): Alphas of object %d in the cache do not sum up to one: can not reinitialize for another lambda.', mfilename, i_object);
can_do_warm_start = false;
end
% all alphas corresponding to non-groundtruth labelings are multiplied by the ratio lambda_new / lambda_old
% the rest of the mass goes into the alpha of the ground-truth label
alphas = alphas * lambda_ratio;
alphas(cache_gt_index) = 0;
alphas(cache_gt_index) = 1 - sum(alphas);
cache{i_object} = cache_set_dual_vars( cache{i_object}, alphas );
end
end
end
%% check model and cache after the update
if options.check_lambda_change
if options.update_dual_vars
% check if the model reconstructed from the cache is equivalent to the maintained one
if options.do_batch_step && options.output_model_before_batch_step
% when options.do_batch_step==true and options.output_model_before_batch_step==true
% checking the gap does not make sense, because it is outdated after the batch FW step
% only methods not relying on the dual variables should be used in this regime
fprintf('Skipping the check of the model in %s because it is outdated in this regime.\n', mfilename);
if options.use_away_steps || options.use_pairwise_steps
fprintf('CAUTION (%s): away and pairwise steps will be incorrect in this regime because the dual variables are not consistent with the primal ones. Use at your own risk!\n', mfilename);
end
else
[ cache, model_dual ] = cache_initilize_model( param, options.maxCacheSize, cache, [], options.lambda );
if ~isequal_models(model, model_dual)
fprintf('CAUTION (%s): the update to lambda %f leads to inconsistent model\n', mfilename, options.lambda);
end
end
end
if exact_gap_flag
% checking the gap values makes sense only when the maintained gaps equal the true gaps
[~, gap_vec_check] = duality_gap_vec( param, param.oracleFn, model, options.lambda, model.wMat, model.ellMat );
if any( abs(gap_vec_heuristic(:) - gap_vec_check(:)) > 1e-8 )
fprintf('CAUTION (%s): gaps reconstructed for lambda %f do not equal the maintained ones, max diff: %f\n', mfilename, options.lambda, max(abs(gap_vec_heuristic(:) - gap_vec_check(:) )) );
end
end
end
elseif isequal( options.warm_start_type, 'keep_dual' )
if ~options.update_dual_vars
error('Cannot warm start from dual variables, because dual variables are not initialized.');
end
% construct primal model from dual variables stored in cache
[ cache, model ] = cache_initilize_model( param, options.maxCacheSize, cache, [], options.lambda );
% with this type of warm start we have no control over what is happening with the gaps
% we can either set the gaps to infinity or do a batch pass to update them
% [~, gap_vec_heuristic] = duality_gap_vec( param, param.oracleFn, model, options.lambda, model.wMat, model.ellMat );
% exact_gap_flag = true;
gap_vec_heuristic = inf(param.n,1);
exact_gap_flag = false;
elseif isequal( options.warm_start_type, 'none' )
can_do_warm_start = false;
else
error('Unknown warm start type!');
end
%% run the solver on the new value of lambda
% update the time budget for the solver:
options.time_budget = time_budget_full - toc( tStart )/60;
if exact_gap_flag
options.init_gap_value_for_cache = sum(gap_vec_heuristic);
else
% in this case we just turn off global criterion for cache
options.init_gap_value_for_cache = 0;
end
if can_do_warm_start
[ model, gap_vec_heuristic, curNumPasses, progress{i_lambda}, cache, exact_gap_flag ] = solver_BCFW_hybrid(param, options, model, gap_vec_heuristic, cache);
else
[ model, gap_vec_heuristic, curNumPasses, progress{i_lambda}, cache, exact_gap_flag ] = solver_BCFW_hybrid(param, options);
end
num_passes = num_passes + curNumPasses;
%% time budget stopping criterion
if toc(tStart)/60 > time_budget_full
% time budget fully used
fprintf('Spent %fs on lambda %f; did not converge\n', toc(tStartLambda), options.lambda);
fprintf('Time budget was fully used. Outputting the part that was computed\n');
break;
end
%% final touches
lambda_previous = options.lambda;
change_lambda_flag = true;
fprintf('Spent %fs on lambda %f\n', toc(tStartLambda), options.lambda);
end
%% final steps
fprintf('Spent %fs for %d values of lambda\n', toc(tStart), num_lambdas )
end % solverBCFWH
function equal = isequal_models(model1, model2, accuracy)
% isequal_models compares two SSVM model structures field by field.
% Returns true iff the fields w, ell, wMat, ellMat (and v, when present)
% agree up to the given absolute tolerance; prints a CAUTION line for
% every mismatching field.
%
% accuracy -- (optional) absolute tolerance, default 1e-12
if ~exist('accuracy', 'var') || isempty(accuracy)
    accuracy = 1e-12;
end
equal = true;
% the four mandatory numeric fields, compared in a fixed order
field_list = {'w', 'ell', 'wMat', 'ellMat'};
for i_field = 1 : numel(field_list)
    cur_name = field_list{i_field};
    abs_diff = abs(model1.(cur_name)(:) - model2.(cur_name)(:));
    if any( abs_diff > accuracy )
        fprintf('CAUTION (%s): field <%s> of the two models has max difference of %f\n', mfilename, cur_name, max(abs_diff) );
        equal = false;
    end
end
% the field v exists only for models with positivity constraints
has_v1 = isfield(model1, 'v');
has_v2 = isfield(model2, 'v');
if has_v1 ~= has_v2
    fprintf('CAUTION (%s): the two models are inconsistent in terms of the support of positivity constraints\n', mfilename);
    equal = false;
end
if has_v1 && has_v2 && any( abs(model1.v(:) - model2.v(:)) > accuracy )
    fprintf('CAUTION (%s): field <v> of the two models has max difference of %f\n', mfilename, max(abs(model1.v(:) - model2.v(:))) );
    equal = false;
end
end
function options = defaultOptions
% defaultOptions builds the default option structure for
% solver_multiLambda_BCFW_hybrid.m. The field names (and their exact
% default values) form the public interface of the options structure.
% kappa parameter of the regularization path (paper notation)
kappa = 0.9;
% name/default table; the loop below copies it into the struct in order
name_value_pairs = { ...
    ... % grid search: lambda grid and per-lambda target accuracy
    'lambda_values',                  10.^(4: -1: -3); ...
    'gap_threshold',                  0.1; ...
    ... % joint computational budget for the whole multi-lambda run
    'num_passes',                     200000; ... % max number of passes through data
    'time_budget',                    60*24; ...  % minutes, i.e. 24 hours
    ... % demand computation of the true gap at convergence
    'true_gap_when_converged',        true; ...
    ... % expensive consistency checks when lambda is changed
    'check_lambda_change',            false; ...
    ... % warm start type: 'keep_primal' or 'keep_dual' or 'none'
    ... % (has to be 'keep_primal' for the regularization path)
    'warm_start_type',                'keep_primal'; ...
    ... % regularization path mode; when true, lambda_values and
    ... % gap_threshold above are ignored
    'regularization_path',            false; ...
    'regularization_path_eps',        1e-1; ...
    'regularization_path_a',          kappa; ...
    'regularization_path_b',          1 - kappa; ...
    'regularization_path_min_lambda', 1e-5; ...
    ... % parameters forwarded to solver_BCFW_hybrid.m
    'sample',                         'gap'; ... % 'uniform' or 'perm' or 'gap' or 'maxGap'
    'gapDegree',                      1.0; ...
    'useCache',                       true; ...
    'cacheNu',                        0.01; ...
    'cacheFactor',                    0.25; ...
    'maxCacheSize',                   100; ...
    'doCacheConsistencyCheck',        false; ... % slow exhaustive check (a simple check always runs)
    'stepType',                       2; ...     % 0: all steps, 1: FW only, 2: pairwise, 3: FW + away
    'do_batch_step',                  false; ... % do batch FW step vs. only compute the duality gap
    'gap_check',                      10; ...    % how often to compute the true gap
    'rand_seed',                      1; ...     % random seed
    ... % exit onePass_BCFW_hybrid.m based on heuristic gaps; affects the
    ... % method but final convergence guarantees stay exact
    'quit_passes_heuristic_gap',      true; ...
    'quit_passes_heuristic_gap_eps_multiplyer', 0.8; ... % < 1 for underestimated gaps; > 1 for overestimated
    ... % logging level: 0 none, 1 per-pass models and statistics,
    ... % 2 O(n) numbers per gap-check pass, 3 everything (very expensive)
    'logging_level',                  1 ...
    };
options = struct();
for i_opt = 1 : size(name_value_pairs, 1)
    options.(name_value_pairs{i_opt, 1}) = name_value_pairs{i_opt, 2};
end
end % defaultOptions
|
function feature_output = voxel_Volume(varargin)
% voxel_Volume computes the volume (in mL) of the region selected by the
% global mask mask_for_TA, using the voxel spacing stored in the global
% image_property structure. The result is also written into the global
% tumor_volume_CGITA so that other features can reuse it.
%
% Raises an error when the parent image has not been computed yet.
global image_global;
global image_property;
global mask_for_TA;
global tumor_volume_CGITA;
% BUGFIX: after a "global" declaration the variable always exists
% (exist('image_global') returns 1 even before the image is loaded), so the
% original exist() guard could never fail; test for emptiness instead.
if ~isempty(image_global)
    % count the voxels selected by the mask; nnz is equivalent to the
    % original length(temp1(find(mask_for_TA))) without copying the image
    nonzero_voxels = nnz(mask_for_TA);
    % assumes image_property.pixel_spacing holds the voxel dimensions in mm
    % -- TODO confirm against the image loader
    feature_output = nonzero_voxels * prod(image_property.pixel_spacing) / 1e3; % convert to mL
    tumor_volume_CGITA = feature_output;
else
    error('The parent image must be computed first');
end
return; |
%------------- BEGIN CODE --------------
% clear figures, workspace and command window
close all ; clear all ; clc ;

% bisection method for f(x) = x^3 + x^2 - 12x on the bracket [xa, xu]
f = @(x) x.^3 + x.^2 - 12.*x;

xa = 1;         % lower end of the bracket
xu = 7;         % upper end of the bracket
tol = 0.0001;   % stop once |f(midpoint)| drops below this tolerance

xo = (xa + xu) / 2;
fxo = f(xo);
fxa = f(xa);
iter = 0;
while abs(fxo) > tol
    % keep the half-bracket over which f changes sign
    if fxa * fxo < 0
        xu = xo;
    else
        xa = xo;
    end
    xo = (xa + xu) / 2;
    fxo = f(xo);
    fxa = f(xa);
    iter = iter + 1;
end

% display the root estimate, its residual, and the iteration count
xo
fxo
iter
%------------- END OF CODE -------------- |
% extract directions from interest vectors
% Script: split nmember people into two (near-)equal groups so that the
% interest directions within each group are as similar as possible.
% Assumes the workspace already holds:
%   m    -- nmember x d matrix of interest vectors (one row per member) -- TODO confirm
%   name -- cell array of nmember member names -- TODO confirm
nmember = size(m,1);
direction = m;
% normalize every row to unit length so that only the direction matters
for i = 1 : nmember
direction(i,:) = m(i,:) / norm(m(i,:));
end
% build the graph
% graph(i,j) is the L1 distance between the unit direction vectors i and j
graph = zeros(nmember);
for i = 1 : nmember
for j = 1 : nmember
dif = direction(i,:) - direction(j,:);
graph(i,j) = sum(abs(dif));
end
end
% enumerate all solutions
% brute force: every way of choosing ngroup1 members for group 1
ngroup1 = floor(nmember/2);
ngroup2 = nmember-ngroup1;
group1cand = nchoosek (1:nmember, ngroup1);
% evaluate all solutions
% score is the mean pairwise within-group distance (lower is better);
% NOTE(review): divides by ngroup*(ngroup-1), so a group of size 1 gives a
% division by zero -- presumably nmember >= 4 is assumed here
score = zeros(size(group1cand,1),1);
for i = 1 : size(group1cand,1)
group1 = group1cand(i,:);
group2 = setdiff(1:nmember, group1);
score1 = sum(sum(graph(group1, group1))) / (ngroup1 * (ngroup1 - 1));
score2 = sum(sum(graph(group2, group2))) / (ngroup2 * (ngroup2 - 1));
score(i) = score1 + score2;
end
% select the best solution
[minscore, minpos] = min(score);
% print out the solution
bestgroup1 = group1cand(minpos,:);
fprintf ('Group 1:\n');
for i = 1 : length(bestgroup1)
fprintf (' %s\n', name{bestgroup1(i)});
end
fprintf ('Group 2:\n');
bestgroup2 = setdiff(1:nmember,group1cand(minpos,:));
for i = 1 : length(bestgroup2)
fprintf (' %s\n', name{bestgroup2(i)});
end
|
%% go to the directory in tier 2
% Script for inspecting registration shift maps and unpacking flattened
% volume data back into a 3-D array. Meant to be run cell by cell; many
% variables (FirstMap, coorList, Try, new, Mean, newMean2, flatTarget)
% are loaded from the .mat files below -- TODO confirm their shapes.
cd('V:\users\Aaron\150814_BMWR17')
load 'Run1ShiftsCorr1'
%% display the correlation of one of the shifts
Shifts = cell2mat( FirstMap(:,1));
Corr = cell2mat( FirstMap(:,2));
X = Shifts(:,1);
Y = Shifts(:,2);
Z = Shifts(:,3);
close all
figure
scatter3(X,Y,Z,40,Corr,'filled')
colormap hsv
colorbar
% b indexes the best-correlated shift
% BUGFIX: removed a stray "ss" token that followed this statement and
% caused an "undefined function or variable" error
[a,b] = max(Corr);
title(['x: ' num2str(X(b)) ' y:' num2str(Y(b)) ' z:' num2str(Z(b))])
%%
load Run1Try
scatter3(coorList(:,1),coorList(:,2),coorList(:,3),30,Try)
%%
load('Run1newcoorArray')
load('Run1flatTarget')
%%
close all
hFig = figure();
colormap(hFig,'gray');
cameratoolbar();
hAx = axes('Parent',hFig,'Color','black');
view(hAx,37,45);
%
% coorList = newcoorArray;
% draw one textured plane per unique z level; press a key to advance
Zs = unique(coorList(:,3));
for i = 1:length(Zs)
    x = coorList(coorList(:,3)==Zs(i),2);
    x = [min(x) max(x)];
    y = coorList(coorList(:,3)==Zs(i),1);
    y = [min(y) max(y)];
    z = Zs(i);
    [x,y,z] = meshgrid([x(1) x(2)],[y(1) y(2)],z);
    hSurf = surface('Parent',hAx,...
        'XData',x,...
        'YData',y,...
        'ZData',z,...
        'FaceColor','texturemap',...
        'CDataMapping','scaled',...
        'FaceLighting','none',...
        'EdgeColor','none');
    data = new(coorList(:,3)==Zs(i)); % 'new' is presumably loaded from a .mat above -- verify
    set(hSurf,'CData',data)
    pause
end
%%
scatter3(coorList(:,1),coorList(:,2),coorList(:,3),30,flatTarget)
%%
flatTarget2= reshape(Mean,105840,1);
%%
Index1 = 1:72;
Index2 = 1:35;
[X,Y] = meshgrid(Index1,Index2);
X = X(:);
Y= Y(:);
%%
figure
scatter3(X,Y,coorList(1:2520,3),30,flatTarget2(1:2520))
figure()
imshow(Mean(:,:,1),[])
%% unpacking the mean from flat 1d array
Zs = unique(coorList(:,3));
Volume = zeros(80,200,length(Zs));
Counter = zeros(80,200,length(Zs));
for i = 1: length(Zs)
    Z = Zs(i);
    Zindexs = Z == coorList(:,3);
    X = coorList(Zindexs,1);
    Y = coorList(Zindexs,2);
    Data = newMean2(Zindexs);
    X = X - min(X) + 1;
    Y = Y - min(Y) + 1;
    for j = 1:length(X)
        % BUGFIX: incremental mean. The original accumulated
        % Volume*Counter + Data without dividing by the new count, which is
        % only correct when each (X,Y,slice) cell is visited exactly once;
        % dividing makes the running mean correct for duplicates as well
        Volume(X(j),Y(j),i) = (Volume(X(j),Y(j),i)*Counter(X(j),Y(j),i) + ...
            Data(j)) / (Counter(X(j),Y(j),i) + 1);
        Counter(X(j),Y(j),i) = Counter(X(j),Y(j),i)+1;
    end
end
%
writetiff(Volume,'newRegMean2.tif') |
function [ desired_state ] = trajectory_generator(t, qn, W_in, t_in)
% TRAJECTORY_GENERATOR: Turn a Dijkstra or A* path into a trajectory
%
% NOTE: This function would be called with variable number of input
% arguments. In init_script, it will be called with arguments
% trajectory_generator([], [], map, path) and later, in test_trajectory,
% it will be called with only t and qn as arguments, so your code should
% be able to handle that. This can be done by checking the number of
% arguments to the function using the "nargin" variable, check the
% MATLAB documentation for more information.
%
% map: The map structure returned by your load_map function
% path: This is the path returned by your planner (dijkstra function)
%
% desired_state: Contains all the information that is passed to the
% controller, as in phase 2
%
% It is suggested to use "persistent" variables to store map and path
% during the initialization call of trajectory_generator, e.g.
% persistent map0 path0
% map0 = map;
% path0 = path;
% Quintic Interpolation along straight segments with line of sight checks
%
% Here the initialization call passes waypoints W_in (per-robot, per-segment)
% and segment durations t_in; later calls pass the query time t and robot
% index qn and read the precomputed splines from the persistent state.
persistent coeff ts t_cum
% Parameters ====================================
yaw = 0;
yawdot = 0;
% polynomial order parameter passed to min_n_traj; the original note said
% "Min snap" but with n = 3 this presumably selects a lower-order
% (minimum-jerk-style) spline -- TODO confirm against min_n_traj
n = 3; % Min snap
% Pre-compute ===================================
% initialization call: empty t means "store waypoints and fit splines"
if isempty(t)
[N,~,n_wp] = size(W_in);
% Save waypoint segment durations
ts = t_in;
% t_cum(k) is the absolute start time of segment k
t_cum = cumsum([0,ts(1:(end-1))]);
% Calculate and save spline coefficients
% coeff{wp,robo} holds the spline for robot robo on segment wp, with
% zero boundary derivatives at both segment endpoints
for wp = 1:(n_wp-1)
for robo = 1:N
coeff{wp,robo} = min_n_traj([W_in(robo,:,wp);W_in(robo,:,wp+1)],zeros(n-1,3),zeros(n-1,3),...
n,[0 ts(wp)]');
end
end
return
end
% Run ===========================================
% Find which waypoint segment we are in
seg = find(t >= t_cum, 1, 'last');
% t_s is the time elapsed within the current segment
t_s = t - t_cum(seg);
% Calculate desired position and derivatives from splines
% (follower returns rows: position, velocity, acceleration)
pos = follower(coeff{seg,qn},n,[0 ts(seg)]',t_s);
% Output desired state
desired_state.pos = pos(1,:)';
desired_state.vel = pos(2,:)';
desired_state.acc = pos(3,:)';
desired_state.yaw = yaw;
desired_state.yawdot = yawdot;
end
|
%load data.mat
%copy this:
%stats=runCrawford(patientScores,controlMean,controlSd,nC,1);
function stats=runCrawford(patientScores,controlMean,controlSd,nC, ...
plotFig,allControls)
% runCrawford -- Crawford & Howell modified t-test comparing one patient's
% scores against control summary statistics, independently per condition.
%
% INPUTS
%   patientScores -- 1 x nCond vector of patient scores (one per condition)
%   controlMean   -- 1 x nCond vector of control means
%   controlSd     -- 1 x nCond vector of control standard deviations
%   nC            -- number of controls in the normative sample
%   plotFig       -- if true, plot cutoffs, control lines and patient scores
%   allControls   -- (optional) nControls x nCond matrix of individual
%                    control scores, drawn as extra lines on the plot
%
% OUTPUT
%   stats -- struct with fields t, df, p (per condition) and CI
%            (per-condition row: 95% bounds then 99% bounds)
% structure of input
mC=controlMean;%mean
sC=controlSd;%STD
nCond=length(mC);%each column is a variable
[~,d]=size(patientScores);%each column is a variable
% check nCond matches for patient + control Data
if(d~=nCond)
    error('MisMatch N Conditions');
end
% to estimate
t=zeros(nCond,1);
df=zeros(nCond,1);
p=zeros(nCond,3); %% has each condition on diff row
CI=zeros(nCond,4); %% each condition on diff. row, 1st 2 num 95%, 2nd two 99% CI
% run crawford independently for each condition of data
% NOTE: patientScores is indexed linearly, so a single row vector is assumed
for i=1:nCond
    [t(i),df(i),p(i,:),CI(i,:)]=crawford_tCI(patientScores(i),mC(i),sC(i),nC);
end
% and plot the results with a nice error bar plot
if(plotFig)
    CI2=abs(CI-repmat(mC',1,4));
    close all
    figure('position',[100,100,1200,1200])
    %Crawford cuttoff errorbar
    h=errorbar(1:nCond,mC,CI2(:,1),'Color',[0 0 0],'LineWidth',6,'LineStyle','none');
    hold on
    line(1:nCond,mC,'Color',[0 0 0],'LineWidth',6,'LineStyle','--')
    h.CapSize = 24;
    %all control lines
    % BUGFIX: allControls is an optional argument -- guard against it being
    % absent; also iterate over rows (controls) explicitly, because
    % length() returns the largest dimension and indexed out of range
    % whenever nCond > nControls
    if exist('allControls','var') && ~isempty(allControls)
        for controlN = 1:size(allControls,1)
            hold on
            line(1:nCond,allControls(controlN,:),'Color',[0 0.8314 0.9608],'LineWidth',2,'LineStyle','--')
        end
    end
    %Patient circle markers
    hold on
    plot(1:nCond,patientScores,'o','MarkerSize',40,'LineWidth',7, ...
        'Color',[1 0 1])
end
% output
stats=[];
stats.t=t';
stats.df=df;
stats.p=p';
stats.CI=CI; |
function a_init = genInitActions(policy, J, type, actionTitles, varargin)
%genInitActions generate distributions for actions in initial rollouts.
%
% 1=gaussian, 2=Uniform, 3=Random Walk (Brownian), 4=sinusoid
%
% INPUTS
%   policy       -- policy struct; fields maxU, minU and impIdx are used
%   J            -- number of initial rollouts to generate
%   type         -- distribution type (see list above)
%   actionTitles -- cell array with one title per impedance action (legend)
%   varargin     -- per-type extras: time vector t, horizon H, diffusion
%                   ratio / convergence speed (type 3), frequency + jitter
%                   (type 4)
%
% OUTPUT
%   a_init -- 1 x J cell array; each cell is [t, actions] for one rollout
%% Code
a_init = cell(1,J);
% Normally distributed initial rollouts:
if type==1
    t = varargin{1};
    H = varargin{2};
    initMean = policy.maxU; initVar = policy.maxU.*2;
    for i=1:J
        a_init{i} = [t(1:H),gaussian(initMean,diag(initVar),H)'];
    end
elseif type==2
    % Uniformly distributed initial rollouts:
    t = varargin{1};
    H = varargin{2};
    for i=1:J
        a_init{i} = [t(1:H), repmat(policy.minU(policy.impIdx),H,1) + repmat(policy.maxU(policy.impIdx).*2-policy.minU(policy.impIdx),H,1).*rand(H,2)];
    end
elseif type==3
    % Ohrnstein-Uhlenbeek Stochastic Process initial rollouts:
    ou_opts = sdeset('RandSeed',2);
    t = varargin{1};
    if nargin > 5
        sig_ratio = varargin{2};
        if nargin > 6
            th = ones(1,J)*varargin{3}; % convergence speed
        else
            th = ones(1,J)*0.3;
        end
    else
        sig_ratio = 5; % diffusion ratio
        th = ones(1,J)*0.3;
    end
    sig = repmat(policy.maxU(1)*2/sig_ratio(1),J,1); % spread
    startPoint = [policy.minU(policy.impIdx);
        policy.maxU(policy.impIdx).*2;
        policy.maxU(policy.impIdx)*1.25];
    mu = [policy.maxU(policy.impIdx).*2;
        policy.minU(policy.impIdx);
        policy.maxU(policy.impIdx)*1.25];
    for i=1:J
        % clamp each OU sample path into [minU, 2*maxU]
        a_init{i} = [t, min(policy.maxU(1)*2,max(policy.minU(1),sde_ou(th(i),mu(i,:),sig(i),t,startPoint(i,:),ou_opts)))];
    end
elseif type==4
    % Phase-jittered sinusoid initial rollouts:
    t = varargin{1};
    f = varargin{2};
    jitter = varargin{3};
    for i=1:J
        a_init{i} = [t, repmat(policy.maxU(policy.impIdx),length(t),1).*[sin(f*t + rand), cos(f*t + rand)]];
        a_init{i}(:,2:end) = a_init{i}(:,2:end) + repmat(policy.maxU(policy.impIdx),length(t),1) + rand(length(t),size(a_init{i},2)-1).*repmat(policy.maxU(policy.impIdx)./jitter,length(t),1);
    end
end
%% Plot Results:
if ~ishandle(22)
    figure(22);
else
    set(0,'CurrentFigure',22);
end
clf(22);
for i=1:J
    % fixed property-name typo: 'Linewidt' -> 'LineWidth'
    stairs(a_init{i}(:,2:end),'LineWidth',1.5); hold on;
end
axis tight
grid minor;
title('\fontsize{16}Initial Stiffness Trajectories','Interpreter','Tex');
xlabel('\fontsize{16}Time steps [d_t = 0.1]','Interpreter','Tex');
ylabel('\fontsize{16} K_P [N/m]','Interpreter','Tex');
% BUGFIX: get(gca,'XTick') returns numeric tick values, not graphics
% handles, so set(xt, ...) raised an error; set the font on the axes instead
set(gca, 'FontSize', 16)
% number of impedance actions per rollout; the original hard-coded 2 here,
% which broke the legend whenever impIdx selected a different count
nU = length(policy.maxU(policy.impIdx));
legendVec = cell(1,J*nU);
for j=1:J
    for u=1:nU
        entry = strcat(actionTitles(u),' (J=',num2str(j),')');
        legendVec{u+nU*(j-1)} = entry{1};
    end
end
legend(legendVec);
end |
% NMEA parser
% Reads NMEA sentences from nmea1.txt, verifies each sentence checksum and
% writes lat,lon,height of GPS-fixed GGA sentences to nmea1.out.
% (Octave syntax: uses "!" for logical not and printf.)
f = fopen('nmea1.txt', 'r');
fo = fopen('nmea1.out', 'w');
while (! feof(f))
    buf = fgetl(f);
    bufa = strsplit(buf, ',');
    % NMEA checksum: XOR of every character between '$' and '*'
    % (assumes the sentence ends with '*XX', so data runs buf(2:end-3))
    cs = double(buf(2));
    for c = buf(3:end-3)
        cs = bitxor(cs, double(c));
    end
    % BUGFIX: use zero-padded two-digit hex and strcmp. The original
    % sprintf('%X',cs) produced a single character for cs < 16 and the
    % elementwise ~= comparison then errored on the length mismatch.
    % (Also removed a dead "bufascii" line that used strcmp where strfind
    % was intended and whose result was never used.)
    if (! strcmp(sprintf('%02X', cs), buf(end-1:end)))
        printf('checksum error\n');
        continue;
    end
    switch (bufa{1}(4:6))
        case 'GGA'
            % GGA, time, latitude, N/S, longitude, E/W, solution type, number of satellites, hdop, height, M, undulation, M, empty, empty, checksum
            if (str2num(bufa{7}) == 1)
                % use only GPS fix
                lat = nmea2deg(bufa{3});
                if (bufa{4} == 'S')
                    lat = -lat;
                end
                lon = nmea2deg(bufa{5});
                if (bufa{6} == 'W')
                    % NOTE(review): western longitudes are mapped to the
                    % [180, 360) range rather than negated -- presumably
                    % intentional for this downstream consumer; verify
                    lon = 360.0 - lon;
                end
                height = str2num(bufa{10});
                fprintf(fo, '%.6f,%.6f,%.2f\n', lat, lon, height);
            end
    end
end
fclose(f);
fclose(fo);
|
% Calibrate to match 1960
% Script: choose ability-distribution parameters (TL, TS) so that the model
% reproduces the 1960 employment shares and relative wages, for a given
% correlation configuration loaded from a previously saved calibration file.
clear
format compact;
%%% employment shares [L M] sectors (based on lswt); 7 rows, presumably
%%% 1950, 1960, 1970, 1980, 1990, 2000, 2007 (original comment listed
%%% garbled years -- verify against the data source)
emp_targets=[0.310725868 0.382496238
0.267881393 0.422307283
0.272726774 0.38019526
0.278642148 0.344125986
0.315280378 0.28167513
0.328471035 0.248585254
0.344552815 0.222892925
];
emp_targets(:,3)=1-emp_targets(:,1)-emp_targets(:,2); % H-sector employment shares
%%% relative wage targets - residual average wages
% avg log wage L and avg log wage H, compared to M;
rw_targets=[
-0.285 -0.032
-0.314 0.021
-0.224 0.081
-0.194 0.078
-0.199 0.136
-0.172 0.173
-0.183 0.213
];
% calibration targets for 1960
emp_targets2 = emp_targets(2,:); % 1960
rw_targets2 = rw_targets(2,:); % 1960
disp_targets2 = 0.187; % from PSID non-agricultural non-transitory component of log wage dispersion
% NOTE(review): options_fsolve is assigned (here and again below) but never
% passed to fsolve -- the calls build their options inline; kept so the
% saved workspace stays unchanged
options_fsolve=optimset('TolFun', 1e-3, 'Display', 'iter', 'MaxFunEvals', 500); % Option to display output
mean_l=1;
mean_m=1;
mean_s=1;
% z_init = [log(0.2), log(0.25), log(0.25)];
z_init = [-1.2275 -0.5614 -0.3453];
for s=2
    % correlations between sector-specific abilities (s=2 gives 0.3 for all pairs)
    corr_lm = 0+0.3*(s-1);
    corr_ms = 0+0.3*(s-1);
    corr_ls = 0+0.3*(s-1);
    % functional form of load instead of eval'ing command syntax: same file,
    % but no string evaluation (brings targets, MU, SIGMA, ... into scope)
    load(['calib_', num2str(corr_lm*10), '_', num2str(corr_ms*10), '_', num2str(corr_ls*10), '.mat']);
    % fixed initial productivities
    AL0 = 1;
    AM0 = 1;
    AS0 = 1;
    productivities1.AL=AL0;
    productivities1.AM=AM0;
    productivities1.AS=AS0;
    % fixed model parameter values
    EPPS=0.2;
    param_fixed.EPPS=EPPS;
    param_fixed.MU=MU;
    param_fixed.SIGMA=SIGMA;
    % find paramters of the ability distribution to match employment
    % and relative wage data in 1960
    options_fsolve=optimset('TolFun', 1e-5, 'Display','off','MaxFunEvals', 1000); % Option to display output
    % initial value for parameters left to calibrate
    guess_TL=1;
    guess_TS=1;
    [xbest1,fval1,exitflag1] = fsolve(@(x)...
        calibration_func_unitwage(x, targets, productivities1, param_fixed),...
        [log(guess_TL), log(guess_TS)],...
        optimset('TolFun', 1e-4, 'TolX', 1e-4, 'Display','iter','MaxFunEvals', 1000));
    % exitflag 0: iteration limit hit -- restart once from the best point found
    if exitflag1==0
        par_init=xbest1;
        [xbest2,fval2,exitflag2] = fsolve(@(x)...
            calibration_func_unitwage(x, targets, productivities1, param_fixed),...
            par_init,...
            optimset('TolFun', 1e-4, 'TolX', 1e-4, 'Display','iter','MaxFunEvals', 1000));
        xbest=xbest2;
    elseif exitflag1<0
        % solver failed -- drop into the debugger for manual inspection
        keyboard
    else
        xbest=xbest1;
    end
    [fval,eq11]=calibration_func_unitwage(xbest, targets, productivities1, param_fixed)
    TL=exp(xbest(1))
    TS=exp(xbest(2))
    % functional save of the whole workspace (same effect as the eval'ed save)
    save(['calib_epps2_', num2str(corr_lm*10), '_', num2str(corr_ms*10), '_', num2str(corr_ls*10), '.mat']);
end |
function do_canonical_variatescva3
% Function implementing the Canonical Variates Analysis method
% written by Prof Geoff Bohling - March 2006
% from the University of Kansas, US
% Modified by Dr William Sellers, University of Manchester, UK to produce various color images
% Modified to read Class text files produced by ImageJ version 1.49v and
% a set of multispectral images
%
% Reads every *.tif image and every class*.txt sample file in the current
% directory, builds a per-sample grouping vector from the class files, runs
% canonical variates analysis, and writes false-colour TIFFs of the first
% three canonical variates under several contrast stretches.
%
% output file prefix is derived from the first tif found
F1 = dir('*.tif');
strname = F1(1,1).name;
new_claim = strrep(strname,'.tif','');
% one class*.txt file per class, as exported by ImageJ
F = dir('class*.txt');
s1 = size(F,1);
for i=1:s1
bin_names{i} = F(i).name;
end
% read each class file: skip the one header line, then parse 7 columns
for i=1:s1
fid(i,1) = fopen(F(i,1).name,'r');
tline = fgetl(fid(i,1));
C{i,1} =textscan(fid(i,1), '%d %f %d %d %d %d %d');
end
% columns 6 and 7 of each class file are collected as X and Y
X(:,1) = C{1,1}(1,6);
Y(:,1) = C{1,1}(1,7);
for i=2:s1
X = [X;C{i,1}(1,6)];
end
for i=2:s1
Y = [Y;C{i,1}(1,7)];
end
% flatten the per-file cell arrays into plain numeric column vectors
index=0;
for i =1:size(Y,1)
for j =1:size(Y{i,1},1)
index = index+1;
YY(index,1) = Y{i,1}(j,1);
end
end
Y=YY;
index=0;
for i =1:size(X,1)
for j =1:size(X{i,1},1)
index = index+1;
XX(index,1) = X{i,1}(j,1);
end
end
X=XX;
for i=1:s1
fclose(fid(i,1));
end
% N = total number of samples across all class files
prst=C{1,1}(1,7);
N= size(prst{1,1},1);
for i=2:s1
prst=C{i,1}(1,7);
N=N+size(prst{1,1},1);
end
% build one class-label character per sample ('1','2',...) plus the
% list of distinct class letters used for the lookup below
index=0;
for j=1:s1
prst=C{j,1}(1,7);
bin_letters(j,1) =sprintf('%d',j);
for i=1:size(prst{1,1},1)
index = index +1;
Class1(index,1) = sprintf('%d',j);
%bin_letters(index) = sprintf('%d',j);
end
end
n_images = length(F1);
n_samples = size(X,1);
% NOTE(review): data_matrix is allocated but never filled with pixel values
% before being passed to canonize below, so CVA runs on an all-zero matrix.
% Presumably it should hold image_list(Y(i),X(i),:) per sample -- confirm.
data_matrix = zeros(n_samples, n_images);
grouping_vector = zeros(n_samples, 1);
image_list = {};
for i_image = 1: n_images
indices(i_image) = i_image;
file_path = fullfile(F1(i_image).name);
fprintf('Reading %s\n', file_path);
data1 = imread(file_path);%reads a grayscale or a colour photo
% NOTE(review): this tests the column count; a colour-image test would
% normally check size(data1,3) > 1 -- confirm the intent
if size(data1,2) > 1
data = data1(:,:,1);
else
data = data1;
end
% first image fixes the dimensions for the whole uint8 image stack
if i_image == 1
[height, width] = size(data);
image_list = zeros(height, width, n_images, 'uint8');
end
image_list(:, :, i_image) = data;
% map each sample's class letter to a group number
% (recomputed identically on every image iteration)
for i_sample = 1: n_samples
index = match_string(bin_letters, Class1(i_sample));
if index > 0
grouping_vector(i_sample) = index;
end
end
end
tic
[coef,score,ev,S,B] = canonize(data_matrix, grouping_vector);
size(S);
size(B);
size(ev);
% project every pixel onto the first three canonical variates
new_image_cv1 = zeros(height, width);
new_image_cv2 = zeros(height, width);
new_image_cv3 = zeros(height, width);
% NOTE(review): new_image_cv4 is never computed, so range_cv4 below maps a
% constant (all-zero) image
new_image_cv4 = zeros(height, width);
image_list_cols = reshape(image_list, [width*height, n_images]); % this puts the data for each image data into a single column
image_list_cols(1:100)
new_image_cv1 = double(image_list_cols) * double(coef(:, 1));
new_image_cv2 = double(image_list_cols) * double(coef(:, 2));
new_image_cv3 = double(image_list_cols) * double(coef(:, 3));
new_image_cv1 = reshape(new_image_cv1, [height, width]);
new_image_cv2 = reshape(new_image_cv2, [height, width]);
new_image_cv3 = reshape(new_image_cv3, [height, width]);%h - 7216
% stretch each variate over its own full min..max range
range_cv1 = range_map(new_image_cv1, min(min(new_image_cv1)), max(max(new_image_cv1)), 0, 255, 0, 255);
range_cv2 = range_map(new_image_cv2, min(min(new_image_cv2)), max(max(new_image_cv2)), 0, 255, 0, 255);
range_cv3 = range_map(new_image_cv3, min(min(new_image_cv3)), max(max(new_image_cv3)), 0, 255, 0, 255);
range_cv4 = range_map(new_image_cv4, min(min(new_image_cv4)), max(max(new_image_cv4)), 0, 255, 0, 255);
% CV1/CV2/CV3 become the R/G/B planes of a false-colour composite
new_image_colour = zeros(height, width, 3, 'uint8');
new_image_colour(:,:,1) = uint8(range_cv1);
new_image_colour(:,:,2) = uint8(range_cv2);
new_image_colour(:,:,3) = uint8(range_cv3);
imwrite(new_image_colour(:,:,:), strcat(new_claim,'image_rangecva.tif'), 'tiff', 'compression', 'lzw');
% same composite, but stretched over the range of the sample scores
score_cv1 = range_map(new_image_cv1, min(score(:,1)), max(score(:,1)), 0, 255, 0, 255);
score_cv2 = range_map(new_image_cv2, min(score(:,2)), max(score(:,2)), 0, 255, 0, 255);
score_cv3 = range_map(new_image_cv3, min(score(:,3)), max(score(:,3)), 0, 255, 0, 255);
new_image_colour(:,:,1) = uint8(score_cv1);
new_image_colour(:,:,2) = uint8(score_cv2);
new_image_colour(:,:,3) = uint8(score_cv3);
imwrite(new_image_colour(:,:,:), strcat(new_claim,'image_score_rangecva.tif'), 'tiff', 'compression', 'lzw');
% percentile-clipped stretches: the first half of the list are the low cut
% points, the second half the matching high cut points (0.01<->99.99 etc.)
required_percentiles = [0.01, 0.1, 1, 5, 99.99, 99.9, 99, 95];
n_percentiles = length(required_percentiles) / 2;
percentiles_cv1 = Percentile(new_image_cv1, required_percentiles);
percentiles_cv2 = Percentile(new_image_cv2, required_percentiles);
percentiles_cv3 = Percentile(new_image_cv3, required_percentiles);
for i = 1: n_percentiles
fprintf('Processing percentile %f\n', required_percentiles(i));
per_cv1 = range_map(new_image_cv1, percentiles_cv1(i), percentiles_cv1(i + n_percentiles), 0, 255, 0, 255);
per_cv2 = range_map(new_image_cv2, percentiles_cv2(i), percentiles_cv2(i + n_percentiles), 0, 255, 0, 255);
per_cv3 = range_map(new_image_cv3, percentiles_cv3(i), percentiles_cv3(i + n_percentiles), 0, 255, 0, 255);
new_image_colour(:,:,1) = uint8(per_cv1);
new_image_colour(:,:,2) = uint8(per_cv2);
new_image_colour(:,:,3) = uint8(per_cv3);
imwrite(new_image_colour(:,:,:), strcat(new_claim,sprintf('image_percentile_%0.2fcva.tif', required_percentiles(i))), 'tiff', 'compression', 'lzw');
end
toc
function [coef,score,ev,S,B] = canonize(X,grp)
% Performs canonical variate analysis of the data in X.
% gcb, 04 March 2006
% X:     N x D data matrix, data points in rows, variables in columns
% grp:   grouping vector, containing integers between 1 and K
%        where K is the number of groups
% coef:  coefficients for forming canonical variates
%        (eigenvectors of inv(S)*B), ordered by descending eigenvalue
% score: canonical variate scores, score = X*coef
% ev:    generalized eigenvalues sorted in descending order
% S:     within-groups covariance matrix (pooled, divisor N-K)
% B:     between-groups covariance matrix
%
% Changes from the original: leftover no-op debug statements removed, and
% group means computed with mean(...,1) so a group containing a single
% sample no longer collapses its 1 x D row to a scalar.
N = size(X,1); % number of data (rows)
D = size(X,2); % number of variables (columns)
K = max(grp);  % number of groups
xmg = mean(X,1);  % global mean vector (1 x D)
xmk = zeros(K,D); % group mean vectors
nk = zeros(K,1);  % number of data per group
for k = 1:K
    members = X(grp==k,:);
    xmk(k,:) = mean(members,1); % dim argument keeps 1 x D even for one row
    nk(k) = size(members,1);
end
% within-groups covariance: outer products of deviations from each point's
% own group mean, pooled with divisor N-K
S = zeros(D,D);
for i = 1:N
    xdiff = X(i,:) - xmk(grp(i),:); % row vector deviation
    S = S + xdiff'*xdiff;
end
S = S/(N-K);
% between-groups covariance: group-size-weighted outer products of the
% group-mean deviations from the global mean
B = zeros(D,D);
for k = 1:K
    B = B + nk(k)*(xmk(k,:)-xmg)'*(xmk(k,:)-xmg);
end
B = ((K/(K-1)).*B)./N;
% generalized eigenproblem B*v = lambda*S*v, then order the variates by
% descending eigenvalue so coef(:,1) is the most discriminating direction
[coef,ev] = eig(B,S);
[ev,iev] = sort(diag(ev),1,'descend');
coef = coef(:,iev);
% compute matrix of scores
score = X*coef;
return
% this function returns the index of the matching element in a list
% it returns 0 on error
function index = match_string(list, target)
% Walk the list from the front and stop at the first entry that compares
% equal to the target; 0 signals "not found".
index = 0;
pos = 1;
while pos <= length(list)
    if strcmp(list(pos), target)
        index = pos;
        return
    end
    pos = pos + 1;
end
return
% this function reads data from a whitespace delimited ascii file
% and creates a map object with the colum headings as keys
function [output_map] = read_named_columns_as_map(file_name)
% Build a containers.Map keyed on column heading.  A duplicate heading is
% reported on stdout and the later column overwrites the earlier one.
[names, data] = read_named_columns(file_name);
output_map = containers.Map();
n_cols = length(names);
for col = 1: n_cols
    key = names{col};
    if isKey(output_map, key)
        fprintf('Duplicate column heading: %s\n', key)
    end
    output_map(key) = data{col};
end
return
% this function reads data from a whitespace delimited ascii file
% with colum headings
% it tries to work out whether the columns contain numbers or text
function [names, data] = read_named_columns(file_name)
% names: cell array of column-heading strings taken from the first line
% data:  cell array with one cell per column; numeric column vectors when
%        every column of the first data line parses as a number, otherwise
%        whatever textscan yields for the inferred %f/%s format
fprintf('Reading "%s"\n', file_name);
% read the data
[fid, message] = fopen(file_name);
if (fid == -1)
error(message);
end
header_line = fgetl(fid); % reads line without end of line character
data_start_position = ftell(fid); % remember where the data rows begin
names = regexp(header_line, '\S*', 'match');
% now try parsing the first data line
% each whitespace-delimited token that matches a signed decimal with an
% optional exponent gets %f in the format string; anything else gets %s
test_line = fgetl(fid);
test_data = regexp(test_line, '\S*', 'match');
format_str = '';
all_numbers = 1;
num_cols = length(test_data);
for i = 1: num_cols
match = regexp(test_data{i}, '^([+-]?)(?=\d|\.\d)\d*(\.\d*)?([Ee]([+-]?\d+))?$', 'match');
if isempty(match)
format_str = [format_str '%s'];
all_numbers = 0;
else
format_str = [format_str '%f'];
end
end
% now read the data in the file for real
% rewind to just after the header so the test line is re-read as data
fseek(fid, data_start_position, 'bof');
if all_numbers == 0
% mixed text/number columns: let textscan split per-column
data = textscan(fid, format_str);
else
% all-numeric: slurp the remainder and sscanf it in one pass, then
% reshape column-major into num_cols rows (one per file column)
raw_data = fread(fid,'uint8=>char');
data_array = reshape(sscanf(raw_data, format_str), num_cols, []);
data = {};
for i = 1: num_cols
data{i} = data_array(i, :)';
end
end
fclose(fid);
return
% function to produce a scatter plot identified by symbol and colour
function two_group_scatter(x, y, symbol_group, colour_group, symbols, colours)
% An invisible scatter establishes the axes; every point is then drawn as a
% centred text glyph picked by symbol_group and coloured by colour_group.
scatter(x, y, 'Marker', 'none');
n_points = length(x);
for p = 1: n_points
    glyph = symbols(symbol_group(p));
    tint = colours(colour_group(p));
    text(x(p), y(p), glyph, 'Color', tint, 'HorizontalAlignment', 'center', 'VerticalAlignment', 'middle');
end
% function to produce a scatter plot identified by symbol
function one_group_scatter(x, y, symbol_group, symbols)
% An invisible scatter establishes the axes; every point is then drawn as a
% centred black text glyph picked by symbol_group.
scatter(x, y, 'Marker', 'none');
n_points = length(x);
for p = 1: n_points
    glyph = symbols(symbol_group(p));
    text(x(p), y(p), glyph, 'Color', 'k', 'HorizontalAlignment', 'center', 'VerticalAlignment', 'middle');
end
% function to produce a 3d scatter plot identified by colour
% colour_group can contain numbers or letters
function one_group_3d_scatter(x, y, z, colour_group, markers, colours)
% Groups the points by their colour_group key, then draws one plot3 series
% per distinct key.
% NOTE(review): series are drawn in keys() order (containers.Map returns
% keys sorted), and markers{i}/colours{i} are indexed by that sorted
% position -- confirm the two lists are supplied in matching order.
x_data_map = containers.Map();
y_data_map = containers.Map();
z_data_map = containers.Map();
% accumulate each point's coordinates under its group key
for i = 1: length(colour_group)
tf = isKey(x_data_map, colour_group{i});
if tf ~= 0
% key already seen: append to the three coordinate lists
data = x_data_map(colour_group{i});
data(end + 1) = x(i);
x_data_map(colour_group{i}) = data;
data = y_data_map(colour_group{i});
data(end + 1) = y(i);
y_data_map(colour_group{i}) = data;
data = z_data_map(colour_group{i});
data(end + 1) = z(i);
z_data_map(colour_group{i}) = data;
else
% first point for this key starts a new series
x_data_map(colour_group{i}) = [x(i)];
y_data_map(colour_group{i}) = [y(i)];
z_data_map(colour_group{i}) = [z(i)];
end
end
allKeys = keys(x_data_map);
hold on;
% one marker-only plot3 series per group
for i = 1: length(allKeys)
plot3(x_data_map(allKeys{i}), y_data_map(allKeys{i}), z_data_map(allKeys{i}), 'LineStyle', 'none', 'Marker', markers{i}, 'color', colours{i});
end
return
% this function maps the dynamic range of an image from one range to another
function out_image = range_map(in_image, in_low, in_high, out_low, out_high, out_range_low, out_range_high)
% Linearly maps values from [in_low, in_high] onto [out_low, out_high],
% rounding half-up; values below in_low clamp to out_range_low and values
% above in_high clamp to out_range_high.
% in_image: 2-D numeric array of any class; the output has the same size
% and class, each element being the rounded, uint8-saturated mapped value
% (matching the original per-pixel uint8(floor(v2+0.5)) assignment).
%
% Vectorised: the original visited every pixel in a nested double loop.
v = double(in_image);
% linear map first, then overwrite the out-of-range regions with the clamps
v2 = ((v - in_low) ./ (in_high - in_low)) .* (out_high - out_low) + out_low;
v2(v < in_low) = out_range_low;
v2(v > in_high) = out_range_high;
% preserve the input's class exactly as the original loop did: the uint8
% result is element-assigned back into an array of the original class
out_image = in_image;
out_image(:) = uint8(floor(v2 + 0.5));
return
% calculate the percentiles of a list of values
function percentile_values = Percentile(arr, percentiles)
% Nearest-rank percentile: flatten and sort the input, then for each
% requested p pick the element at 1-based position round(1 + p/100*(n-1)).
% The output has the same shape as the percentiles argument.
sorted_vals = sort(arr(:)');
n_vals = length(sorted_vals);
percentile_values = percentiles;
for k = 1: length(percentiles)
    pos = round(1 + (percentiles(k) / 100) * (n_vals - 1));
    percentile_values(k) = sorted_vals(pos);
end
return
% function to convert a list of numbers to an equivalent list of strings
function names = num2stringlist(numbers)
% One num2str per element; the result is always a column cell array,
% regardless of the input's orientation.
names = arrayfun(@(v) num2str(v), numbers(:), 'UniformOutput', false);
return
|
%% B3
clear,clc,close all;
% Vp(s)/Vm(s)
% These values are taken from PartA.m
km = 82;        % motor gain constant
alpha = 30;     % motor pole location
% Known Resistor Values
R2 = 33e3;
R1 = 10e3;
% Removed the -ve as it shouldn't matter as it only controls the direction of the motor.
k = R2/R1;      % op-amp gain
% Cascaded forward path: G(s) = k*km / (s*(s + alpha))
num = k*km;
den = [1 alpha 0];
sys = tf(num, den);
%% B4
% Closed loop transfer function (unity feedback)
Gc = feedback(sys,1);
%% B5
% Calculate the characteristics of the response
stepinfo(Gc)
[wn, zeta] = damp(Gc);
% damp returns one entry per pole; both complex poles share the same
% natural frequency and damping ratio, so keep the first entry only
wn = wn(1);
zeta = zeta(1);
Ts = 4 / (zeta*wn);                          % 2% settling time
OS = exp((-zeta*pi)/sqrt(1-(zeta^2)))*100;   % percent overshoot
Tp = pi/(wn*sqrt(1-(zeta^2)));               % time to first peak
t = linspace(0,1,100);
y1 = step(0.5 * Gc,t);                       % response to a 0.5 V step
% Plot the initial response
figure(1)
plot(t,y1);
title('\bf\itStep response closed loop model');
xlabel('\bf\itTime (s)');
ylabel('\bf\itVoltage (V)');
%% B6
% Find the gain required for a 5% overshoot
percentOS = 5 / 100;
% invert the overshoot formula for the required damping ratio
zetaB6 = -log(percentOS) / sqrt(pi^2 + log(percentOS)^2);
wnB6 = alpha / (2 * zetaB6);
% To get 5% overshoot make the numerator and co-efficient of s^0 the same.
% Therefore km * kB6 = wnB6^2
kB6 = wnB6^2 / km;
num = kB6 * km;
den = [1 (2 * zetaB6 * wnB6) (wnB6^2)];
GB6 = tf(num,den);
% Display results to verify a 5% overshoot
stepinfo(GB6)
%% B7
load 'test_b_day2_1.mat';
% Find where the time vector goes above zero and start all the vectors at
% the same point
te = te(197:439);
ye = ye(197:439);
ye(ye < 0) = 0;
te = te - 0.0958;   % remove the trigger offset
te(te < 0) = 0;
te = te(16:end);
ye = ye(16:end);
% Generate the step responses
ye = smooth(ye);
y1 = step(0.5 * Gc, te);
% Plot measured data against the simulated data
figure(2)
hold on;
plot(te, ye, 'b-');
plot(te, y1, 'r-');
title('\bfClosed loop response (no overshoot)');
% legend labels must come before parameter/value pairs such as 'Location'
legend('Experimental Data', 'Calculated Data', 'Location', 'NorthWest');
xlabel('\bf\itTime (s)');
ylabel('\bf\itVoltage (V)');
hold off;
%% B8
load('OvershootData.mat')
Volt1 = smooth(Volt1);
% Get the %OS value
% (local names used here rather than shadowing the built-ins max/min,
% which the original did and which broke any later max()/min() calls)
peak_val = max(Volt1);
ss_val = mean(Volt1(649:end)); % mean of the steady state
OS = (peak_val - ss_val) / ss_val
% Plot the input and output of the system
figure(3)
hold on;
grid on;
grid minor;
plot(second,Volt);
plot(second,Volt1,'r-');
plot_title = sprintf('Experimental Data - Closed loop Response ( %0.2f %% overshoot)', OS*100);
title(plot_title,'FontWeight','bold');
legend('Input Step', 'Output Response', 'Location', 'NorthWest');
xlabel('\bf\itTime (s)');
ylabel('\bf\itVoltage (V)');
hold off;
|
function output = Pairwise_Dis(Centers_X,Centers_Y)
% Pairwise Euclidean distance matrix between N points.
% Centers_X, Centers_Y: coordinate vectors (row or column) of length N
% output: N x N symmetric matrix with zero diagonal, where
%         output(i,j) = distance between points i and j
%
% Vectorised with bsxfun (compatible back to old releases) instead of the
% original O(N^2) scalar double loop; results are identical.
dx = bsxfun(@minus, Centers_X(:), Centers_X(:).');
dy = bsxfun(@minus, Centers_Y(:), Centers_Y(:).');
output = sqrt(dx.^2 + dy.^2);
end
function dist = chordal_dist(theta1, theta2, phi)
% Chordal distance between two directions separated by polar angles
% theta1, theta2 and azimuthal angle phi.
half_dtheta = (theta1 - theta2) / 2;
half_phi = phi / 2;
dist = 2 * sqrt(sin(half_dtheta)^2 + sin(theta1) * sin(theta2) * sin(half_phi)^2);
end
% ctrl_figure_stuff
% Callback dispatcher for the REMORA HRP processing GUI: each 'input'
% string is one button/control action.  All state is exchanged through the
% global REMORA and PARAMS structures.
% k selects which disk-path browse field to fill; it is used by the 'get'
% case only -- the other cases shadow k as a loop variable.
function ctrl_figure_stuff(input, k)
global REMORA PARAMS
switch input
case 'get'
% browse button: put the chosen directory into disk field k
set(REMORA.fig.disk_handles{k}, 'String', uigetdir);
case 'ctn_disk'
% get string from each of the disk fields
blanks = false;
for k = 1:length(REMORA.hrp.dfs)
str = get(REMORA.fig.disk_handles{k}, 'String');
if isempty(str)
% flag empty fields red and keep the dialog open
set(REMORA.fig.disk_handles{k}, 'BackgroundColor', 'red');
blanks = true;
else
set(REMORA.fig.disk_handles{k}, 'BackgroundColor', 'white');
end
REMORA.hrp.saveloc{k} = str;
end
% if missing fields
if blanks
return
end
% close and resume
uiresume(REMORA.fig.hrp);
close(REMORA.fig.hrp);
REMORA.fig = rmfield(REMORA.fig, 'browsebois');
REMORA.fig = rmfield(REMORA.fig, 'disk_handles');
case 'ctn_ltsa'
% collect LTSA settings: per-slot enabled flag, averaging time (tave)
% and frequency resolution (dfreq)
REMORA.hrp.ltsas = zeros(1, 3);
REMORA.hrp.tave = zeros(1, 3);
REMORA.hrp.dfreq = zeros(1, 3);
% check whether or not each radio button is activated
blanks = false;
for k = 1:length(REMORA.hrp.dfs)
state = get(REMORA.fig.radio{k}, 'Value');
REMORA.hrp.ltsas(k) = state;
if state
% only validate the edit boxes of enabled slots
tave = get(REMORA.fig.tave_fig{k}, 'String');
dfreq = get(REMORA.fig.dfreq_fig{k}, 'String');
if isempty(tave)
blanks = true;
set(REMORA.fig.tave_fig{k}, 'BackgroundColor', 'red');
else
% NOTE(review): str2num evaluates arbitrary expressions;
% str2double would be safer for plain numeric fields
REMORA.hrp.tave(k) = str2num(tave);
set(REMORA.fig.tave_fig{k}, 'BackgroundColor', 'white');
end
if isempty(dfreq)
blanks = true;
set(REMORA.fig.dfreq_fig{k}, 'BackgroundColor', 'red');
else
REMORA.hrp.dfreq(k) = str2num(dfreq);
set(REMORA.fig.dfreq_fig{k}, 'BackgroundColor', 'white');
end
end
end
if blanks
return
end
% close figure and clear unnecessary figure fields
uiresume(REMORA.fig.hrp);
close(REMORA.fig.hrp);
REMORA.fig = rmfield(REMORA.fig,'dfreq_fig');
REMORA.fig = rmfield(REMORA.fig, 'tave_fig');
REMORA.fig = rmfield(REMORA.fig, 'radio');
case 'rad_rf'
% "whole disk" radio toggled
state = get(REMORA.fig.wholeradio, 'Value');
% if whole disk selected disable edit boxes
if state
set(REMORA.fig.start,'Enable','off');
set(REMORA.fig.end,'Enable','off');
else
set(REMORA.fig.start,'Enable','on');
set(REMORA.fig.end,'Enable','on');
end
case 'ctn_rf'
% collect raw-file range: whole disk, or explicit start/end numbers,
% plus an optional comma-separated skip list
state = get(REMORA.fig.wholeradio, 'Value');
sk = get(REMORA.fig.skip, 'String');
blanks = false;
% if we want whole disk processed
if state
% rf_end = 0 is the sentinel for "process to the end"
REMORA.hrp.rf_start = 1;
REMORA.hrp.rf_end = 0;
else
s = get(REMORA.fig.start, 'String');
e = get(REMORA.fig.end, 'String');
if isempty(s)
set(REMORA.fig.start,'BackgroundColor','red');
blanks = true;
else
set(REMORA.fig.start,'BackgroundColor','white');
REMORA.hrp.rf_start = str2num(s);
end
if isempty(e)
set(REMORA.fig.end,'BackgroundColor','red');
blanks = true;
else
set(REMORA.fig.end,'BackgroundColor','white');
REMORA.hrp.rf_end = str2num(e);
end
end
% assign rf #s to skip
if isempty(sk)
REMORA.hrp.rf_skip = [];
else
sk = strsplit(sk, {',',', '});
REMORA.hrp.rf_skip = [];
for k = 1:length(sk)
rf = str2num(sk{k});
REMORA.hrp.rf_skip = [REMORA.hrp.rf_skip, rf];
end
end
% return for rest of numbers or continue; clear REMORA fields
if blanks
return;
end
uiresume(REMORA.fig.hrp);
close(REMORA.fig.hrp);
REMORA.fig = rmfield(REMORA.fig, 'wholeradio');
REMORA.fig = rmfield(REMORA.fig, 'end');
REMORA.fig = rmfield(REMORA.fig, 'start');
REMORA.fig = rmfield(REMORA.fig, 'skip');
case 'enb'
% "fix times" checkbox enables/disables the dependent checkbox
state = get(REMORA.fig.fix_rad, 'Value');
if state
set(REMORA.fig.fix_files_rad, 'Enable', 'on');
else
% disabling also clears the dependent value
set(REMORA.fig.fix_files_rad, 'Enable', 'off');
set(REMORA.fig.fix_files_rad, 'Value', 0);
end
case 'save'
% don't want to save figure handles so temporarily remove
temp = REMORA.fig;
REMORA = rmfield(REMORA,'fig');
try
name = 'my_procparams';
[savefile, savepath] = uiputfile('*.mat', 'Save file',name);
save(fullfile(savepath, savefile), 'REMORA', 'PARAMS');
catch
% best-effort save: cancel or bad path is reported, not fatal
disp('Invalid file selected or cancel button pushed.')
end
% fix REMORA field
REMORA.fig = temp;
case 'go'
% read all option toggles into REMORA/PARAMS, then tear the dialog
% down and drop its handle fields
REMORA.hrp.fixTimes = get(REMORA.fig.fix_rad, 'Value');
REMORA.hrp.rmfifo = get(REMORA.fig.rmfifo_rad, 'Value');
REMORA.hrp.resumeDisk = get(REMORA.fig.resume_rad, 'Value');
PARAMS.dflag = get(REMORA.fig.disp_rad, 'Value');
REMORA.hrp.diary_bool = get(REMORA.fig.diary_rad, 'Value');
REMORA.hrp.use_mod = get(REMORA.fig.fix_files_rad, 'Value');
uiresume(REMORA.fig.hrp);
close(REMORA.fig.hrp);
REMORA.fig = rmfield(REMORA.fig, 'fix_rad');
REMORA.fig = rmfield(REMORA.fig, 'rmfifo_rad');
REMORA.fig = rmfield(REMORA.fig, 'resume_rad');
REMORA.fig = rmfield(REMORA.fig, 'disp_rad');
REMORA.fig = rmfield(REMORA.fig, 'diary_rad');
REMORA.fig = rmfield(REMORA.fig, 'fix_files_rad');
end
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.