FuzzySystems/Work 4/source/plot_results1.m

function plot_results1(results, classLabels, cfg)
% PLOT_RESULTS1 — Scenario 1 plotting suite (Classification)
% Generates and saves:
% (A) Confusion matrix per model
% (B) PA/UA bars per model
% (C) Membership functions BEFORE/AFTER training per model
% (D) Learning curves (train/val error) per model
% (E) Predictions vs Truth (test set) per model
% (F) OA and Kappa across models (bars)
% (G) Rules vs Accuracy scatter
%
% All PNGs saved under cfg.outDir.
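%
% Example call (illustrative sketch; the class labels and output folder are
% placeholders, and the RESULTS fields listed are simply the ones read below):
%   cfg.outDir = 'out_scn1';
%   % results(i) fields used: mode, radius, nRules, initFis, fis, trError,
%   % ckError, yhat, ytrue, and metrics.{OA, Kappa, confMat, PA, UA}
%   plot_results1(results, {'Class 1','Class 2','Class 3'}, cfg);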
outDir = cfg.outDir;
if ~exist(outDir,'dir'), mkdir(outDir); end
nRuns = numel(results);
OA = zeros(nRuns,1);
Kap = zeros(nRuns,1);
nRules = zeros(nRuns,1);
modes = strings(nRuns,1);
radii = zeros(nRuns,1);
for i = 1:nRuns
    OA(i)     = results(i).metrics.OA;
    Kap(i)    = results(i).metrics.Kappa;
    nRules(i) = results(i).nRules;
    modes(i)  = string(results(i).mode);
    radii(i)  = results(i).radius;
end
% -------- Per-model plots --------
for i = 1:nRuns
    tag = sprintf('run%02d_%s_r%.2f_rules%d', ...
        i, results(i).mode, results(i).radius, results(i).nRules);

    % (A) Confusion matrix
    fig = figure('Color','w');
    confusionchart(results(i).metrics.confMat, string(classLabels), ...
        'Title', sprintf('Confusion — %s (r=%.2f, rules=%d)', ...
        results(i).mode, results(i).radius, results(i).nRules));
    exportgraphics(fig, fullfile(outDir, ['cm_' tag '.png']), 'Resolution', 200);
    close(fig);
    % (B) PA / UA bars
    fig = figure('Color','w');
    tiledlayout(2,1,'TileSpacing','compact','Padding','compact');
    nexttile;
    bar(results(i).metrics.PA); ylim([0 1]);
    xticks(1:numel(classLabels)); xticklabels(string(classLabels));
    ylabel('PA (Recall)');
    title(sprintf('Producer''s Accuracy — %s (r=%.2f)', results(i).mode, results(i).radius));
    nexttile;
    bar(results(i).metrics.UA); ylim([0 1]);
    xticks(1:numel(classLabels)); xticklabels(string(classLabels));
    ylabel('UA (Precision)');
    title(sprintf('User''s Accuracy — %s (r=%.2f)', results(i).mode, results(i).radius));
    exportgraphics(fig, fullfile(outDir, ['pa_ua_' tag '.png']), 'Resolution', 200);
    close(fig);
    % (C) Membership functions BEFORE/AFTER
    % Layout: 2 rows (Before/After) x D columns (inputs)
    try
        plot_mfs_before_after(results(i).initFis, results(i).fis, ...
            sprintf('%s (r=%.2f, %s)', results(i).mode, results(i).radius, tag), ...
            fullfile(outDir, ['mfs_' tag '.png']));
    catch ME
        warning('MF plot failed for %s: %s', tag, ME.message);
    end
    % (D) Learning curves (ANFIS)
    trErr = results(i).trError;   % per-epoch training error (may be empty)
    ckErr = results(i).ckError;   % per-epoch checking/validation error (may be empty)
    if ~isempty(trErr)
        fig = figure('Color','w');
        plot(1:numel(trErr), trErr, 'LineWidth', 1.2); hold on;
        if ~isempty(ckErr)
            plot(1:numel(ckErr), ckErr, '--', 'LineWidth', 1.2);
            legend('Training Error','Validation Error','Location','best');
        else
            legend('Training Error','Location','best');
        end
        xlabel('Epoch'); ylabel('Error'); grid on;
        title(sprintf('Learning Curve — %s (r=%.2f, rules=%d)', ...
            results(i).mode, results(i).radius, results(i).nRules));
        exportgraphics(fig, fullfile(outDir, ['learning_' tag '.png']), 'Resolution', 200);
        close(fig);
    end
    % (E) Predictions vs Truth (test set) — like report example
    if isfield(results(i),'yhat') && isfield(results(i),'ytrue')
        yhat  = results(i).yhat(:);
        ytrue = results(i).ytrue(:);
        fig = figure('Color','w');
        plot(ytrue, 'LineWidth', 1.0); hold on;
        plot(yhat, '--', 'LineWidth', 1.0);
        xlabel('Test sample index'); ylabel('Class label');
        title(sprintf('Truth vs Prediction — %s (r=%.2f)', results(i).mode, results(i).radius));
        legend('Truth','Prediction','Location','best'); grid on;
        exportgraphics(fig, fullfile(outDir, ['pred_vs_truth_' tag '.png']), 'Resolution', 200);
        close(fig);
    end
end
% -------- Across-model summaries --------
% Sort runs by mode (class-independent last), then by increasing radius within each mode
[~, idxSort] = sortrows([double(modes=='class-independent'), radii], [1 2]);
OA_s = OA(idxSort);
Kap_s = Kap(idxSort);
modes_s = modes(idxSort);
radii_s = radii(idxSort);
labels = arrayfun(@(j) sprintf('%s\nr=%.2f', modes_s(j), radii_s(j)), 1:nRuns, 'uni', 0);
fig = figure('Color','w');
bar(OA_s*100); xticks(1:nRuns); xticklabels(labels); xtickangle(30);
ylabel('Overall Accuracy (%)'); title('Overall Accuracy across models'); grid on;
exportgraphics(fig, fullfile(outDir, 'overall_accuracy_across_models.png'), 'Resolution', 200);
close(fig);
fig = figure('Color','w');
bar(Kap_s); xticks(1:nRuns); xticklabels(labels); xtickangle(30);
ylabel('Cohen''s \kappa'); title('Kappa across models'); grid on;
exportgraphics(fig, fullfile(outDir, 'kappa_across_models.png'), 'Resolution', 200);
close(fig);
fig = figure('Color','w');
gscatter(nRules, OA*100, modes, [], [], 8);
xlabel('#Rules'); ylabel('OA (%)'); title('Rules vs Accuracy'); grid on; legend('Location','best');
exportgraphics(fig, fullfile(outDir, 'rules_vs_accuracy.png'), 'Resolution', 200);
close(fig);
% CSV summary
T = table((1:nRuns)', modes, radii, nRules, OA, Kap, ...
    'VariableNames', {'Run','Mode','Radius','Rules','OA','Kappa'});
writetable(T, fullfile(outDir, 'summary_scn1.csv'));
end
% ------------------ helpers ------------------
function plot_mfs_before_after(fisBefore, fisAfter, suptitleStr, outPng)
% Plot input MFs before/after in a 2xD tiled layout.
D = numel(fisAfter.Inputs);   % number of inputs (assumed equal before and after training)
fig = figure('Color','w','Position',[100 100 1200 420]);
tiledlayout(2, D, 'TileSpacing','compact','Padding','compact');
for d = 1:D
    % Top row: MFs of the initial (untrained) FIS
    nexttile(d); hold on;
    try
        [xB, yB] = plotmf(fisBefore, 'input', d);
        plot(xB, yB, 'LineWidth', 1.0);
    catch
        % fallback: skip the "before" panel if the initial FIS is unavailable
    end
    title(sprintf('Input %d — BEFORE', d));
    ylim([0 1]); grid on;

    % Bottom row: MFs of the trained FIS
    nexttile(D + d); hold on;
    [xA, yA] = plotmf(fisAfter, 'input', d);
    plot(xA, yA, 'LineWidth', 1.0);
    title(sprintf('Input %d — AFTER', d));
    ylim([0 1]); grid on;
end
sgtitle(['MFs ' suptitleStr]);
exportgraphics(fig, outPng, 'Resolution', 200);
close(fig);
end