HW3: Source and results for both of the scenarios added

Christos Choutouridis 2025-10-26 17:21:27 +02:00
parent 5e17867857
commit d1afa4ca45
43 changed files with 25380 additions and 0 deletions

3
.gitmodules vendored
View File

@ -4,3 +4,6 @@
[submodule "Work 2/report/AUThReport"] [submodule "Work 2/report/AUThReport"]
path = Work 2/report/AUThReport path = Work 2/report/AUThReport
url = ssh://git@git.hoo2.net:222/hoo2/AUThReport.git url = ssh://git@git.hoo2.net:222/hoo2/AUThReport.git
[submodule "Work 3/report/AUThReport"]
path = Work 3/report/AUThReport
url = ssh://git@git.hoo2.net:222/hoo2/AUThReport.git

8
Work 3/report/.gitignore vendored Normal file
View File

@ -0,0 +1,8 @@
# LaTeX auxiliary files
*.aux
*.log
*.out
*.synctex.gz
_minted*/*

@ -0,0 +1 @@
Subproject commit 74ec4b5f6c66382e5f1b6d2e6930897e4ed53ea6


View File

@ -0,0 +1,205 @@
%
% !TEX TS-program = xelatex
% !TEX encoding = UTF-8 Unicode
% !TEX spellcheck = el-GR
%
% Fuzzy Systems Assignment 3
%
% Requires compilation with pdfLaTeX or XeLaTeX
%
% authors:
% Χρήστος Χουτουρίδης ΑΕΜ 8997
% cchoutou@ece.auth.gr
% Options:
%
% 1) mainlang=<language>
% Default: english
% Set the default language of the document which affects hyphenations,
% localization (section, dates, etc...)
%
% example: \documentclass[mainlang=greek]{AUThReport}
%
% 2) <language>
% Add hyphenation and typesetting support for other languages
% Currently supports: english, greek, german, french
%
% example: \documentclass[english, greek]{AUThReport}
%
% 3) short: Requests a shorter title for the document
% Default: no short
%
% example: \documentclass[short]{AUThReport}
%
\documentclass[a4paper, 11pt, mainlang=greek, english]{AUThReport/AUThReport}
\CurrentDate{\today}
% Greek report document setup suggestions
%---------------------------------
% \WorkGroup{Ομάδα Χ}
\AuthorName{Χρήστος Χουτουρίδης}
\AuthorAEM{8997}
\AuthorMail{cchoutou@ece.auth.gr}
%\CoAuthorName{Όνομα Επίθετο}
%\CoAuthorAEM{1234}
%\CoAuthorMail{xxx@ece.auth.gr}
\DocTitle{Εργασία 3}
\DocSubTitle{Επίλυση προβλήματος παλινδρόμησης με χρήση μοντέλων TSK}
\Department{Τμήμα ΗΜΜΥ. Τομέας Ηλεκτρονικής}
\ClassName{Ασαφή Συστήματα (Υπολογιστική Νοημοσύνη)}
\InstructorName{Θεοχάρης Ιωάννης}
\InstructorMail{theochar@ece.auth.gr}
\CoInstructorName{Χαδουλός Χρήστος}
\CoInstructorMail{christgc@auth.gr}
\usepackage{float}
\usepackage{minted}
\usepackage{xcolor} %
\usepackage{amsmath, amssymb, amsfonts}
\usepackage{diagbox}
%\usepackage{tabular}
\setminted[matlab]{
fontsize=\small,
breaklines,
autogobble,
baselinestretch=1.1,
tabsize=2,
numbersep=8pt,
gobble=0
}
\newcommand{\repo}{https://git.hoo2.net/hoo2/FuzzySystems/src/branch/master/Work\%203}
\begin{document}
% Request a title page or header
\InsertTitle
%\InsertTitle[img/background.png][0.8\textwidth][2cm]
\section{Introduction}
In this assignment we are asked to ...
- Write an introduction that gives a very short summary of the essential tasks we have to implement, as well as their significance and the role they play (e.g., some background on TSK models and how they are used).
\subsection{Deliverables}
The deliverables of this assignment consist of:
\begin{itemize}
\item This report.
\item The \textbf{source} directory, containing the MATLAB code.
\item The \href{\repo}{link to the repository} that contains the MATLAB code as well as the source of this report.
\end{itemize}
\section{Implementation}
- Mention that we opted for a few abstractions that help in carrying out both sub-tasks of the assignment.
- State that the assignment can be run by executing the two scripts (scenario1 and scenario2), and that the helper functions are called from these scripts.
- Continue with some text about the implementation of the helper functions before moving on to the scripts, which are the actual deliverables of the assignment; a condensed sketch of the common pipeline is given below.
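The listing below is a condensed sketch of that common pipeline, with the calls and arguments taken from the committed \texttt{scenario1.m}:
\begin{minted}{matlab}
% Condensed pipeline sketch (from scenario1.m)
data = load("Datasets/airfoil_self_noise.dat");
[X_trn, y_trn, X_val, y_val, X_chk, y_chk] = split_data(data, [0.6 0.2 0.2], 42);
[X_trn, X_val, X_chk, stats] = preprocess_data(X_trn, X_val, X_chk, 1);  % min-max to [0,1]

init_fis = genfis(X_trn, y_trn, genfisOptions("GridPartition", ...
    "InputMembershipFunctionType", "gbellmf", "NumMembershipFunctions", 2));
opt_a = anfisOptions("InitialFis", init_fis, "ValidationData", [X_val, y_val], ...
    "EpochNumber", 100, "OptimizationMethod", 1);  % 1 = hybrid (BP + LSE)
[trn_fis, trn_error, ~, val_fis, val_error] = anfis([X_trn, y_trn], opt_a);

y_hat = evalfis(val_fis, X_chk);                       % predictions on the check set
[mse, rmse, r2, nmse, ndei] = evaluate(y_hat, y_chk);  % metrics
\end{minted}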
\subsection{Data splitting - split\_data()}
- Explain how the function works.
- Explain the role of the seed and why we used it.
- Explain why we decided to return X and y separately; a short sketch of the shuffling step follows.
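The split is made reproducible by seeding the random generator before shuffling the sample indices, as done in \texttt{split\_data.m}:
\begin{minted}{matlab}
% Reproducible shuffling (from split_data.m)
rng(seed, 'twister');            % fixed seed: the same shuffle on every run
idx  = randperm(size(data, 1));  % scramble the sample order
Ntrn = round(ratios(1) * size(data, 1));
Nval = round(ratios(2) * size(data, 1));
X_trn = data(idx(1:Ntrn), 1:end-1);  y_trn = data(idx(1:Ntrn), end);
\end{minted}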
\subsection{Data preprocessing - preprocess\_data()}
- Start by stating what preprocessing is and why it is needed.
- Explain our own approach (and that we experimented with z-score but did not end up using it).
- Explain how the function works; the scaling rule is given below.
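The features are scaled with statistics computed on the training set only, and the same transform is then applied to the validation and check sets (as in \texttt{preprocess\_data.m}):
\begin{equation*}
x' = \frac{x - \min(x_{\mathrm{trn}})}{\max(x_{\mathrm{trn}}) - \min(x_{\mathrm{trn}})} \in [0, 1].
\end{equation*}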
\subsection{Evaluation - evaluate()}
- Explain the evaluation step and how it is used.
- Explain our own approach.
- Explain how the function works; the metrics it reports are defined below.
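The metrics returned by \texttt{evaluate()} are computed directly from the residuals on the check set:
\begin{align*}
\mathrm{MSE}  &= \frac{1}{N}\sum_{i=1}^{N} \big(y_i - \hat{y}_i\big)^2, &
\mathrm{RMSE} &= \sqrt{\mathrm{MSE}}, \\
\mathrm{NMSE} &= \frac{\sum_{i} (y_i - \hat{y}_i)^2}{\sum_{i} (y_i - \bar{y})^2}, &
\mathrm{NDEI} &= \sqrt{\mathrm{NMSE}}, \quad R^2 = 1 - \mathrm{NMSE}.
\end{align*}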
\subsection{Presentation of results}
- Briefly mention that we implemented two functions for displaying and systematically saving the plots, for convenience.
- Give some basic points about their implementation, if there is anything interesting in them.
\section{Scenario 1 - Application to a simple dataset}
- Start by stating what is asked for here.
- Explain our approach to the solution (how we chose the number of MFs, the number of epochs, the metrics, etc.).
- For the four trained models, present in order (also quoting the corresponding figures); the configuration shared by the models is sketched below.
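All four models share the same training setup and differ only in the number of input MFs and in the output MF type (constant for the zero-order TSK models, linear for the first-order ones), as configured in \texttt{scenario1.m}:
\begin{minted}{matlab}
% Model grid used in scenario1.m (condensed)
mf_types = ["constant", "constant", "linear", "linear"];  % TSK order 0 / 0 / 1 / 1
num_mf   = [2, 3, 2, 3];                                  % MFs per input
opt_g = genfisOptions("GridPartition", ...
    "InputMembershipFunctionType",  "gbellmf", ...
    "NumMembershipFunctions",       num_mf(i), ...
    "OutputMembershipFunctionType", mf_types(i));
init_fis = genfis(X_trn, y_trn, opt_g);
\end{minted}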
\subsection{Membership functions}
- First, the membership functions of the models (Figures~\ref{fig:scn1_mf1}--\ref{fig:scn1_mf4}).
\InsertFigure{!ht}{0.7}{fig:scn1_mf1}{../source/figures_scn1/model_1_mfs_all_inputs.png}{Membership functions of Model 1 (TSK0-2MF) for all inputs, before and after training.}
\InsertFigure{!ht}{0.7}{fig:scn1_mf2}{../source/figures_scn1/model_2_mfs_all_inputs.png}{Membership functions of Model 2 (TSK0-3MF) for all inputs, before and after training.}
\InsertFigure{!ht}{0.7}{fig:scn1_mf3}{../source/figures_scn1/model_3_mfs_all_inputs.png}{Membership functions of Model 3 (TSK1-2MF) for all inputs, before and after training.}
\InsertFigure{!ht}{0.7}{fig:scn1_mf4}{../source/figures_scn1/model_4_mfs_all_inputs.png}{Membership functions of Model 4 (TSK1-3MF) for all inputs, before and after training.}
Add a few remarks and brief observations here.
\subsection{Learning curves}
- Next, include the learning curves as a group of four (e.g., ``in Figure~\ref{fig:scn1_learning_curves} we show the learning curves'').
\begin{figure}[!ht]
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_1_learning_curves.png}
\hspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_2_learning_curves.png} \\
\vspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_3_learning_curves.png}
\hspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_4_learning_curves.png} \\
\caption{Learning curves (training and validation error per epoch) for Models 1-4.} % TODO: give each image its own caption
\label{fig:scn1_learning_curves}
\end{figure}
Add a few remarks and brief observations here.
\subsection{Prediction errors}
- Next, include the prediction errors as a group of four (e.g., ``in Figure~\ref{fig:scn1_error} we show the prediction errors'').
\begin{figure}[!ht]
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_1_error.png}
\hspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_2_error.png} \\
\vspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_3_error.png}
\hspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_4_error.png} \\
\caption{Prediction error on the check set for Models 1-4.} % TODO: give each image its own caption
\label{fig:scn1_error}
\end{figure}
Add a few remarks and brief observations here.
\subsection{Comparison of actual vs predicted} % The title may need adjustment
- Next, include the predicted-vs-actual plots as a group of four (e.g., ``in Figure~\ref{fig:scn1_predicted_vs_actual} we show the comparison'').
\begin{figure}[!ht]
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_1_pred_vs_actual.png}
\hspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_2_pred_vs_actual.png} \\
\vspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_3_pred_vs_actual.png}
\hspace{1em}
\includegraphics[width=0.48\textwidth]{../source/figures_scn1/model_4_pred_vs_actual.png} \\
\caption{Predicted versus actual values on the check set for Models 1-4.} % TODO: give each image its own caption
\label{fig:scn1_predicted_vs_actual}
\end{figure}
Add a few remarks and brief observations here.
\subsection{Overall}
The evaluation metrics of all four models are summarized in the table below:
\begin{table}[!ht]
\centering
\begin{tabular}{l|cccc}
Metric & Model 1 & Model 2 & Model 3 & Model 4 \\
\hline
MSE   & 16.926  & 17.141  & 12.895  & 30.475  \\
RMSE  & 4.1142  & 4.1401  & 3.591   & 5.5204  \\
$R^2$ & 0.66468 & 0.66043 & 0.74454 & 0.39627 \\
NMSE  & 0.33532 & 0.33957 & 0.25546 & 0.60373 \\
NDEI  & 0.57907 & 0.58272 & 0.50543 & 0.777   \\
\end{tabular}
\caption{Scenario 1: evaluation metrics on the check set for all models.}
\label{tab:scn1_metrics}
\end{table}
Comment on the results.
\section{Scenario 2 - Dataset with high dimensionality}
\section{Conclusions}
\end{document}


3
Work 3/source/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
# Matlab auxiliary files
*.asv

File diff suppressed because it is too large

File diff suppressed because it is too large

620
Work 3/source/Scenario1.log Normal file
View File

@ -0,0 +1,620 @@
Model 1: TSK0-2MF (gbellmf × 2 per input)
=====================================
Initial rules: 32
ANFIS info:
Number of nodes: 92
Number of linear parameters: 32
Number of nonlinear parameters: 30
Total number of parameters: 62
Number of training data pairs: 902
Number of checking data pairs: 301
Number of fuzzy rules: 32
Start training ANFIS ...
1 4.25643 4.51914
2 4.22775 4.48492
3 4.1994 4.45216
4 4.17142 4.42081
Step size increases to 0.011000 after epoch 5.
5 4.14386 4.39082
6 4.11676 4.36215
7 4.08753 4.33208
8 4.05896 4.3035
Step size increases to 0.012100 after epoch 9.
9 4.0311 4.27637
10 4.004 4.25063
11 3.97511 4.22389
12 3.94724 4.19874
Step size increases to 0.013310 after epoch 13.
13 3.92044 4.17515
14 3.89473 4.15306
15 3.86777 4.13049
16 3.84221 4.10967
Step size increases to 0.014641 after epoch 17.
17 3.81806 4.09058
18 3.79532 4.0732
19 3.77194 4.05604
20 3.75022 4.04091
Step size increases to 0.016105 after epoch 21.
21 3.73013 4.0278
22 3.7116 4.0167
23 3.69293 4.00682
24 3.67596 3.99934
Step size increases to 0.017716 after epoch 25.
25 3.66054 3.99425
26 3.64656 3.99151
27 3.63269 3.99118
28 3.62021 3.99352
Step size increases to 0.019487 after epoch 29.
29 3.60898 3.99839
30 3.59883 4.00557
31 3.58878 4.01587
32 3.57974 4.02819
Step size increases to 0.021436 after epoch 33.
33 3.57161 4.04196
34 3.56429 4.05654
35 3.55707 4.07282
36 3.55066 4.08823
Step size increases to 0.023579 after epoch 37.
37 3.54498 4.10185
38 3.53995 4.1129
39 3.53508 4.12181
40 3.53083 4.12685
Step size increases to 0.025937 after epoch 41.
41 3.5271 4.12846
42 3.52381 4.12733
43 3.5206 4.12391
44 3.51772 4.11867
Step size increases to 0.028531 after epoch 45.
45 3.51509 4.11214
46 3.51267 4.10466
47 3.51021 4.09579
48 3.50794 4.08656
Step size increases to 0.031384 after epoch 49.
49 3.50585 4.07728
50 3.50391 4.06815
51 3.50193 4.05832
52 3.50009 4.04887
Step size increases to 0.034523 after epoch 53.
53 3.49836 4.03931
54 3.4968 4.03148
55 3.49646 4.01735
56 3.49577 4.01706
Step size increases to 0.037975 after epoch 57.
57 3.49528 4.00918
58 3.4945 4.01098
59 3.4945 4.0023
60 3.49332 4.00347
61 3.49319 3.99792
62 3.49216 3.99805
Step size increases to 0.041772 after epoch 63.
63 3.49194 3.99353
64 3.49123 3.99357
65 3.49167 3.98955
66 3.49062 3.98891
67 3.49093 3.98663
Step size decreases to 0.037595 after epoch 68.
68 3.48995 3.98598
69 3.49029 3.98279
70 3.4883 3.98284
71 3.48954 3.98056
Step size decreases to 0.033836 after epoch 72.
72 3.48769 3.98048
73 3.48891 3.97664
74 3.48635 3.97757
75 3.4881 3.97462
Step size decreases to 0.030452 after epoch 76.
76 3.48584 3.97574
77 3.48749 3.9711
78 3.48473 3.97298
79 3.4867 3.96937
Step size decreases to 0.027407 after epoch 80.
80 3.48431 3.97148
81 3.48615 3.96648
82 3.48339 3.96895
83 3.48544 3.96508
Step size decreases to 0.024666 after epoch 84.
84 3.48307 3.96771
85 3.48498 3.96281
86 3.48233 3.96547
87 3.48437 3.96168
Step size decreases to 0.022200 after epoch 88.
88 3.4821 3.96443
89 3.484 3.95987
90 3.48151 3.96249
91 3.4835 3.95894
Step size decreases to 0.019980 after epoch 92.
92 3.48135 3.96161
93 3.48319 3.95745
94 3.4809 3.95993
95 3.48275 3.95666
Step size decreases to 0.017982 after epoch 96.
96 3.48078 3.95917
97 3.48248 3.95537
98 3.48043 3.9577
99 3.48207 3.95468
Step size decreases to 0.016184 after epoch 100.
100 3.48033 3.95705
Designated epoch number reached. ANFIS training completed at epoch 100.
Minimal training RMSE = 3.48033
Minimal checking RMSE = 3.95468
MSE : 16.9264
RMSE: 4.11417
R2 : 0.664677
NMSE: 0.335323
NDEI: 0.57907
Model 2: TSK0-3MF (gbellmf × 3 per input)
=====================================
Initial rules: 243
ANFIS info:
Number of nodes: 524
Number of linear parameters: 243
Number of nonlinear parameters: 45
Total number of parameters: 288
Number of training data pairs: 902
Number of checking data pairs: 301
Number of fuzzy rules: 243
Start training ANFIS ...
1 3.48949 6.18916
2 3.45915 6.06247
3 3.41872 5.91346
4 3.38236 5.73296
Step size increases to 0.011000 after epoch 5.
5 3.353 5.57846
6 3.32197 5.47698
7 3.28593 5.3751
8 3.2483 5.26731
Step size increases to 0.012100 after epoch 9.
9 3.20873 5.1596
10 3.16709 5.06056
11 3.11912 4.97062
12 3.07004 4.90448
Step size increases to 0.013310 after epoch 13.
13 3.02187 4.86429
14 2.97737 4.85458
15 2.93582 4.88936
16 2.90427 4.98087
Step size increases to 0.014641 after epoch 17.
17 2.88219 5.12211
18 2.86471 5.26311
19 2.84624 5.36588
20 2.82829 5.46658
Step size increases to 0.016105 after epoch 21.
21 2.81115 5.55392
22 2.79487 5.67287
23 2.77853 5.64686
24 2.76872 6.11556
25 2.77206 5.14558
26 2.75702 6.19896
27 2.7602 5.14994
Step size decreases to 0.014495 after epoch 28.
28 2.74638 6.24052
29 2.75098 5.11992
30 2.73409 6.17768
31 2.74283 5.10258
Step size decreases to 0.013045 after epoch 32.
32 2.72821 6.17703
33 2.73726 5.07044
34 2.7204 6.07671
35 2.73153 5.05552
Step size decreases to 0.011741 after epoch 36.
36 2.7168 6.06383
37 2.7272 5.03036
38 2.71115 5.95635
39 2.72224 5.02069
Step size decreases to 0.010567 after epoch 40.
40 2.70844 5.94394
41 2.71842 5.00057
42 2.70399 5.83984
43 2.71392 4.99542
Step size decreases to 0.009510 after epoch 44.
44 2.70177 5.83091
45 2.71053 4.9791
46 2.69812 5.73434
47 2.70652 4.97696
Step size decreases to 0.008559 after epoch 48.
48 2.69623 5.72879
49 2.70354 4.96362
50 2.69316 5.64101
51 2.70004 4.96338
Step size decreases to 0.007703 after epoch 52.
52 2.69153 5.63813
53 2.69744 4.95251
54 2.68891 5.55924
55 2.69442 4.9536
Step size decreases to 0.006933 after epoch 56.
56 2.68746 5.55841
57 2.69215 4.94479
58 2.6852 5.48794
59 2.68954 4.94684
Step size decreases to 0.006239 after epoch 60.
60 2.68391 5.48866
61 2.68755 4.93971
62 2.68193 5.42588
63 2.68528 4.94243
Step size decreases to 0.005616 after epoch 64.
64 2.68076 5.42767
65 2.68353 4.93659
66 2.67901 5.37175
67 2.68155 4.93972
Step size decreases to 0.005054 after epoch 68.
68 2.67793 5.37416
69 2.68 4.93481
70 2.67637 5.32424
71 2.67826 4.93813
Step size decreases to 0.004549 after epoch 72.
72 2.67537 5.32691
73 2.67687 4.93383
74 2.67396 5.28218
75 2.67533 4.93716
Step size decreases to 0.004094 after epoch 76.
76 2.67302 5.28479
77 2.67407 4.93317
78 2.67174 5.24452
79 2.6727 4.93637
Step size decreases to 0.003684 after epoch 80.
80 2.67084 5.24682
81 2.67156 4.93247
82 2.66967 5.21037
83 2.67031 4.93543
Step size decreases to 0.003316 after epoch 84.
84 2.66881 5.21218
85 2.66927 4.93142
86 2.66772 5.17901
87 2.66813 4.93412
Step size decreases to 0.002984 after epoch 88.
88 2.66689 5.1802
89 2.66716 4.92983
90 2.66588 5.14986
91 2.66611 4.93226
Step size decreases to 0.002686 after epoch 92.
92 2.66507 5.15035
93 2.6652 4.92758
94 2.66412 5.12247
95 2.66422 4.92977
Step size decreases to 0.002417 after epoch 96.
96 2.66333 5.1222
97 2.66335 4.92461
98 2.66244 5.09648
99 2.66243 4.92661
100 2.66165 5.09539
Designated epoch number reached. ANFIS training completed at epoch 100.
Minimal training RMSE = 2.66165
Minimal checking RMSE = 4.85458
MSE : 17.1406
RMSE: 4.14012
R2 : 0.660434
NMSE: 0.339566
NDEI: 0.582723
Model 3: TSK1-2MF (gbellmf × 2 per input)
=====================================
Initial rules: 32
ANFIS info:
Number of nodes: 92
Number of linear parameters: 192
Number of nonlinear parameters: 30
Total number of parameters: 222
Number of training data pairs: 902
Number of checking data pairs: 301
Number of fuzzy rules: 32
Start training ANFIS ...
1 2.85009 4.69235
2 2.83691 4.61659
3 2.82381 4.5385
4 2.81077 4.45873
Step size increases to 0.011000 after epoch 5.
5 2.79775 4.37785
6 2.78474 4.29631
7 2.7704 4.2064
8 2.756 4.11658
Step size increases to 0.012100 after epoch 9.
9 2.7415 4.02712
10 2.72683 3.93818
11 2.71042 3.84107
12 2.69361 3.74462
Step size increases to 0.013310 after epoch 13.
13 2.67624 3.64883
14 2.65807 3.5539
15 2.63684 3.45151
16 2.6138 3.35379
Step size increases to 0.014641 after epoch 17.
17 2.5883 3.2663
18 2.55954 3.19899
19 2.52297 3.16663
20 2.47983 3.20424
Step size increases to 0.016105 after epoch 21.
21 2.42917 3.3412
22 2.37416 3.59905
23 2.34422 3.89625
24 2.36179 3.60671
25 2.33613 3.94345
26 2.3545 3.59925
Step size decreases to 0.014495 after epoch 27.
27 2.32932 3.98109
28 2.34864 3.58976
29 2.31899 4.00715
30 2.34073 3.59264
Step size decreases to 0.013045 after epoch 31.
31 2.3134 4.05635
32 2.33536 3.58825
33 2.30527 4.05997
34 2.32785 3.59718
Step size decreases to 0.011741 after epoch 35.
35 2.30083 4.10646
36 2.32309 3.59599
37 2.29443 4.0886
38 2.31615 3.60889
Step size decreases to 0.010567 after epoch 39.
39 2.29086 4.12772
40 2.31186 3.61014
41 2.2857 4.09768
42 2.30547 3.62593
Step size decreases to 0.009510 after epoch 43.
43 2.28273 4.13163
44 2.30157 3.62894
45 2.27844 4.09721
46 2.29579 3.64627
Step size decreases to 0.008559 after epoch 47.
47 2.27589 4.1275
48 2.29224 3.65042
49 2.27223 4.09275
50 2.2871 3.6682
Step size decreases to 0.007703 after epoch 51.
51 2.27 4.11992
52 2.28389 3.67307
53 2.26682 4.08658
54 2.27936 3.69061
Step size decreases to 0.006933 after epoch 55.
55 2.26483 4.11089
56 2.27645 3.6959
57 2.26204 4.07957
58 2.27247 3.7128
Step size decreases to 0.006239 after epoch 59.
59 2.26024 4.10119
60 2.26983 3.71823
61 2.25776 4.07206
62 2.26633 3.73423
Step size decreases to 0.005616 after epoch 63.
63 2.25611 4.09117
64 2.26395 3.73959
65 2.25389 4.06416
66 2.26087 3.75453
Step size decreases to 0.005054 after epoch 67.
67 2.25236 4.08094
68 2.25871 3.75961
69 2.25036 4.0559
70 2.25599 3.77339
Step size decreases to 0.004549 after epoch 71.
71 2.24893 4.07052
72 2.25403 3.77802
73 2.24711 4.04729
74 2.25162 3.7906
Step size decreases to 0.004094 after epoch 75.
75 2.24576 4.05991
76 2.24983 3.79463
77 2.24409 4.03828
78 2.24767 3.806
Step size decreases to 0.003684 after epoch 79.
79 2.2428 4.04906
80 2.24604 3.80931
81 2.24127 4.02885
82 2.2441 3.81953
Step size decreases to 0.003316 after epoch 83.
83 2.24002 4.03794
84 2.2426 3.82204
85 2.2386 4.019
86 2.24084 3.83118
Step size decreases to 0.002984 after epoch 87.
87 2.2374 4.02654
88 2.23945 3.83283
89 2.23608 4.00874
90 2.23783 3.84101
Step size decreases to 0.002686 after epoch 91.
91 2.23491 4.01486
92 2.23654 3.84179
93 2.23367 3.99812
94 2.23504 3.84916
Step size decreases to 0.002417 after epoch 95.
95 2.23253 4.00297
96 2.23383 3.84907
97 2.23136 3.98724
98 2.23243 3.8558
Step size decreases to 0.002176 after epoch 99.
99 2.23025 3.99091
100 2.23128 3.85487
Designated epoch number reached. ANFIS training completed at epoch 100.
Minimal training RMSE = 2.23025
Minimal checking RMSE = 3.16663
MSE : 12.8953
RMSE: 3.591
R2 : 0.744537
NMSE: 0.255463
NDEI: 0.505433
Model 4: TSK1-3MF (gbellmf × 3 per input)
=====================================
Initial rules: 243
ANFIS info:
Number of nodes: 524
Number of linear parameters: 1458
Number of nonlinear parameters: 45
Total number of parameters: 1503
Number of training data pairs: 902
Number of checking data pairs: 301
Number of fuzzy rules: 243
Warning: Number of training data is smaller than number of modifiable parameters.
> In anfis>trainFIS (line 203)
In anfis>anfisWithOptionObject (line 109)
In anfis (line 68)
In scenario1 (line 66)
Start training ANFIS ...
1 2.18266 13.9384
2 2.16266 14.302
3 2.14218 14.771
4 2.12092 15.3612
Step size increases to 0.011000 after epoch 5.
5 2.0985 16.0849
6 2.07446 16.9408
7 2.04563 17.9967
8 2.01387 19.1103
Step size increases to 0.012100 after epoch 9.
9 1.97901 20.1856
10 1.94113 21.1309
11 1.89631 21.9439
12 1.84864 22.4876
Step size increases to 0.013310 after epoch 13.
13 1.79858 22.7812
14 1.74656 22.8849
15 1.68767 22.8664
16 1.62771 22.7684
Step size increases to 0.014641 after epoch 17.
17 1.56748 22.5829
18 1.50801 22.2279
19 1.44544 21.5009
20 1.38899 20.2966
Step size increases to 0.016105 after epoch 21.
21 1.34211 18.5912
22 1.30602 16.488
23 1.27558 14.1361
24 1.25531 13.5366
Step size increases to 0.017716 after epoch 25.
25 1.22833 11.9472
26 1.20732 11.145
27 1.17735 9.2776
28 1.15854 8.56663
Step size increases to 0.019487 after epoch 29.
29 1.12679 6.41978
30 1.12503 6.60586
31 1.09297 4.9284
32 1.10129 5.326
33 1.06957 4.68211
34 1.08124 4.74092
Step size decreases to 0.017538 after epoch 35.
35 1.05297 4.99736
36 1.0659 4.73721
37 1.03553 5.54205
38 1.04981 5.0522
Step size decreases to 0.015785 after epoch 39.
39 1.02529 6.28831
40 1.03688 5.44902
41 1.009 7.02564
42 1.02048 5.91912
Step size decreases to 0.014206 after epoch 43.
43 0.998267 8.03611
44 1.00675 6.31568
45 0.981053 8.87018
46 0.992248 6.70868
Step size decreases to 0.012786 after epoch 47.
47 0.973147 9.9055
48 0.983062 6.97916
49 0.958957 10.505
50 0.973683 7.26996
Step size decreases to 0.011507 after epoch 51.
51 0.954367 11.3465
52 0.967501 7.4856
53 0.942356 11.7345
54 0.960308 7.75761
Step size decreases to 0.010356 after epoch 55.
55 0.939278 12.4896
56 0.955315 7.97285
57 0.928728 12.7622
58 0.948976 8.26813
Step size decreases to 0.009321 after epoch 59.
59 0.926441 13.4981
60 0.944515 8.50717
61 0.91711 13.7127
62 0.938576 8.84703
Step size decreases to 0.008389 after epoch 63.
63 0.915346 14.4517
64 0.934437 9.12183
65 0.907143 14.6391
66 0.928786 9.51736
Step size decreases to 0.007550 after epoch 67.
67 0.905787 15.3775
68 0.924938 9.83222
69 0.898656 15.5517
70 0.919613 10.2861
Step size decreases to 0.006795 after epoch 71.
71 0.89764 16.2705
72 0.916096 10.6381
73 0.891518 16.4343
74 0.911178 11.1444
Step size decreases to 0.006115 after epoch 75.
75 0.890784 17.1082
76 0.908033 11.5239
77 0.885587 17.2586
78 0.903579 12.0686
Step size decreases to 0.005504 after epoch 79.
79 0.885076 17.8654
80 0.90081 12.4607
81 0.8807 17.9986
82 0.896814 13.0247
Step size decreases to 0.004953 after epoch 83.
83 0.880351 18.5253
84 0.894379 13.4131
85 0.876676 18.6391
86 0.890789 13.9761
Step size decreases to 0.004458 after epoch 87.
87 0.876438 19.0838
88 0.888624 14.3467
89 0.873343 19.1782
90 0.885379 14.8903
Step size decreases to 0.004012 after epoch 91.
91 0.873172 19.5467
92 0.883427 15.2336
93 0.870543 19.6236
94 0.880489 15.7433
Step size decreases to 0.003611 after epoch 95.
95 0.870406 19.9261
96 0.878716 16.0547
97 0.868147 19.9882
98 0.876081 16.5202
Step size decreases to 0.003250 after epoch 99.
99 0.868021 20.2362
100 0.874473 16.7991
Designated epoch number reached. ANFIS training completed at epoch 100.
Minimal training RMSE = 0.868021
Minimal checking RMSE = 4.68211
MSE : 30.4753
RMSE: 5.52045
R2 : 0.396266
NMSE: 0.603734
NDEI: 0.777003
Metrics Model_1 Model_2 Model_3 Model_4
_______ _______ _______ _______ _______
MSE 16.926 17.141 12.895 30.475
RMSE 4.1142 4.1401 3.591 5.5204
R2 0.66468 0.66043 0.74454 0.39627
NMSE 0.33532 0.33957 0.25546 0.60373
NDEI 0.57907 0.58272 0.50543 0.777

1163
Work 3/source/Scenario2.log Normal file

File diff suppressed because it is too large

17
Work 3/source/evaluate.m Normal file
View File

@ -0,0 +1,17 @@
function [mse, rmse, r2, nmse, ndei] = evaluate(pred, truth)
%EVALUATE Compute regression metrics between predictions and ground truth
%   Returns MSE, RMSE, R2, NMSE and NDEI for the given prediction/truth pair.
% MSE / RMSE
mse = mean((truth - pred).^2);
rmse = sqrt(mse);
% R^2 - NMSE - NDEI
ss_res = sum((truth - pred).^2);
ss_tot = sum((truth - mean(truth)).^2);
nmse = ss_res / ss_tot;
ndei = sqrt(nmse);
r2 = 1 - nmse;
end
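A minimal usage sketch, mirroring the call in scenario1.m (final_fis, X_chk and y_chk as produced there):

% Metrics of a trained FIS on the check set
y_hat = evalfis(final_fis, X_chk);
[mse, rmse, r2, nmse, ndei] = evaluate(y_hat, y_chk);
fprintf('RMSE: %g, R2: %g\n', rmse, r2);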


148
Work 3/source/plot_results1.m Normal file
View File

@ -0,0 +1,148 @@
function plot_results1(init_fis, final_fis, trn_error, val_error, y_true, y_pred, model_id)
%PLOT_RESULTS1
% Creates and saves:
% (A) One consolidated figure for membership functions (all inputs):
% - 2 x Ninputs tiled layout
% - Top row: BEFORE training (all MFs per input in one subplot)
% - Bottom row: AFTER training (all MFs per input in one subplot)
% (B) Learning curves (train & validation)
% (C) Predicted vs Actual (with y=x reference)
% (D) Prediction Error (time series + MAE)
%
% All figures are saved to ./figures_scn1 as PNG (PDF export is available but disabled).
%
% Usage:
% plot_results1(init_fis, final_fis, trn_error, val_error, y_true, y_pred, model_id)
% Input checks
if nargin < 7
error('plot_results: not enough inputs. Expected 7 arguments.');
end
if numel(y_true) ~= numel(y_pred)
error('plot_results: y_true and y_pred must have the same length.');
end
y_true = y_true(:);
y_pred = y_pred(:);
% Output directory
outdir = fullfile('.', 'figures_scn1');
if ~exist(outdir, 'dir'); mkdir(outdir); end
% (A) MEMBERSHIP FUNCTIONS ONE FIGURE, 2 x Ninputs (BEFORE / AFTER)
% =====================================================================
nInputs = numel(init_fis.Inputs);
% Precompute x/y for all inputs to keep consistent axes
Xb = cell(1, nInputs); Yb = cell(1, nInputs); % BEFORE
Xa = cell(1, nInputs); Ya = cell(1, nInputs); % AFTER
nMFb = zeros(1, nInputs); nMFa = zeros(1, nInputs);
for inIdx = 1:nInputs
[xb, yb] = plotmf(init_fis, 'input', inIdx);
[xa, ya] = plotmf(final_fis, 'input', inIdx);
Xb{inIdx} = xb; Yb{inIdx} = yb; nMFb(inIdx) = size(yb,2);
Xa{inIdx} = xa; Ya{inIdx} = ya; nMFa(inIdx) = size(ya,2);
end
% Create consolidated figure
f = figure('Name', sprintf('MFs Model %d', model_id), 'Color','w');
tl = tiledlayout(f, 2, nInputs, 'TileSpacing','compact', 'Padding','compact');
% Titles for rows
rowTitles = {'BEFORE training', 'AFTER training'};
for inIdx = 1:nInputs
% BEFORE (top row)
nexttile(tl, inIdx); % row 1, col inIdx
hold on; grid on;
xb = Xb{inIdx}; yb = Yb{inIdx}; kB = size(yb,2);
for k = 1:kB
plot(xb, yb(:,k), 'LineWidth', 1.5);
end
xlabel(sprintf('Input x%d', inIdx), 'Interpreter','latex');
ylabel('Membership', 'Interpreter','latex');
title(sprintf('%s x%d', rowTitles{1}, inIdx), 'Interpreter','latex');
% Optional legend only on the first tile to reduce clutter
if inIdx == 1
lgdB = arrayfun(@(k) sprintf('MF %d', k), 1:kB, 'UniformOutput', false);
legend(lgdB, 'Location','best');
end
% AFTER (bottom row)
nexttile(tl, nInputs + inIdx); % row 2, col inIdx
hold on; grid on;
xa = Xa{inIdx}; ya = Ya{inIdx}; kA = size(ya,2);
for k = 1:kA
plot(xa, ya(:,k), 'LineWidth', 1.5);
end
xlabel(sprintf('Input x%d', inIdx), 'Interpreter','latex');
ylabel('Membership', 'Interpreter','latex');
title(sprintf('%s x%d', rowTitles{2}, inIdx), 'Interpreter','latex');
if inIdx == 1
lgdA = arrayfun(@(k) sprintf('MF %d', k), 1:kA, 'UniformOutput', false);
legend(lgdA, 'Location','best');
end
end
% Super-title
sgtitle(tl, sprintf('Membership Functions Model %d', model_id), 'Interpreter','latex');
% Save consolidated MF figure
save_figure(f, fullfile(outdir, sprintf('model_%d_mfs_all_inputs', model_id)));
% (B) LEARNING CURVES (train & validation)
% ============================================
f = figure('Name', sprintf('Learning Curves - Model %d', model_id), 'Color','w');
plot(trn_error, 'LineWidth', 1.5); hold on; grid on;
if ~isempty(val_error)
plot(val_error, 'LineWidth', 1.5);
legend('Train','Validation','Location','best');
else
legend('Train','Location','best');
end
xlabel('Epoch', 'Interpreter','latex');
ylabel('Error', 'Interpreter','latex');
title(sprintf('Learning Curves | Model %d', model_id), 'Interpreter','latex');
save_figure(f, fullfile(outdir, sprintf('model_%d_learning_curves', model_id)));
% (C) PREDICTED vs ACTUAL
% ============================================
f = figure('Name', sprintf('Predicted vs Actual - Model %d', model_id), 'Color','w');
plot(y_true, y_pred, '.', 'MarkerSize', 10); hold on; grid on;
mins = min([y_true; y_pred]);
maxs = max([y_true; y_pred]);
plot([mins maxs], [mins maxs], 'k-', 'LineWidth', 1);
xlabel('Actual', 'Interpreter','latex');
ylabel('Predicted', 'Interpreter','latex');
title(sprintf('Predicted vs Actual | Model %d', model_id), 'Interpreter','latex');
save_figure(f, fullfile(outdir, sprintf('model_%d_pred_vs_actual', model_id)));
% (D) PREDICTION ERROR
% ============================================
err = y_true - y_pred;
f = figure('Name', sprintf('Prediction Error - Model %d', model_id), 'Color','w');
plot(err, 'k'); grid on;
xlabel('Iteration', 'Interpreter','latex');
ylabel('Prediction Error', 'Interpreter','latex');
title('Prediction Error', 'Interpreter','latex');
mae = mean(abs(err));
try
subtitle(sprintf('Mean absolute error: %f', mae), 'Interpreter','latex');
catch
title(sprintf('Prediction Error (MAE=%.6f)', mae), 'Interpreter','latex');
end
save_figure(f, fullfile(outdir, sprintf('model_%d_error', model_id)));
end
% Helper: consistent saving (PDF + PNG, landscape)
function save_figure(fig_handle, basepath)
set(fig_handle, 'PaperUnits','normalized');
set(fig_handle, 'PaperPosition', [0 0 1 1]);
set(fig_handle, 'PaperOrientation', 'landscape');
% print(fig_handle, '-dpdf', [basepath '.pdf']);
print(fig_handle, '-dpng', [basepath '.png']);
end
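The function is invoked once per model from scenario1.m, after evaluation:

% From scenario1.m: figures for model i, saved under ./figures_scn1
plot_results1(init_fis, final_fis, trn_error, val_error, y_chk, y_hat, i);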

107
Work 3/source/plot_results2.m Normal file
View File

@ -0,0 +1,107 @@
function plot_results2(init_fis, final_fis, trn_error, val_error, y_true, y_pred, ...
feature_grid, radius_grid, cv_scores, cv_rules, sel_idx)
% PLOTS FOR SCENARIO 2:
% (1) CV heatmap (mean CV error over grid) with overlaid #rules
% (2) Error vs rules (aggregated) & Error vs #features (best over radii)
% (3) Final model: Learning curves, Predicted vs Actual, Residuals
% (4) MFs (subset of selected inputs) BEFORE/AFTER
%
% All figures are saved under ./figures_scn2 as PNG
outdir = fullfile('.', 'figures_scn2'); if ~exist(outdir,'dir'), mkdir(outdir); end
% (1) CV heatmap
f = figure('Color','w','Name','CV Heatmap');
imagesc(radius_grid, feature_grid, cv_scores);
set(gca,'YDir','normal'); xlabel('Cluster radius r_\alpha','Interpreter','latex');
ylabel('#Features','Interpreter','latex');
title('Mean CV Error','Interpreter','latex'); colorbar; grid on;
% overlay mean rules
hold on;
for i=1:numel(feature_grid)
for j=1:numel(radius_grid)
text(radius_grid(j), feature_grid(i), sprintf('%d', cv_rules(i,j)), ...
'HorizontalAlignment','center','Color','w','FontSize',8);
end
end
try
subtitle('Numbers show mean #rules','Interpreter','latex');
end
print(f, fullfile(outdir,'scn2_cv_heatmap'), '-dpng');
% (2a) Error vs #rules (aggregate by identical rule-counts)
uniq_rules = unique(cv_rules(:));
err_vs_rules = zeros(size(uniq_rules));
for k=1:numel(uniq_rules)
err_vs_rules(k) = mean(cv_scores(cv_rules == uniq_rules(k)));
end
f = figure('Color','w','Name','Error vs Rules');
plot(uniq_rules, err_vs_rules, 'o-','LineWidth',1.5); grid on;
xlabel('#Rules','Interpreter','latex'); ylabel('Mean CV Error','Interpreter','latex');
title('Error vs Rules','Interpreter','latex');
print(f, fullfile(outdir,'scn2_error_vs_rules'), '-dpng');
% (2b) Best CV error vs #features (min across radii)
[min_err_per_feat,~] = min(cv_scores,[],2);
f = figure('Color','w','Name','Error vs Features');
plot(feature_grid, min_err_per_feat, 's-','LineWidth',1.5); grid on;
xlabel('#Features','Interpreter','latex'); ylabel('Best CV Error','Interpreter','latex');
title('Best CV Error vs #Features','Interpreter','latex');
print(f, fullfile(outdir,'scn2_error_vs_features'), '-dpng');
% (3a) Learning curves
f = figure('Color','w','Name','Learning Curves');
plot(trn_error,'LineWidth',1.5); hold on; grid on;
if ~isempty(val_error)
plot(val_error,'LineWidth',1.5); legend('Train','Validation','Location','best');
else
legend('Train','Location','best');
end
xlabel('Epoch','Interpreter','latex'); ylabel('Error','Interpreter','latex');
title('Learning Curves','Interpreter','latex');
print(f, fullfile(outdir,'scn2_final_learning_curves'), '-dpng');
% (3b) Predicted vs Actual
y_true = y_true(:); y_pred = y_pred(:);
f = figure('Color','w','Name','Predicted vs Actual');
plot(y_true, y_pred, '.', 'MarkerSize', 10); hold on; grid on;
mins = min([y_true; y_pred]); maxs = max([y_true; y_pred]);
plot([mins maxs], [mins maxs], 'k-', 'LineWidth', 1);
xlabel('Actual','Interpreter','latex'); ylabel('Predicted','Interpreter','latex');
title('Predicted vs Actual','Interpreter','latex');
print(f, fullfile(outdir,'scn2_final_pred_vs_actual'), '-dpng');
% (3c) Residuals (time series)
err = y_true - y_pred;
f = figure('Color','w','Name','Prediction Error');
plot(err, 'k'); grid on;
xlabel('Sample','Interpreter','latex'); ylabel('Error','Interpreter','latex');
title('Prediction Error','Interpreter','latex');
mae = mean(abs(err));
try
subtitle(sprintf('Mean absolute error: %.6f', mae), 'Interpreter','latex');
end
print(f, fullfile(outdir,'scn2_final_error_series'), '-dpng');
% (4) MFs (subset of selected inputs) BEFORE/AFTER
% Show up to 3 selected inputs for clarity
nShow = min( min(3, numel(sel_idx)), numel(init_fis.Inputs) );
if nShow > 0
f = figure('Color','w','Name','MFs (subset)');
tl = tiledlayout(2, nShow, 'TileSpacing','compact', 'Padding','compact');
for k=1:nShow
inIdx = k; % first few selected
[xb,yb] = plotmf(init_fis,'input',inIdx);
[xa,ya] = plotmf(final_fis,'input',inIdx);
nexttile(tl,k); hold on; grid on; plot(xb,yb,'LineWidth',1.2);
title(sprintf('BEFORE x_{%d}', sel_idx(k)),'Interpreter','latex');
xlabel(sprintf('x_{%d}', sel_idx(k)),'Interpreter','latex'); ylabel('Membership','Interpreter','latex');
nexttile(tl,nShow+k); hold on; grid on; plot(xa,ya,'LineWidth',1.2);
title(sprintf('AFTER x_{%d}', sel_idx(k)),'Interpreter','latex');
xlabel(sprintf('x_{%d}', sel_idx(k)),'Interpreter','latex'); ylabel('Membership','Interpreter','latex');
end
sgtitle(tl, 'Membership Functions (subset)','Interpreter','latex');
print(f, fullfile(outdir,'scn2_final_mfs_subset'), '-dpng');
end
end
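The function is called once at the end of scenario2.m, with the grid-search results and the final model:

% From scenario2.m: Scenario 2 diagnostics, saved under ./figures_scn2
plot_results2(init_fis, fin_fis, trn_error, val_error, y_chk, y_hat, ...
    config.feature_grid, config.radius_grid, cv_scores, cv_rules, sel_final);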

39
Work 3/source/preprocess_data.m Normal file
View File

@ -0,0 +1,39 @@
function [X_trn_s, X_val_s, X_chk_s, stats] = preprocess_data(X_trn, X_val, X_chk, mode)
%PREPROCESS_DATA Scale input features using statistics from the training set
%   mode = 1: min-max scaling to [0,1] (default), mode = 2: z-score.
%   The train-derived transform is applied unchanged to the validation and
%   check sets, so no information leaks from them into the scaling.
%
if nargin < 4, mode = 1; end % 1: min-max, 2: z-score
switch mode
case 1 % MinMax to [0,1] using TRAIN stats
xmin = min(X_trn,[],1);
xmax = max(X_trn,[],1);
range = xmax - xmin;
range(range==0) = 1;
X_trn_s = (X_trn - xmin) ./ range;
X_val_s = (X_val - xmin) ./ range;
X_chk_s = (X_chk - xmin) ./ range;
stats = struct( ...
'type', 'minmax', ...
'xmin', xmin, ...
'xmax', xmax ...
);
case 2 % Z-score using TRAIN stats (chatGPT gave me this one)
mu = mean(X_trn,1);
sig = std(X_trn,0,1);
sig(sig==0) = 1;
X_trn_s = (X_trn - mu) ./ sig;
X_val_s = (X_val - mu) ./ sig;
X_chk_s = (X_chk - mu) ./ sig;
stats = struct(...
'type', 'zscore', ...
'mu', mu, ...
'sig', sig ...
);
otherwise
error('Unknown mode.');
end
end
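A minimal usage sketch, as in scenario1.m (mode 1, min-max scaling with train statistics):

[X_trn, X_val, X_chk, stats] = preprocess_data(X_trn, X_val, X_chk, 1);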

94
Work 3/source/scenario1.m Normal file
View File

@ -0,0 +1,94 @@
%% Scenario1 (TSK - Airfoil Self-Noise)
%
% Assignment 3 in Fuzzy systems
%
% author:
% Christos Choutouridis ΑΕΜ 8997
% cchoutou@ece.auth.gr
%
clear; clc; close all;
% Configuration
% --------------------------------
config.mf_types = ["constant", "constant", "linear", "linear"];
config.num_mf = [2, 3, 2, 3]; % #MFs per input
config.model_names = ["TSK0-2MF", "TSK0-3MF", "TSK1-2MF", "TSK1-3MF"];
config.Nepochs = 100;
rng(42,'twister');
% Load dataset (Airfoil Self-Noise: 5 inputs, 1 target)
data = load("Datasets/airfoil_self_noise.dat");
% Split data: 60% train, 20% val, 20% check(test)
[X_trn, y_trn, X_val, y_val, X_chk, y_chk] = split_data(data, [0.6 0.2 0.2], 42);
% Preprocess inputs (mode=1 -> MinMax to [0,1])
[X_trn, X_val, X_chk, stats] = preprocess_data(X_trn, X_val, X_chk, 1);
% Pack data for anfis/evalfis convenience
trn_data = [X_trn, y_trn];
val_data = [X_val, y_val];
chk_X = X_chk;
chk_y = y_chk;
% Results
T = table(['MSE '; 'RMSE'; 'R2  '; 'NMSE'; 'NDEI'], 'VariableNames', {'Metrics'});
all_models = cell(1,4);
all_trnErr = cell(1,4);
all_valErr = cell(1,4);
for i = 1:length(config.mf_types)
fprintf('\n');
fprintf('Model %d: %s (gbellmf × %d per input)\n', i, config.model_names(i), config.num_mf(i));
fprintf('=====================================\n');
% Step 1 - Initial FIS via Grid Partition
opt_g = genfisOptions("GridPartition", ...
"InputMembershipFunctionType", "gbellmf", ...
"NumMembershipFunctions", config.num_mf(i), ...
"OutputMembershipFunctionType", config.mf_types(i));
init_fis = genfis(X_trn, y_trn, opt_g);
% Info rules and MF
nRules = numel(init_fis.Rules);
fprintf('Initial rules: %d\n', nRules);
% Step 2 - Train with ANFIS (Hybrid: BP + LSE)
opt_a = anfisOptions( ...
"InitialFis", init_fis, ...
"ValidationData", val_data, ...
"EpochNumber", config.Nepochs, ...
"OptimizationMethod", 1 ... % 1=Hybrid
);
[trn_fis, trn_error, ~, val_fis, val_error] = anfis(trn_data, opt_a);
final_fis = val_fis; % Select final
% Step 3 - Evaluate on Check/Test set
y_hat = evalfis(final_fis, chk_X);
[mse, rmse, r2, nmse, ndei] = evaluate(y_hat, chk_y);
% Print & store metrics
fprintf('MSE : %g\n', mse);
fprintf('RMSE: %g\n', rmse);
fprintf('R2 : %g\n', r2);
fprintf('NMSE: %g\n', nmse);
fprintf('NDEI: %g\n', ndei);
T = addvars(T, [mse; rmse; r2; nmse; ndei], 'NewVariableNames', sprintf('Model_%d',i));
% Plots & figure exports
% - MFs before/after
% - Learning curves
% - Predicted vs Actual
% - Prediction Error
plot_results1(init_fis, final_fis, trn_error, val_error, y_chk, y_hat, i);
end
% Show and save table
disp(T);
writetable(T, 'scenario1_metrics.csv');

6
Work 3/source/scenario1_metrics.csv Normal file
View File

@ -0,0 +1,6 @@
Metrics,Model_1,Model_2,Model_3,Model_4
MSE ,16.9264182248583,17.1406109263442,12.8952585467096,30.4753160187243
RMSE,4.11417284819906,4.14012209075339,3.59099687367027,5.520445273592
R2 ,0.664677433758077,0.660434146998977,0.744537141243185,0.396265586807705
NMSE,0.335322566241923,0.339565853001023,0.255462858756815,0.603734413192295
NDEI,0.579070432885261,0.582722792587541,0.505433337599347,0.777003483384917

175
Work 3/source/scenario2.m Normal file
View File

@ -0,0 +1,175 @@
%% Scenario2 (TSK - Superconductivity, High-dimensional)
%
% Assignment 3 in Fuzzy systems
%
% author:
% Christos Choutouridis ΑΕΜ 8997
% cchoutou@ece.auth.gr
%
% Notes:
% - 5-fold CV grid-search over (#features, SC radius) with careful
% no-leakage pipeline (scaling & ReliefF inside each fold)
% - Final training on Train with Validation on Val, Test on Test
% - Diagnostics: CV heatmap, error-vs-rules, error-vs-#features
% - Classic plots: learning curves, pred vs actual, residuals, MFs (subset)
clear; clc; close all;
% Configuration
% --------------------------------
% Grid of hyperparameters
config.feature_grid = [5 8 11 15];
config.radius_grid = [0.25 0.5 0.75 1.0];
config.K = 5; % 5-fold CV
config.Nepochs = 100;
rng(42,'twister');
fprintf('Scenario 2 Superconduct\n');
fprintf('===============================\n\n');
% Load dataset
fprintf('Loading dataset (superconduct.csv)...\n');
data = load("Datasets/superconduct.csv");
fprintf(' Done: %d samples, %d features + 1 target\n\n', size(data,1), size(data,2)-1);
% Split 60/20/20 and scale using TRAIN stats
fprintf('Splitting dataset into Train/Val/Test [0.6/0.2/0.2] ...\n');
[X_trn, y_trn, X_val_raw, y_val, X_chk_raw, y_chk] = split_data(data, [0.6 0.2 0.2], 42);
fprintf(' Done: Train=%d, Val=%d, Test=%d\n', ...
size(X_trn,1), size(X_val_raw,1), size(X_chk_raw,1));
fprintf('Preprocessing (MinMax scaling to [0,1]) ...\n');
[X_trn_s, X_val_s, X_chk_s, scale_stats] = preprocess_data(X_trn, X_val_raw, X_chk_raw, 1);
fprintf(' Done (mode = %s)\n\n', scale_stats.type);
fprintf('Starting %d×%d grid-search (%d folds per point)...\n', ...
numel(config.feature_grid), numel(config.radius_grid), config.K);
cv_scores = zeros(numel(config.feature_grid), numel(config.radius_grid));
cv_rules = zeros(numel(config.feature_grid), numel(config.radius_grid));
Ntr = size(X_trn_s,1);
folds = cvpartition(Ntr, "KFold", config.K);
for fi = 1:numel(config.feature_grid)
kfeat = config.feature_grid(fi);
for rj = 1:numel(config.radius_grid)
rad = config.radius_grid(rj);
fprintf(' Combo: features=%2d | radius=%.2f ...\n', kfeat, rad);
fold_err = zeros(config.K,1);
fold_rule = zeros(config.K,1);
for k = 1:config.K
fprintf(' Fold %d/%d ... ', k, config.K);
Itr = training(folds, k);
Ivl = test(folds, k);
% Raw (pre-global-scale) slices
x_tr_raw = X_trn(Itr,:); y_tr_f = y_trn(Itr);
x_vl_raw = X_trn(Ivl,:); y_vl_f = y_trn(Ivl);
% Per-fold scaling
[x_tr_f, x_vl_f, ~, ~] = preprocess_data(x_tr_raw, x_vl_raw, x_vl_raw, 1);
% Feature selection (ReliefF)
[idxF, ~] = relieff(x_tr_f, y_tr_f, 10);
kkeep = min(kfeat, size(x_tr_f,2));
sel = idxF(1:kkeep);
x_tr_f = x_tr_f(:, sel);
x_vl_f = x_vl_f(:, sel);
% Init FIS (SC)
gopt = genfisOptions("SubtractiveClustering", ...
"ClusterInfluenceRange", rad);
init_fis = genfis(x_tr_f, y_tr_f, gopt);
% Train (Hybrid)
aopt = anfisOptions("InitialFis", init_fis, ...
"ValidationData", [x_vl_f y_vl_f], ...
"EpochNumber", config.Nepochs, ...
"OptimizationMethod", 1, ...
"DisplayErrorValues", 0, ...
"DisplayStepSize", 0);
[~, ~, ~, vl_fis, vl_err] = anfis([x_tr_f y_tr_f], aopt);
fold_err(k) = min(vl_err);
fold_rule(k) = numel(vl_fis.Rules);
fprintf(' Done (val err=%.4g, rules=%d)\n', fold_err(k), fold_rule(k));
end
cv_scores(fi, rj) = mean(fold_err);
cv_rules(fi, rj) = round(mean(fold_rule));
fprintf(' Mean CV error=%.4g | Mean rules=%d\n\n', ...
cv_scores(fi,rj), cv_rules(fi,rj));
end
end
fprintf('Grid-search completed.\n');
% Pick best hyper-parameters
[minErr, ix] = min(cv_scores(:));
[fi_best, rj_best] = ind2sub(size(cv_scores), ix);
best_feats = config.feature_grid(fi_best);
best_rad = config.radius_grid(rj_best);
fprintf('\n');
fprintf(' Best combo found: features=%d, radius=%.2f\n', best_feats, best_rad);
fprintf(' Mean CV error : %.4g\n', minErr);
fprintf('===========================================\n\n');
% Final training phase
fprintf('Final training using best hyper-parameters...\n');
[idx_all, ~] = relieff(X_trn_s, y_trn, 10);
kkeep = min(best_feats, size(X_trn_s,2));
sel_final = idx_all(1:kkeep);
XtrF = X_trn_s(:, sel_final);
XvlF = X_val_s(:, sel_final);
XteF = X_chk_s(:, sel_final);
fprintf(' Selected %d features (ReliefF top indices)\n', kkeep);
fprintf(' Building initial FIS (SC radius=%.2f)...\n', best_rad);
gopt = genfisOptions("SubtractiveClustering", "ClusterInfluenceRange", best_rad);
init_fis = genfis(XtrF, y_trn, gopt);
fprintf(' Initial FIS created with %d rules\n', numel(init_fis.Rules));
aopt = anfisOptions("InitialFis", init_fis, ...
"ValidationData", [XvlF y_val], ...
"EpochNumber", config.Nepochs, ...
"OptimizationMethod", 1, ...
"DisplayErrorValues", 1, ...
"DisplayStepSize", 0);
fprintf('Training ANFIS (Hybrid optimization)...\n');
[trn_fis, trn_error, ~, fin_fis, val_error] = anfis([XtrF y_trn], aopt);
fprintf(' Done. Final FIS has %d rules.\n\n', numel(fin_fis.Rules));
% Evaluation on TEST
fprintf('Evaluating on Test set ...\n');
y_hat = evalfis(fin_fis, XteF);
[mse, rmse, r2, nmse, ndei] = evaluate(y_hat, y_chk);
fprintf('\n');
fprintf(' FINAL MODEL PERFORMANCE (Test set)\n');
fprintf('-------------------------------------------\n');
fprintf(' MSE : %g\n', mse);
fprintf(' RMSE: %g\n', rmse);
fprintf(' R2 : %g\n', r2);
fprintf(' NMSE: %g\n', nmse);
fprintf(' NDEI: %g\n', ndei);
fprintf('\n');
% Plots
fprintf('Generating diagnostic plots ...\n');
plot_results2( ...
init_fis, fin_fis, trn_error, val_error, y_chk, y_hat, ...
config.feature_grid, config.radius_grid, cv_scores, cv_rules, sel_final ...
);
fprintf(' All plots saved in ./figures_scn2\n');

25
Work 3/source/split_data.m Normal file
View File

@ -0,0 +1,25 @@
function [X_trn, y_trn, X_val, y_val, X_chk, y_chk] = split_data(data, ratios, seed)
%SPLIT_DATA Randomly split a data set into train, validation and check sets
%   The rows of data are shuffled reproducibly (via the given seed) and split
%   according to the given ratios; the last column is returned as the target y.
%
% Argument check and default values
if nargin < 2 || isempty(ratios), ratios = [0.6 0.2 0.2]; end
if nargin == 2, rng(42, 'twister'); end
if nargin > 2, rng(seed,'twister'); end
N = size(data,1); % Scramble the set
idx = randperm(N);
Ntrn = round(ratios(1)*N); % Split positions
Nval = round(ratios(2)*N);
Itrn = idx(1:Ntrn);
Ival = idx(Ntrn+1:Ntrn+Nval);
Ichk = idx(Ntrn+Nval+1:end);
% Pick the data (in X and y)
X_trn = data(Itrn, 1:end-1); y_trn = data(Itrn, end);
X_val = data(Ival, 1:end-1); y_val = data(Ival, end);
X_chk = data(Ichk, 1:end-1); y_chk = data(Ichk, end);
end
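A minimal usage sketch, as in scenario1.m (60/20/20 split with a fixed seed):

[X_trn, y_trn, X_val, y_val, X_chk, y_chk] = split_data(data, [0.6 0.2 0.2], 42);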