Commit 030c263 by 5t4l1n (verified) · 1 parent: b13886d

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.

Files changed (50):
  1. .gitattributes +2 -32
  2. .hfignore +6 -0
  3. README.md +2 -0
  4. backend/app/main.py +0 -0
  5. backend/requirements.txt +0 -0
  6. backup.ipynb +765 -0
  7. dataset/.ipynb_checkpoints/training-checkpoint.ipynb +446 -0
  8. dataset/G1020/G1020.csv +1021 -0
  9. dataset/G1020/Images_Cropped/017.jpg +0 -0
  10. dataset/G1020/Images_Cropped/022.jpg +0 -0
  11. dataset/G1020/Images_Cropped/040.jpg +0 -0
  12. dataset/G1020/Images_Cropped/048.jpg +0 -0
  13. dataset/G1020/Images_Cropped/062.jpg +0 -0
  14. dataset/G1020/Images_Cropped/077.jpg +0 -0
  15. dataset/G1020/Images_Cropped/090.jpg +0 -0
  16. dataset/G1020/Images_Cropped/117.jpg +0 -0
  17. dataset/G1020/Images_Cropped/125.jpg +0 -0
  18. dataset/G1020/Images_Cropped/148.jpg +0 -0
  19. dataset/G1020/Images_Cropped/162.jpg +0 -0
  20. dataset/G1020/Images_Cropped/165.jpg +0 -0
  21. dataset/G1020/Images_Cropped/184.jpg +0 -0
  22. dataset/G1020/Images_Cropped/188.jpg +0 -0
  23. dataset/G1020/Images_Cropped/249.jpg +0 -0
  24. dataset/G1020/Images_Cropped/258.jpg +0 -0
  25. dataset/G1020/Images_Cropped/322.jpg +0 -0
  26. dataset/G1020/Images_Cropped/328.jpg +0 -0
  27. dataset/G1020/Images_Cropped/332.jpg +0 -0
  28. dataset/G1020/Images_Cropped/334.jpg +0 -0
  29. dataset/G1020/Images_Cropped/340.jpg +0 -0
  30. dataset/G1020/Images_Cropped/341.jpg +0 -0
  31. dataset/G1020/Images_Cropped/374.jpg +0 -0
  32. dataset/G1020/Images_Cropped/377.jpg +0 -0
  33. dataset/G1020/Images_Cropped/394.jpg +0 -0
  34. dataset/G1020/Images_Cropped/418.jpg +0 -0
  35. dataset/G1020/Images_Cropped/432.jpg +0 -0
  36. dataset/G1020/Images_Cropped/452.jpg +0 -0
  37. dataset/G1020/Images_Cropped/454.jpg +0 -0
  38. dataset/G1020/Images_Cropped/468.jpg +0 -0
  39. dataset/G1020/Images_Cropped/476.jpg +0 -0
  40. dataset/G1020/Images_Cropped/493.jpg +0 -0
  41. dataset/G1020/Images_Cropped/500.jpg +0 -0
  42. dataset/G1020/Images_Cropped/515.jpg +0 -0
  43. dataset/G1020/Images_Cropped/575.jpg +0 -0
  44. dataset/G1020/Images_Cropped/603.jpg +0 -0
  45. dataset/G1020/Images_Cropped/609.jpg +0 -0
  46. dataset/G1020/Images_Cropped/613.jpg +0 -0
  47. dataset/G1020/Images_Cropped/img/image_1095.jpg +0 -0
  48. dataset/G1020/Images_Cropped/img/image_1187.jpg +0 -0
  49. dataset/G1020/Images_Cropped/img/image_1201.jpg +0 -0
  50. dataset/G1020/Images_Cropped/img/image_1203.jpg +0 -0
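
The commit message names the upload-large-folder tool. For context, a minimal sketch of how such a commit is typically produced with recent versions of huggingface_hub follows; the repo id, repo type, and local path are illustrative assumptions, not values recorded in this commit.

# Hedged sketch: assumes a recent huggingface_hub that provides
# HfApi.upload_large_folder. The repo id and paths below are hypothetical.
from huggingface_hub import HfApi

api = HfApi()  # reads the token from HF_TOKEN or the cached login
api.upload_large_folder(
    repo_id="5t4l1n/ai-eye-disease-detection",  # hypothetical repo id
    repo_type="model",                          # assumption; could be "dataset"
    folder_path="./ai-eye-disease-detection",   # local folder to upload
)

The uploader resumes interrupted transfers and batches many files per commit, which is why a single commit like this one can add hundreds of files.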
.gitattributes CHANGED
@@ -1,35 +1,5 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
  *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
  *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.jpg filter=lfs diff=lfs merge=lfs -text
+ *.png filter=lfs diff=lfs merge=lfs -text
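
After this change, only five patterns route files through Git LFS. A small, illustrative Python check of how those basename patterns classify paths in this commit; this approximates gitattributes glob matching with fnmatch, which is close enough for the simple "*.ext" patterns kept above, and is not part of the repository.

from fnmatch import fnmatch

# The five patterns left in .gitattributes after this commit.
LFS_PATTERNS = ["*.h5", "*.pt", "*.zip", "*.jpg", "*.png"]

def uses_lfs(path: str) -> bool:
    # gitattributes patterns without "/" match against the basename
    name = path.rsplit("/", 1)[-1]
    return any(fnmatch(name, pat) for pat in LFS_PATTERNS)

print(uses_lfs("dataset/G1020/Images_Cropped/017.jpg"))  # True: stored via LFS
print(uses_lfs("backend/requirements.txt"))              # False: plain Git blob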
.hfignore ADDED
@@ -0,0 +1,6 @@
+ venv/
+ __pycache__/
+ *.pyc
+ .DS_Store
+ *.ipynb_checkpoints
+
README.md ADDED
@@ -0,0 +1,2 @@
+ # ai-eye-disease-detection
+ # ai-eye-disease-detection
backend/app/main.py ADDED
File without changes
backend/requirements.txt ADDED
File without changes
backup.ipynb ADDED
@@ -0,0 +1,765 @@
(The notebook adds a single code cell; the cell's Python source is shown below.)

#!/usr/bin/env python3
"""
Advanced Eye Disease Detection Training Script - Fixed Version
"""

import os
import sys
import json
import numpy as np
import cv2
from pathlib import Path
from tqdm import tqdm
import matplotlib.pyplot as plt
from collections import Counter
import warnings
warnings.filterwarnings('ignore')

# Check if required packages are available
try:
    import tensorflow as tf
    from tensorflow import keras
    from tensorflow.keras import layers
    from tensorflow.keras.applications import EfficientNetB3, ResNet152V2, DenseNet201
    from tensorflow.keras.preprocessing.image import ImageDataGenerator
    from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
    from sklearn.model_selection import train_test_split, StratifiedKFold
    from sklearn.preprocessing import LabelEncoder
    from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
    from sklearn.utils.class_weight import compute_class_weight
    import seaborn as sns
    from imblearn.over_sampling import SMOTE
    from imblearn.combine import SMOTETomek
    # Optional: Kaggle API (install if needed)
    try:
        from kaggle.api.kaggle_api_extended import KaggleApi
        KAGGLE_AVAILABLE = True
    except ImportError:
        KAGGLE_AVAILABLE = False
        print("⚠️ Kaggle API not available. Manual dataset download required.")
except ImportError as e:
    print(f"Missing required package: {e}")
    print("Please run: pip install tensorflow opencv-python scikit-learn matplotlib tqdm seaborn imbalanced-learn")
    sys.exit(1)

print("✅ All required packages are available!")

class AdvancedFundusPreprocessor:
    """Advanced preprocessing pipeline for fundus images"""

    def __init__(self, image_size=(224, 224)):
        self.image_size = image_size
        self.setup_clahe_variants()

    def setup_clahe_variants(self):
        """Setup multiple CLAHE variants for different image characteristics"""
        self.clahe_normal = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        self.clahe_aggressive = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(4, 4))
        self.clahe_gentle = cv2.createCLAHE(clipLimit=1.5, tileGridSize=(16, 16))

    def advanced_preprocess_image(self, image_path):
        """Advanced preprocessing with multiple enhancement techniques"""
        try:
            # Read image
            image = cv2.imread(str(image_path))
            if image is None:
                print(f"⚠️ Could not read image: {image_path}")
                return None

            # Convert to RGB
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Apply advanced preprocessing pipeline
            image = self.enhance_fundus_image(image)

            # Resize with high-quality interpolation
            image = cv2.resize(image, self.image_size, interpolation=cv2.INTER_LANCZOS4)

            # Normalize to [0, 1]
            image = image.astype(np.float32) / 255.0

            return image
        except Exception as e:
            print(f"Error processing {image_path}: {e}")
            return None

    def enhance_fundus_image(self, image):
        """Advanced fundus-specific enhancement techniques"""
        # Apply CLAHE to LAB color space
        lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        l_channel = lab[:, :, 0]

        # Apply CLAHE to L channel
        l_clahe = self.clahe_normal.apply(l_channel)
        lab[:, :, 0] = l_clahe
        image_enhanced = cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)

        # Green channel enhancement (important for fundus images)
        green_enhanced = cv2.equalizeHist(image_enhanced[:, :, 1])
        image_enhanced[:, :, 1] = green_enhanced

        # Apply gamma correction
        image_enhanced = self.adjust_gamma(image_enhanced, gamma=1.2)

        return image_enhanced

    def adjust_gamma(self, image, gamma=1.0):
        """Apply gamma correction via a 256-entry lookup table"""
        inv_gamma = 1.0 / gamma
        table = np.array([((i / 255.0) ** inv_gamma) * 255 for i in np.arange(0, 256)]).astype("uint8")
        return cv2.LUT(image, table)

class AdvancedEnsembleModel:
    """Advanced ensemble model combining multiple architectures"""

    def __init__(self, image_size=(224, 224), num_classes=7):
        self.image_size = image_size
        self.num_classes = num_classes

    def create_efficientnet_model(self):
        """Create EfficientNetB3 based model"""
        base_model = EfficientNetB3(
            weights='imagenet',
            include_top=False,
            input_shape=(*self.image_size, 3)
        )

        # Fine-tune top layers
        base_model.trainable = True
        for layer in base_model.layers[:-30]:
            layer.trainable = False

        inputs = keras.Input(shape=(*self.image_size, 3))

        # Base model
        x = base_model(inputs, training=False)

        # Advanced pooling
        gap = layers.GlobalAveragePooling2D()(x)
        gmp = layers.GlobalMaxPooling2D()(x)
        x = layers.Concatenate()([gap, gmp])

        # Advanced head
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.5)(x)

        x = layers.Dense(512, activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.4)(x)

        x = layers.Dense(256, activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.3)(x)

        outputs = layers.Dense(self.num_classes, activation='softmax')(x)

        return keras.Model(inputs, outputs, name='EfficientNetB3_Model')

    def create_resnet_model(self):
        """Create ResNet152V2 based model"""
        base_model = ResNet152V2(
            weights='imagenet',
            include_top=False,
            input_shape=(*self.image_size, 3)
        )

        base_model.trainable = True
        for layer in base_model.layers[:-40]:
            layer.trainable = False

        inputs = keras.Input(shape=(*self.image_size, 3))

        x = base_model(inputs, training=False)

        # Global pooling
        x = layers.GlobalAveragePooling2D()(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.5)(x)

        x = layers.Dense(512, activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.4)(x)

        outputs = layers.Dense(self.num_classes, activation='softmax')(x)

        return keras.Model(inputs, outputs, name='ResNet152V2_Model')

    def create_densenet_model(self):
        """Create DenseNet201 based model"""
        base_model = DenseNet201(
            weights='imagenet',
            include_top=False,
            input_shape=(*self.image_size, 3)
        )

        base_model.trainable = True
        for layer in base_model.layers[:-50]:
            layer.trainable = False

        inputs = keras.Input(shape=(*self.image_size, 3))

        x = base_model(inputs, training=False)

        x = layers.GlobalAveragePooling2D()(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.5)(x)

        x = layers.Dense(512, activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.4)(x)

        outputs = layers.Dense(self.num_classes, activation='softmax')(x)

        return keras.Model(inputs, outputs, name='DenseNet201_Model')

class AdvancedFundusTrainer:
    def __init__(self):
        self.dataset_path = "fundus_dataset"
        self.model_path = "models"
        self.image_size = (224, 224)
        self.batch_size = 16

        # Initialize components
        self.preprocessor = AdvancedFundusPreprocessor(self.image_size)
        self.ensemble_model = AdvancedEnsembleModel(self.image_size)

        # Create directories
        Path(self.model_path).mkdir(exist_ok=True)
        Path("logs").mkdir(exist_ok=True)
        Path("plots").mkdir(exist_ok=True)

    def download_dataset(self):
        """Download the Kaggle dataset"""
        if not KAGGLE_AVAILABLE:
            print("❌ Kaggle API not available. Please download dataset manually.")
            print("Dataset: https://www.kaggle.com/datasets/linchundan/fundusimage1000")
            print(f"Extract to: {self.dataset_path}/")
            return False

        print("📥 Downloading dataset from Kaggle...")

        try:
            api = KaggleApi()
            api.authenticate()

            api.dataset_download_files(
                "linchundan/fundusimage1000",
                path=self.dataset_path,
                unzip=True
            )
            print("✅ Dataset downloaded successfully!")
            return True

        except Exception as e:
            print(f"❌ Download failed: {e}")
            print("Please download dataset manually and extract to fundus_dataset/")
            return False

    def load_dataset(self):
        """Load dataset with preprocessing - Fixed Version"""
        print("📁 Loading dataset...")

        images = []
        labels = []

        dataset_path = Path(self.dataset_path)

        # Debug: Print directory structure
        print(f"\nDirectory structure at {dataset_path}:")
        for item in dataset_path.rglob('*'):
            print(f"  {item.relative_to(dataset_path)}")

        # Check for common dataset structures
        possible_paths = [
            dataset_path,
            dataset_path / "1000images",
            dataset_path / "fundusimage1000",
            dataset_path / "images",
            dataset_path / "data"
        ]

        actual_path = None
        for path in possible_paths:
            if path.exists():
                print(f"\nChecking path: {path}")
                # Count image files in this path
                image_count = sum(1 for _ in path.rglob('*.[jJ][pP][gG]')) + \
                              sum(1 for _ in path.rglob('*.[jJ][pP][eE][gG]')) + \
                              sum(1 for _ in path.rglob('*.[pP][nN][gG]'))
                print(f"  Found {image_count} images")
                if image_count > 0:
                    actual_path = path
                    break

        if actual_path is None:
            print(f"\n❌ No valid dataset found in {dataset_path}")
            print("Please ensure the dataset contains image files in one of these structures:")
            print("1. Directly in fundus_dataset/")
            print("2. In fundus_dataset/1000images/")
            print("3. In fundus_dataset/fundusimage1000/")
            print("4. In subdirectories by class")
            return None, None, None

        print(f"\n✅ Using dataset path: {actual_path}")

        # Find all image files
        image_files = list(actual_path.rglob('*.[jJ][pP][gG]')) + \
                      list(actual_path.rglob('*.[jJ][pP][eE][gG]')) + \
                      list(actual_path.rglob('*.[pP][nN][gG]'))

        if not image_files:
            print("❌ No image files found in the dataset directory")
            return None, None, None

        print(f"\nFound {len(image_files)} image files")

        # Process images
        class_counts = Counter()
        for image_file in tqdm(image_files, desc="Processing images"):
            processed_image = self.preprocessor.advanced_preprocess_image(image_file)
            if processed_image is not None:
                images.append(processed_image)
                # Use parent directory name as class label
                class_label = image_file.parent.name
                if class_label == actual_path.name:  # If image is in root directory
                    class_label = "unknown"
                labels.append(class_label)
                class_counts[class_label] += 1

        if len(images) == 0:
            print("❌ No images loaded successfully")
            return None, None, None

        # Print class distribution
        print("\nClass distribution:")
        for class_name, count in class_counts.most_common():
            print(f"  {class_name}: {count} images")

        # Convert to numpy arrays
        X = np.array(images)
        y = np.array(labels)

        # Encode labels
        label_encoder = LabelEncoder()
        y_encoded = label_encoder.fit_transform(y)

        print(f"\n✅ Dataset loaded: {len(X)} images, {len(label_encoder.classes_)} classes")

        return X, y_encoded, label_encoder.classes_

    def balance_dataset(self, X, y):
        """Balance dataset using SMOTE"""
        print("⚖️ Balancing dataset...")

        try:
            # Reshape for SMOTE
            X_flattened = X.reshape(X.shape[0], -1)

            # Apply SMOTE
            smote = SMOTE(random_state=42, k_neighbors=min(5, len(np.unique(y)) - 1))
            X_balanced, y_balanced = smote.fit_resample(X_flattened, y)

            # Reshape back
            X_balanced = X_balanced.reshape(-1, *self.image_size, 3)

            print(f"Dataset balanced: {len(X_balanced)} samples")
            return X_balanced, y_balanced

        except Exception as e:
            print(f"SMOTE failed: {e}, using original dataset")
            return X, y

    def create_data_generators(self, X_train, y_train, X_val, y_val):
        """Create data generators with augmentation"""
        # Training data generator with augmentation
        train_datagen = ImageDataGenerator(
            rotation_range=20,
            width_shift_range=0.15,
            height_shift_range=0.15,
            horizontal_flip=True,
            vertical_flip=True,
            zoom_range=0.15,
            brightness_range=[0.8, 1.2],
            shear_range=0.1,
            fill_mode='reflect'
        )

        # Validation data generator (no augmentation)
        val_datagen = ImageDataGenerator()

        train_generator = train_datagen.flow(
            X_train, y_train,
            batch_size=self.batch_size,
            shuffle=True
        )

        val_generator = val_datagen.flow(
            X_val, y_val,
            batch_size=self.batch_size,
            shuffle=False
        )

        return train_generator, val_generator

    def create_ensemble(self, num_classes):
        """Create ensemble model"""
        print("🧠 Creating ensemble model...")

        # Set the number of classes before building, so each softmax head
        # has the right width (setting it after creation has no effect)
        self.ensemble_model.num_classes = num_classes

        # Create individual models
        efficientnet_model = self.ensemble_model.create_efficientnet_model()
        resnet_model = self.ensemble_model.create_resnet_model()
        densenet_model = self.ensemble_model.create_densenet_model()

        # Compile models; each model gets its own optimizer instance,
        # since a single optimizer cannot be shared across models
        for model in [efficientnet_model, resnet_model, densenet_model]:
            model.compile(
                optimizer=keras.optimizers.Adam(learning_rate=1e-4),
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy']
            )

        models = [efficientnet_model, resnet_model, densenet_model]
        model_names = ['EfficientNetB3', 'ResNet152V2', 'DenseNet201']

        print(f"✅ Created ensemble with {len(models)} models")
        return models, model_names

    def train_ensemble(self, models, model_names, train_gen, val_gen, class_names, epochs=30):
        """Train ensemble models"""
        print("🚀 Training ensemble...")

        # Calculate class weights for imbalanced data
        y_train_full = []
        for _ in range(len(train_gen)):
            _, y_batch = next(train_gen)
            y_train_full.extend(y_batch)

        class_weights = compute_class_weight(
            'balanced',
            classes=np.unique(y_train_full),
            y=y_train_full
        )
        class_weight_dict = dict(enumerate(class_weights))

        # Reset generator
        train_gen.reset()

        # Train each model
        histories = []
        for model, model_name in zip(models, model_names):
            print(f"\n{'='*50}")
            print(f"Training {model_name}")
            print(f"{'='*50}")

            # Callbacks
            callbacks = [
                EarlyStopping(
                    monitor='val_accuracy',
                    patience=10,
                    restore_best_weights=True,
                    verbose=1
                ),
                ReduceLROnPlateau(
                    monitor='val_loss',
                    patience=5,
                    factor=0.5,
                    min_lr=1e-7,
                    verbose=1
                ),
                ModelCheckpoint(
                    f"{self.model_path}/best_{model_name.lower()}_model.h5",
                    monitor='val_accuracy',
                    save_best_only=True,
                    verbose=1
                )
            ]

            history = model.fit(
                train_gen,
                epochs=epochs,
                validation_data=val_gen,
                callbacks=callbacks,
                class_weight=class_weight_dict,
                verbose=1
            )

            histories.append(history)
            train_gen.reset()
            val_gen.reset()

        return histories

    def evaluate_ensemble(self, models, model_names, X_test, y_test, class_names):
        """Evaluate ensemble model"""
        print("📊 Evaluating ensemble...")

        # Individual model predictions
        all_predictions = []
        individual_scores = []

        for model, model_name in zip(models, model_names):
            print(f"\nEvaluating {model_name}:")

            test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=0)
            predictions = model.predict(X_test, verbose=0)
            all_predictions.append(predictions)

            individual_scores.append({
                'model': model_name,
                'accuracy': test_accuracy,
                'loss': test_loss
            })

            print(f"  Accuracy: {test_accuracy:.4f}")

        # Ensemble predictions (weighted average)
        weights = [0.4, 0.35, 0.25]  # EfficientNet gets highest weight
        ensemble_predictions = np.average(all_predictions, axis=0, weights=weights)
        ensemble_pred_classes = np.argmax(ensemble_predictions, axis=1)

        # Calculate ensemble accuracy
        ensemble_accuracy = accuracy_score(y_test, ensemble_pred_classes)

        print("\n🎯 ENSEMBLE RESULTS:")
        print(f"Ensemble Accuracy: {ensemble_accuracy:.4f} ({ensemble_accuracy*100:.2f}%)")

        # Classification report
        print("\nEnsemble Classification Report:")
        report = classification_report(
            y_test, ensemble_pred_classes,
            target_names=class_names,
            output_dict=True,
            zero_division=0
        )
        print(classification_report(y_test, ensemble_pred_classes, target_names=class_names, zero_division=0))

        # Plot confusion matrix
        self.plot_confusion_matrix(y_test, ensemble_pred_classes, class_names)

        return ensemble_accuracy, individual_scores, report

    def plot_confusion_matrix(self, y_true, y_pred, class_names):
        """Plot and save confusion matrix"""
        cm = confusion_matrix(y_true, y_pred)

        plt.figure(figsize=(10, 8))
        sns.heatmap(
            cm,
            annot=True,
            fmt='d',
            cmap='Blues',
            xticklabels=class_names,
            yticklabels=class_names,
            annot_kws={'size': 10}
        )
        plt.title('Ensemble Confusion Matrix', fontsize=16)
        plt.ylabel('True Label', fontsize=12)
        plt.xlabel('Predicted Label', fontsize=12)
        plt.xticks(rotation=45, ha='right')
        plt.yticks(rotation=0)
        plt.tight_layout()

        plt.savefig('plots/confusion_matrix.png', dpi=300, bbox_inches='tight')
        plt.close()

        print("✅ Confusion matrix saved to plots/confusion_matrix.png")

    def plot_training_history(self, histories, model_names):
        """Plot training history"""
        plt.figure(figsize=(15, 10))

        colors = ['blue', 'red', 'green']

        # Accuracy
        plt.subplot(2, 2, 1)
        for history, model_name, color in zip(histories, model_names, colors):
            plt.plot(history.history['accuracy'], label=f'{model_name} Train', color=color, linestyle='-')
            plt.plot(history.history['val_accuracy'], label=f'{model_name} Val', color=color, linestyle='--')

        plt.title('Model Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.legend()
        plt.grid(True)

        # Loss
        plt.subplot(2, 2, 2)
        for history, model_name, color in zip(histories, model_names, colors):
            plt.plot(history.history['loss'], label=f'{model_name} Train', color=color, linestyle='-')
            plt.plot(history.history['val_loss'], label=f'{model_name} Val', color=color, linestyle='--')

        plt.title('Model Loss')
        plt.xlabel('Epoch')
        plt.ylabel('Loss')
        plt.legend()
        plt.grid(True)

        plt.tight_layout()
        plt.savefig('plots/training_history.png', dpi=300, bbox_inches='tight')
        plt.close()

        print("✅ Training history saved to plots/training_history.png")

    def save_ensemble(self, models, model_names, class_names, ensemble_accuracy, individual_scores):
        """Save ensemble models and metadata"""
        print("💾 Saving ensemble...")

        # Save individual models
        model_files = []
        for model, model_name in zip(models, model_names):
            model_file = f"{self.model_path}/{model_name.lower()}_model.keras"
            model.save(model_file)
            model_files.append(model_file)
            print(f"✅ {model_name} saved to: {model_file}")

        # Save metadata
        metadata = {
            'ensemble_accuracy': float(ensemble_accuracy),
            'individual_models': individual_scores,
            'model_files': model_files,
            'num_classes': len(class_names),
            'image_size': self.image_size,
            'class_names': class_names.tolist(),
            'ensemble_weights': [0.4, 0.35, 0.25]
        }

        metadata_file = f"{self.model_path}/ensemble_metadata.json"
        with open(metadata_file, 'w') as f:
            json.dump(metadata, f, indent=2)

        # Save class names
        classes_file = f"{self.model_path}/classes.json"
        with open(classes_file, 'w') as f:
            json.dump(class_names.tolist(), f, indent=2)

        print(f"✅ Metadata saved to: {metadata_file}")
        print(f"✅ Classes saved to: {classes_file}")

    def run_training(self):
        """Run the complete training pipeline"""
        print("🎯 Eye Disease Detection - Advanced Training Pipeline")
        print("=" * 60)

        # Step 1: Download dataset if needed
        if not os.path.exists(self.dataset_path):
            if not self.download_dataset():
                print("Please download and extract the dataset manually.")
                return False

        # Step 2: Load dataset
        result = self.load_dataset()
        if result[0] is None:
            print("❌ Failed to load dataset. Exiting.")
            return False

        X, y, class_names = result

        if len(X) < 50:
            print(f"❌ Not enough images ({len(X)}). Need at least 50 for training.")
            return False

        # Step 3: Balance dataset
        X_balanced, y_balanced = self.balance_dataset(X, y)

        # Step 4: Split dataset
        print("✂️ Splitting dataset...")
        X_train, X_temp, y_train, y_temp = train_test_split(
            X_balanced, y_balanced, test_size=0.3, random_state=42, stratify=y_balanced
        )
        X_val, X_test, y_val, y_test = train_test_split(
            X_temp, y_temp, test_size=0.5, random_state=42, stratify=y_temp
        )

        print(f"Train: {len(X_train)}, Val: {len(X_val)}, Test: {len(X_test)}")

        # Step 5: Create data generators
        train_gen, val_gen = self.create_data_generators(X_train, y_train, X_val, y_val)

        # Step 6: Create and train ensemble
        models, model_names = self.create_ensemble(len(class_names))
        histories = self.train_ensemble(models, model_names, train_gen, val_gen, class_names)

        # Step 7: Evaluate ensemble
        ensemble_accuracy, individual_scores, report = self.evaluate_ensemble(
            models, model_names, X_test, y_test, class_names
        )

        # Step 8: Save results
        self.save_ensemble(models, model_names, class_names, ensemble_accuracy, individual_scores)
        self.plot_training_history(histories, model_names)

        print("\n🎉 TRAINING COMPLETED!")
        print(f"🎯 Final Ensemble Accuracy: {ensemble_accuracy:.4f} ({ensemble_accuracy*100:.2f}%)")

        return True

def main():
    """Main function"""
    print("🚀 Advanced Eye Disease Detection Training")
    print("=" * 50)

    # System information
    print("🔧 System Information:")
    print(f"  TensorFlow version: {tf.__version__}")
    print(f"  GPU available: {len(tf.config.list_physical_devices('GPU')) > 0}")

    # GPU setup
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
            print("  ✅ GPU memory growth enabled")
        except RuntimeError as e:
            print(f"  ⚠️ GPU setup warning: {e}")

    print()

    # Initialize and run trainer
    trainer = AdvancedFundusTrainer()
    success = trainer.run_training()

    if success:
        print("\n" + "="*50)
        print("🎉 TRAINING SUCCESSFULLY COMPLETED!")
        print("="*50)
        print("\n📦 Generated Assets:")
        print("  🤖 Ensemble models (EfficientNetB3 + ResNet152V2 + DenseNet201)")
        print("  📊 Performance analysis and visualizations")
        print("  📋 Metadata for deployment")

    else:
        print("\n❌ TRAINING FAILED")
        print("Please check the error messages above.")

if __name__ == "__main__":
    main()
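
Since save_ensemble() records the model file paths, class names, and soft-voting weights in ensemble_metadata.json, reloading the ensemble for inference is mostly a matter of reading that file back. A minimal, untested sketch under the assumption that the training above has already produced models/*.keras and models/ensemble_metadata.json:

import json
import numpy as np
from tensorflow import keras

# Load the metadata written by save_ensemble() above.
with open("models/ensemble_metadata.json") as f:
    meta = json.load(f)

models = [keras.models.load_model(path) for path in meta["model_files"]]
weights = meta["ensemble_weights"]  # [0.4, 0.35, 0.25] per the training script

def predict_ensemble(batch):
    """batch: float32 array of shape (N, 224, 224, 3), preprocessed as in training."""
    preds = [m.predict(batch, verbose=0) for m in models]
    avg = np.average(preds, axis=0, weights=weights)  # weighted soft voting
    return [meta["class_names"][i] for i in np.argmax(avg, axis=1)]

Note that inputs must go through the same AdvancedFundusPreprocessor pipeline used at training time, or the ensemble will see a distribution it was never trained on.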
dataset/.ipynb_checkpoints/training-checkpoint.ipynb ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "039f2cb9",
7
+ "metadata": {
8
+ "vscode": {
9
+ "languageId": "plaintext"
10
+ }
11
+ },
12
+ "outputs": [],
13
+ "source": [
14
+ "import torch\n",
15
+ "import torch.nn as nn\n",
16
+ "import torch.optim as optim\n",
17
+ "from torch.utils.data import Dataset, DataLoader\n",
18
+ "from torchvision import transforms\n",
19
+ "from torchvision.models import resnet101\n",
20
+ "from sklearn.model_selection import train_test_split\n",
21
+ "from sklearn.metrics import f1_score, precision_score, recall_score\n",
22
+ "import pandas as pd\n",
23
+ "import os\n",
24
+ "import cv2\n",
25
+ "import json\n",
26
+ "from PIL import Image\n",
27
+ "import glob\n",
28
+ "\n",
29
+ "# Define the G1020 Dataset class\n",
30
+ "class G1020Dataset(Dataset):\n",
31
+ " def __init__(self, img_paths, cdr_values, transform=None):\n",
32
+ " self.img_paths = img_paths\n",
33
+ " self.cdr_values = cdr_values\n",
34
+ " self.transform = transform\n",
35
+ " \n",
36
+ " def __len__(self):\n",
37
+ " return len(self.img_paths)\n",
38
+ " \n",
39
+ " def __getitem__(self, idx):\n",
40
+ " # Load image\n",
41
+ " img_path = self.img_paths[idx]\n",
42
+ " img = cv2.imread(img_path)\n",
43
+ " \n",
44
+ " # Check if the image was loaded successfully\n",
45
+ " if img is None:\n",
46
+ " raise FileNotFoundError(f\"Image not found or unable to load: {img_path}\")\n",
47
+ " \n",
48
+ " img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n",
49
+ " ls Images\n",
50
+ "image_0.jpg\timage_1248.jpg image_1495.jpg image_1748.jpg image_2002.jpg image_2223.jpg image_2430.jpg image_2623.jpg\timage_2861.jpg\timage_3117.jpg image_489.jpg image_704.jpg\n",
51
+ "image_0.json\timage_1248.json image_1495.json image_1748.json image_2002.json image_2223.json image_2430.json image_2623.json\timage_2861.json image_3117.json image_489.json image_704.json\n",
52
+ "image_1000.jpg\timage_124.jpg\t image_1496.jpg image_1759.jpg image_200.jpg image_2225.jpg image_2431.jpg image_262.jpg\timage_2880.jpg\timage_3120.jpg image_48.jpg\t image_708.jpg\n",
53
+ "image_1000.json image_124.json image_1496.json image_1759.json image_200.json image_2225.json image_2431.json image_262.json\timage_2880.json image_3120.json image_48.json image_708.json\n",
54
+ "image_1001.jpg\timage_1250.jpg image_1497.jpg image_1760.jpg image_2019.jpg image_2226.jpg image_2434.jpg image_264.jpg\timage_2881.jpg\timage_3121.jpg image_490.jpg image_709.jpg\n",
55
+ "image_1001.json image_1250.json image_1497.json image_1760.json image_2019.json image_2226.json image_2434.json image_264.json\timage_2881.json image_3121.json image_490.json image_709.json\n",
56
+ "image_1002.jpg\timage_1254.jpg image_1498.jpg image_1765.jpg image_201.jpg image_2229.jpg image_2435.jpg image_2651.jpg\timage_2885.jpg\timage_3122.jpg image_491.jpg image_726.jpg\n",
57
+ "image_1002.json image_1254.json image_1498.json image_1765.json image_201.json image_2229.json image_2435.json image_2651.json\timage_2885.json image_3122.json image_491.json image_726.json\n",
58
+ "image_1003.jpg\timage_1255.jpg image_1504.jpg image_1766.jpg image_2020.jpg image_222.jpg image_2436.jpg image_2652.jpg\timage_2886.jpg\timage_3123.jpg image_492.jpg image_727.jpg\n",
59
+ "image_1003.json image_1255.json image_1504.json image_1766.json image_2020.json image_222.json image_2436.json image_2652.json\timage_2886.json image_3123.json image_492.json image_727.json\n",
60
+ "image_1004.jpg\timage_125.jpg\t image_1507.jpg image_1767.jpg image_2022.jpg image_2230.jpg image_2437.jpg image_2654.jpg\timage_2887.jpg\timage_3124.jpg image_493.jpg image_745.jpg\n",
61
+ "image_1004.json image_125.json image_1507.json image_1767.json image_2022.json image_2230.json image_2437.json image_2654.json\timage_2887.json image_3124.json image_493.json image_745.json\n",
62
+ "image_1032.jpg\timage_1260.jpg image_1508.jpg image_1768.jpg image_2028.jpg image_2231.jpg image_244.jpg image_2655.jpg\timage_2888.jpg\timage_3129.jpg image_494.jpg image_746.jpg\n",
63
+ "image_1032.json image_1260.json image_1508.json image_1768.json image_2028.json image_2231.json image_244.json image_2655.json\timage_2888.json image_3129.json image_494.json image_746.json\n",
64
+ "image_1033.jpg\timage_1261.jpg image_150.jpg image_1786.jpg image_202.jpg image_2232.jpg image_2455.jpg image_2656.jpg\timage_2891.jpg\timage_3130.jpg image_496.jpg image_747.jpg\n",
65
+ "image_1033.json image_1261.json image_150.json image_1786.json image_202.json image_2232.json image_2455.json image_2656.json\timage_2891.json image_3130.json image_496.json image_747.json\n",
66
+ "image_1034.jpg\timage_126.jpg\t image_1515.jpg image_1787.jpg image_2030.jpg image_2234.jpg image_2456.jpg image_2658.jpg\timage_2892.jpg\timage_3131.jpg image_498.jpg image_748.jpg\n",
67
+ "image_1034.json image_126.json image_1515.json image_1787.json image_2030.json image_2234.json image_2456.json image_2658.json\timage_2892.json image_3131.json image_498.json image_748.json\n",
68
+ "image_1035.jpg\timage_1270.jpg image_1516.jpg image_1788.jpg image_2031.jpg image_223.jpg image_2457.jpg image_265.jpg\timage_2897.jpg\timage_3132.jpg image_49.jpg\t image_756.jpg\n",
69
+ "image_1035.json image_1270.json image_1516.json image_1788.json image_2031.json image_223.json image_2457.json image_265.json\timage_2897.json image_3132.json image_49.json image_756.json\n",
70
+ "image_103.jpg\timage_1271.jpg image_1519.jpg image_1789.jpg image_2032.jpg image_2244.jpg image_2458.jpg image_2661.jpg\timage_2898.jpg\timage_3137.jpg image_4.jpg\t image_757.jpg\n",
71
+ "image_103.json\timage_1271.json image_1519.json image_1789.json image_2032.json image_2244.json image_2458.json image_2661.json\timage_2898.json image_3137.json image_4.json\t image_757.json\n",
72
+ "image_104.jpg\timage_1285.jpg image_151.jpg image_1790.jpg image_2033.jpg image_2245.jpg image_2459.jpg image_2662.jpg\timage_2899.jpg\timage_3138.jpg image_505.jpg image_764.jpg\n",
73
+ "image_104.json\timage_1285.json image_151.json image_1790.json image_2033.json image_2245.json image_2459.json image_2662.json\timage_2899.json image_3138.json image_505.json image_764.json\n",
74
+ "image_105.jpg\timage_12.jpg\t image_1520.jpg image_1791.jpg image_2034.jpg image_2246.jpg image_245.jpg image_2668.jpg\timage_2900.jpg\timage_3143.jpg image_50.jpg\t image_765.jpg\n",
75
+ "image_105.json\timage_12.json\t image_1520.json image_1791.json image_2034.json image_2246.json image_245.json image_2668.json\timage_2900.json image_3143.json image_50.json image_765.json\n",
76
+ "image_106.jpg\timage_1303.jpg image_1521.jpg image_1796.jpg image_2035.jpg image_2247.jpg image_246.jpg image_2669.jpg\timage_2901.jpg\timage_3144.jpg image_512.jpg image_768.jpg\n",
77
+ "image_106.json\timage_1303.json image_1521.json image_1796.json image_2035.json image_2247.json image_246.json image_2669.json\timage_2901.json image_3144.json image_512.json image_768.json\n",
78
+ "image_1076.jpg\timage_1304.jpg image_1522.jpg image_1797.jpg image_2036.jpg image_2248.jpg image_2470.jpg image_266.jpg\timage_2902.jpg\timage_3159.jpg image_513.jpg image_769.jpg\n",
79
+ "image_1076.json image_1304.json image_1522.json image_1797.json image_2036.json image_2248.json image_2470.json image_266.\n",
80
+ " # Get CDR and severity\n",
81
+ " cdr = self.cdr_values[idx]\n",
82
+ " severity = cdr_to_severity(cdr)\n",
83
+ " \n",
84
+ " # Apply transforms\n",
85
+ " if self.transform:\n",
86
+ " img = self.transform(img)\n",
87
+ " \n",
88
+ " return img, torch.tensor(cdr, dtype=torch.float32), torch.tensor(severity, dtype=torch.long)\n",
89
+ "\n",
90
+ "# Define the severity classification function\n",
91
+ "def cdr_to_severity(cdr):\n",
92
+ " if cdr < 0.7:\n",
93
+ " return 0 # Mild\n",
94
+ " elif cdr < 0.9:\n",
95
+ " return 1 # Moderate\n",
96
+ " else:\n",
97
+ " return 2 # Severe\n",
98
+ "\n",
99
+ "# Define the Squeeze-and-Excitation (SE) Block\n",
100
+ "class SEBlock(nn.Module):\n",
101
+ " def __init__(self, channel, reduction=16):\n",
102
+ " super().__init__()\n",
103
+ " self.avg_pool = nn.AdaptiveAvgPool2d(1)\n",
104
+ " self.fc = nn.Sequential(\n",
105
+ " nn.Linear(channel, channel // reduction),\n",
106
+ " nn.ReLU(),\n",
107
+ " nn.Linear(channel // reduction, channel),\n",
108
+ " nn.Sigmoid())\n",
109
+ " \n",
110
+ " def forward(self, x):\n",
111
+ " b, c, _, _ = x.size()\n",
112
+ " y = self.avg_pool(x).view(b, c)\n",
113
+ " y = self.fc(y).view(b, c, 1, 1)\n",
114
+ " return x * y\n",
115
+ "\n",
116
+ "# Define the Glaucoma Severity Model with SE Blocks\n",
117
+ "class GlaucomaSeverityModel(nn.Module):\n",
118
+ " def __init__(self):\n",
119
+ " super().__init__()\n",
120
+ " self.backbone = resnet101(pretrained=True)\n",
121
+ " self.backbone.fc = nn.Identity() # Remove the original FC layer\n",
122
+ " \n",
123
+ " # Add SE Blocks to the backbone\n",
124
+ " self.backbone.layer1 = nn.Sequential(self.backbone.layer1, SEBlock(256))\n",
125
+ " self.backbone.layer2 = nn.Sequential(self.backbone.layer2, SEBlock(512))\n",
126
+ " self.backbone.layer3 = nn.Sequential(self.backbone.layer3, SEBlock(1024))\n",
127
+ " self.backbone.layer4 = nn.Sequential(self.backbone.layer4, SEBlock(2048))\n",
128
+ " \n",
129
+ " # Regression head for CDR prediction\n",
130
+ " self.regressor = nn.Sequential(\n",
131
+ " nn.Linear(2048, 512),\n",
132
+ " nn.BatchNorm1d(512),\n",
133
+ " nn.ReLU(),\n",
134
+ " nn.Dropout(0.5),\n",
135
+ " nn.Linear(512, 1))\n",
136
+ " \n",
137
+ " # Classification head for severity\n",
138
+ " self.classifier = nn.Sequential(\n",
139
+ " nn.Linear(2048, 512),\n",
140
+ " nn.BatchNorm1d(512),\n",
141
+ " nn.ReLU(),\n",
142
+ " nn.Dropout(0.5),\n",
143
+ " nn.Linear(512, 3)) # 3 classes: Mild, Moderate, Severe\n",
144
+ " \n",
145
+ " def forward(self, x):\n",
146
+ " features = self.backbone(x)\n",
147
+ " cdr = self.regressor(features)\n",
148
+ " severity = self.classifier(features)\n",
149
+ " return cdr, severity\n",
150
+ "\n",
151
+ "# Define Focal Loss\n",
152
+ "class FocalLoss(nn.Module):\n",
153
+ " def __init__(self, alpha=1, gamma=2):\n",
154
+ " super().__init__()\n",
155
+ " self.alpha = alpha\n",
156
+ " self.gamma = gamma\n",
157
+ "\n",
158
+ " def forward(self, inputs, targets):\n",
159
+ " ce_loss = nn.CrossEntropyLoss(reduction='none')(inputs, targets)\n",
160
+ " pt = torch.exp(-ce_loss)\n",
161
+ " focal_loss = self.alpha * (1 - pt) ** self.gamma * ce_loss\n",
162
+ " return focal_loss.mean()\n",
163
+ "\n",
164
+ "# Define the training and validation transforms with advanced augmentation\n",
165
+ "train_transform = transforms.Compose([\n",
166
+ " transforms.ToPILImage(),\n",
167
+ " transforms.Resize((256, 256)),\n",
168
+ " transforms.RandomHorizontalFlip(),\n",
169
+ " transforms.RandomRotation(15),\n",
170
+ " transforms.RandomAffine(degrees=0, translate=(0.1, 0.1)),\n",
171
+ " transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),\n",
172
+ " transforms.GaussianBlur(kernel_size=3),\n",
173
+ " transforms.ToTensor(),\n",
174
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n",
175
+ "])\n",
176
+ "\n",
177
+ "val_transform = transforms.Compose([\n",
178
+ " transforms.ToPILImage(),\n",
179
+ " transforms.Resize((256, 256)),\n",
180
+ " transforms.ToTensor(),\n",
181
+ " transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n",
182
+ "])\n",
183
+ "\n",
184
+ "# Function to load G1020 dataset\n",
185
+ "def load_g1020_dataset(base_dir):\n",
186
+ " \"\"\"\n",
187
+ " Load G1020 dataset from the specified directory structure.\n",
188
+ " \n",
189
+ " Args:\n",
190
+ " base_dir: Path to G1020 directory\n",
191
+ " \n",
192
+ " Returns:\n",
193
+ " img_paths: List of image file paths\n",
194
+ " cdr_values: List of CDR values\n",
195
+ " \"\"\"\n",
196
+ " \n",
197
+ " # Try to find the CSV file with annotations\n",
198
+ " csv_files = glob.glob(os.path.join(base_dir, \"*.csv\"))\n",
199
+ " \n",
200
+ " if not csv_files:\n",
201
+ " raise FileNotFoundError(f\"No CSV files found in {base_dir}\")\n",
202
+ " \n",
203
+ " # Use the first CSV file found (typically G1020.csv)\n",
204
+ " csv_file = csv_files[0]\n",
205
+ " print(f\"Loading metadata from: {csv_file}\")\n",
206
+ " \n",
207
+ " # Load the metadata\n",
208
+ " try:\n",
209
+ " metadata = pd.read_csv(csv_file)\n",
210
+ " print(f\"CSV columns: {metadata.columns.tolist()}\")\n",
211
+ " print(f\"First few rows:\\n{metadata.head()}\")\n",
212
+ " except Exception as e:\n",
213
+ " print(f\"Error reading CSV: {e}\")\n",
214
+ " raise\n",
215
+ " \n",
216
+ " # Define possible image directories in order of preference\n",
217
+ " image_dirs = [\"Images_Cropped\", \"Images_Square\", \"Images\"]\n",
218
+ " \n",
219
+ " selected_img_dir = None\n",
220
+ " for img_dir in image_dirs:\n",
221
+ " full_img_dir = os.path.join(base_dir, img_dir)\n",
222
+ " if os.path.exists(full_img_dir):\n",
223
+ " selected_img_dir = full_img_dir\n",
224
+ " print(f\"Using image directory: {selected_img_dir}\")\n",
225
+ " break\n",
226
+ " \n",
227
+ " if selected_img_dir is None:\n",
228
+ " raise FileNotFoundError(f\"No valid image directory found in {base_dir}\")\n",
229
+ " \n",
230
+ " # Get list of image files\n",
231
+ " image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp', '*.tiff', '*.tif']\n",
232
+ " all_images = []\n",
233
+ " for ext in image_extensions:\n",
234
+ " all_images.extend(glob.glob(os.path.join(selected_img_dir, ext)))\n",
235
+ " all_images.extend(glob.glob(os.path.join(selected_img_dir, ext.upper())))\n",
236
+ " \n",
237
+ " print(f\"Found {len(all_images)} images in {selected_img_dir}\")\n",
238
+ " \n",
239
+ " # Try to match images with metadata\n",
240
+ " img_paths = []\n",
241
+ " cdr_values = []\n",
242
+ " \n",
243
+ " # Common column names for CDR in G1020 dataset\n",
244
+ " cdr_columns = ['CDR', 'cdr', 'vertical_cdr', 'Vertical_CDR', 'ExpCDR', 'cup_disc_ratio']\n",
245
+ " filename_columns = ['filename', 'Filename', 'image', 'Image', 'image_name', 'ImageName']\n",
246
+ " \n",
247
+ " # Find the correct CDR column\n",
248
+ " cdr_col = None\n",
249
+ " for col in cdr_columns:\n",
250
+ " if col in metadata.columns:\n",
251
+ " cdr_col = col\n",
252
+ " break\n",
253
+ " \n",
254
+ " if cdr_col is None:\n",
255
+ " print(f\"Available columns: {metadata.columns.tolist()}\")\n",
256
+ " raise ValueError(\"Could not find CDR column in metadata. Please check the CSV structure.\")\n",
257
+ " \n",
258
+ " # Find the correct filename column\n",
259
+ " filename_col = None\n",
260
+ " for col in filename_columns:\n",
261
+ " if col in metadata.columns:\n",
262
+ " filename_col = col\n",
263
+ " break\n",
264
+ " \n",
265
+ " print(f\"Using CDR column: {cdr_col}\")\n",
266
+ " print(f\"Using filename column: {filename_col}\")\n",
267
+ " \n",
268
+ " if filename_col is not None:\n",
269
+ " # Match using filenames from CSV\n",
270
+ " for _, row in metadata.iterrows():\n",
271
+ " filename = row[filename_col]\n",
272
+ " cdr = row[cdr_col]\n",
273
+ " \n",
274
+ " # Skip if CDR is NaN\n",
275
+ " if pd.isna(cdr):\n",
276
+ " continue\n",
277
+ " \n",
278
+ " # Find matching image file\n",
279
+ " matching_files = [img for img in all_images if os.path.basename(img) == filename or \n",
280
+ " os.path.splitext(os.path.basename(img))[0] == os.path.splitext(filename)[0]]\n",
281
+ " \n",
282
+ " if matching_files:\n",
283
+ " img_paths.append(matching_files[0])\n",
284
+ " cdr_values.append(float(cdr))\n",
285
+ " else:\n",
286
+ " # If no filename column, try to match by order or use all images with default CDR\n",
287
+ " print(\"No filename column found. Using all images with metadata in order.\")\n",
288
+ " for i, (img_path, (_, row)) in enumerate(zip(all_images[:len(metadata)], metadata.iterrows())):\n",
289
+ " cdr = row[cdr_col]\n",
290
+ " if not pd.isna(cdr):\n",
291
+ " img_paths.append(img_path)\n",
292
+ " cdr_values.append(float(cdr))\n",
293
+ " \n",
294
+ " print(f\"Successfully matched {len(img_paths)} images with CDR values\")\n",
295
+ " \n",
296
+ " if len(img_paths) == 0:\n",
297
+ " raise ValueError(\"No images could be matched with CDR values\")\n",
298
+ " \n",
299
+ " return img_paths, cdr_values\n",
300
+ "\n",
301
+ "# Load the G1020 dataset\n",
302
+ "base_dir = \"/home/stalin/Projects/ai-eye-disease-detection/dataset/G1020\"\n",
303
+ "\n",
304
+ "try:\n",
305
+ " img_paths, cdr_values = load_g1020_dataset(base_dir)\n",
306
+ " print(f\"Dataset loaded successfully!\")\n",
307
+ " print(f\"Number of images: {len(img_paths)}\")\n",
308
+ " print(f\"CDR value range: {min(cdr_values):.3f} - {max(cdr_values):.3f}\")\n",
309
+ " print(f\"Sample image paths: {img_paths[:3]}\")\n",
310
+ " print(f\"Sample CDR values: {cdr_values[:3]}\")\n",
311
+ " \n",
312
+ "except Exception as e:\n",
313
+ " print(f\"Error loading dataset: {e}\")\n",
314
+ " raise\n",
315
+ "\n",
316
+ "# Split the data into training and validation sets\n",
317
+ "train_img_paths, val_img_paths, train_cdr_values, val_cdr_values = train_test_split(\n",
318
+ " img_paths, cdr_values, test_size=0.2, random_state=42, stratify=[cdr_to_severity(cdr) for cdr in cdr_values]\n",
319
+ ")\n",
320
+ "\n",
321
+ "print(f\"Training samples: {len(train_img_paths)}\")\n",
322
+ "print(f\"Validation samples: {len(val_img_paths)}\")\n",
323
+ "\n",
324
+ "# Create datasets\n",
325
+ "train_dataset = G1020Dataset(train_img_paths, train_cdr_values, transform=train_transform)\n",
326
+ "val_dataset = G1020Dataset(val_img_paths, val_cdr_values, transform=val_transform)\n",
327
+ "\n",
328
+ "# Create DataLoader objects\n",
329
+ "batch_size = 32\n",
330
+ "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)\n",
331
+ "val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)\n",
332
+ "\n",
333
+ "# Define the device\n",
334
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
335
+ "print(f\"Using device: {device}\")\n",
336
+ "\n",
337
+ "# Initialize the model\n",
338
+ "model = GlaucomaSeverityModel().to(device)\n",
339
+ "\n",
340
+ "# Loss functions\n",
341
+ "regression_loss = nn.MSELoss()\n",
342
+ "classification_loss = FocalLoss() # Use Focal Loss for classification\n",
343
+ "\n",
344
+ "# Optimizer\n",
345
+ "optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-5)\n",
346
+ "\n",
347
+ "# Learning rate scheduler with warmup\n",
348
+ "from torch.optim.lr_scheduler import LambdaLR\n",
349
+ "\n",
350
+ "def lr_lambda(epoch):\n",
351
+ " if epoch < 5: # Warmup for 5 epochs\n",
352
+ " return (epoch + 1) / 5\n",
353
+ " else:\n",
354
+ " return 0.1 ** (epoch // 10) # Reduce LR by 0.1 every 10 epochs\n",
355
+ "\n",
356
+ "scheduler = LambdaLR(optimizer, lr_lambda)\n",
357
+ "\n",
358
+ "# Training loop\n",
359
+ "def train_model(model, train_loader, val_loader, epochs=20):\n",
360
+ " best_val_loss = float('inf')\n",
361
+ " patience = 5\n",
362
+ " early_stopping_counter = 0\n",
363
+ "\n",
364
+ " for epoch in range(epochs):\n",
365
+ " model.train()\n",
366
+ " train_loss = 0.0\n",
367
+ " for imgs, cdr_true, severity_true in train_loader:\n",
368
+ " imgs, cdr_true, severity_true = imgs.to(device), cdr_true.to(device), severity_true.to(device)\n",
369
+ " \n",
370
+ " optimizer.zero_grad()\n",
371
+ " cdr_pred, severity_pred = model(imgs)\n",
372
+ " \n",
373
+ " # Combined loss\n",
374
+ " loss = 0.5 * regression_loss(cdr_pred.squeeze(), cdr_true) + \\\n",
375
+ " 0.5 * classification_loss(severity_pred, severity_true)\n",
376
+ " \n",
377
+ " loss.backward()\n",
378
+ " torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0) # Gradient clipping\n",
379
+ " optimizer.step()\n",
380
+ " train_loss += loss.item()\n",
381
+ " \n",
382
+ " # Validation\n",
383
+ " model.eval()\n",
384
+ " val_loss = 0.0\n",
385
+ " cdr_mae, severity_acc = 0.0, 0.0\n",
386
+ " all_severity_true = []\n",
387
+ " all_severity_pred = []\n",
388
+ " with torch.no_grad():\n",
389
+ " for imgs, cdr_true, severity_true in val_loader:\n",
390
+ " imgs, cdr_true, severity_true = imgs.to(device), cdr_true.to(device), severity_true.to(device)\n",
391
+ " cdr_pred, severity_pred = model(imgs)\n",
392
+ " \n",
393
+ " val_loss += 0.5 * regression_loss(cdr_pred.squeeze(), cdr_true) + \\\n",
394
+ " 0.5 * classification_loss(severity_pred, severity_true)\n",
395
+ " \n",
396
+ " # Metrics\n",
397
+ " cdr_mae += torch.abs(cdr_pred.squeeze() - cdr_true).sum().item()\n",
398
+ " severity_acc += (severity_pred.argmax(dim=1) == severity_true).sum().item()\n",
399
+ " all_severity_true.extend(severity_true.cpu().numpy())\n",
400
+ " all_severity_pred.extend(severity_pred.argmax(dim=1).cpu().numpy())\n",
401
+ " \n",
402
+ " # Calculate additional metrics\n",
403
+ " f1 = f1_score(all_severity_true, all_severity_pred, average='weighted')\n",
404
+ " precision = precision_score(all_severity_true, all_severity_pred, average='weighted')\n",
405
+ " recall = recall_score(all_severity_true, all_severity_pred, average='weighted')\n",
406
+ " \n",
407
+ " print(f\"Epoch {epoch+1}/{epochs}\")\n",
408
+ " print(f\"Train Loss: {train_loss/len(train_loader):.4f}\")\n",
409
+ " print(f\"Val Loss: {val_loss/len(val_loader):.4f}\")\n",
410
+ " print(f\"CDR MAE: {cdr_mae/len(val_dataset):.4f}\")\n",
411
+ " print(f\"Severity Acc: {severity_acc/len(val_dataset):.4f}\")\n",
412
+ " print(f\"F1 Score: {f1:.4f}\")\n",
413
+ " print(f\"Precision: {precision:.4f}\")\n",
414
+ " print(f\"Recall: {recall:.4f}\\n\")\n",
415
+ "\n",
416
+ " # Early stopping\n",
417
+ " if val_loss < best_val_loss:\n",
418
+ " best_val_loss = val_loss\n",
419
+ " early_stopping_counter = 0\n",
420
+ " # Save the best model\n",
421
+ " torch.save(model.state_dict(), \"best_g1020_model.pth\")\n",
422
+ " else:\n",
423
+ " early_stopping_counter += 1\n",
424
+ " if early_stopping_counter >= patience:\n",
425
+ " print(\"Early stopping triggered!\")\n",
426
+ " break\n",
427
+ "\n",
428
+ " # Learning rate scheduling\n",
429
+ " scheduler.step()\n",
430
+ "\n",
431
+ "# Train the model\n",
432
+ "if __name__ == \"__main__\":\n",
433
+ " print(\"Starting training...\")\n",
434
+ " train_model(model, train_loader, val_loader, epochs=20)\n",
435
+ " print(\"Training completed!\")"
436
+ ]
437
+ }
438
+ ],
439
+ "metadata": {
440
+ "language_info": {
441
+ "name": "python"
442
+ }
443
+ },
444
+ "nbformat": 4,
445
+ "nbformat_minor": 5
446
+ }
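The cell above stratifies its 80/20 split by severity class rather than by raw CDR value, so every severity grade is represented in both splits. A minimal sketch of the same idea on toy data, with a hypothetical cdr_to_severity binning standing in for the one the notebook defines in an earlier cell:

    from sklearn.model_selection import train_test_split

    def cdr_to_severity(cdr):
        # Hypothetical 3-way binning; the notebook's own version is defined upstream.
        if cdr < 0.5:
            return 0
        if cdr < 0.7:
            return 1
        return 2

    cdrs = [0.30, 0.40, 0.55, 0.60, 0.75, 0.80, 0.35, 0.65, 0.72, 0.45]
    paths = [f"img_{i}.jpg" for i in range(len(cdrs))]
    train_p, val_p, train_c, val_c = train_test_split(
        paths, cdrs, test_size=0.3, random_state=42,
        stratify=[cdr_to_severity(c) for c in cdrs],
    )
    print(sorted(cdr_to_severity(c) for c in val_c))  # severity classes in the validation split

Stratifying on the discretized severity is what makes this work at all: stratifying on the continuous CDR values directly would fail, since nearly every value would be its own single-member class.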
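FocalLoss() is instantiated above but defined in an earlier cell that this hunk does not show. A common multi-class formulation, sketched here under the assumption of a gamma of 2.0 (both the default and the exact form are assumptions, not a quote of the notebook's definition):

    import torch
    import torch.nn.functional as F

    class FocalLoss(torch.nn.Module):
        # Focal loss scales cross-entropy by (1 - p_t)^gamma, down-weighting
        # well-classified examples so training focuses on the hard ones.
        def __init__(self, gamma=2.0):
            super().__init__()
            self.gamma = gamma

        def forward(self, logits, targets):
            ce = F.cross_entropy(logits, targets, reduction="none")
            pt = torch.exp(-ce)  # model's probability for the true class
            return ((1.0 - pt) ** self.gamma * ce).mean()

    logits = torch.randn(8, 4)            # batch of 8, 4 severity classes
    targets = torch.randint(0, 4, (8,))
    print(FocalLoss()(logits, targets))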
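The lr_lambda above gives a linear warmup for epochs 0-4, the full base rate for epochs 5-9 (0.1 ** (epoch // 10) is 1.0 there), and a 10x decay every 10 epochs after that. A small sketch that prints the resulting schedule; the one-parameter model is a throwaway that exists only to host the optimizer:

    import torch
    from torch.optim.lr_scheduler import LambdaLR

    param = torch.nn.Parameter(torch.zeros(1))  # dummy parameter
    opt = torch.optim.AdamW([param], lr=1e-4)

    def lr_lambda(epoch):
        if epoch < 5:
            return (epoch + 1) / 5
        return 0.1 ** (epoch // 10)

    sched = LambdaLR(opt, lr_lambda)
    for epoch in range(20):
        # a real loop would step the optimizer over batches here
        print(epoch, opt.param_groups[0]["lr"])
        sched.step()

Epoch 0 starts at 1e-4 * 1/5 = 2e-5, epochs 5-9 run at the full 1e-4, and epochs 10-19 at 1e-5.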
dataset/G1020/G1020.csv ADDED
@@ -0,0 +1,1021 @@
+ imageID,binaryLabels
+ image_0.jpg,0
+ image_1.jpg,0
+ image_3.jpg,0
+ image_4.jpg,0
+ image_5.jpg,0
+ image_6.jpg,0
+ image_9.jpg,0
+ image_10.jpg,0
+ image_11.jpg,0
+ image_12.jpg,0
+ image_13.jpg,0
+ image_18.jpg,1
+ image_19.jpg,1
+ image_20.jpg,1
+ image_21.jpg,0
+ image_22.jpg,0
+ image_31.jpg,0
+ image_32.jpg,0
+ image_46.jpg,0
+ image_47.jpg,0
+ image_48.jpg,0
+ image_49.jpg,0
+ image_50.jpg,0
+ image_53.jpg,0
+ image_54.jpg,0
+ image_55.jpg,0
+ image_56.jpg,0
+ image_63.jpg,0
+ image_64.jpg,0
+ image_65.jpg,1
+ image_66.jpg,1
+ image_77.jpg,0
+ image_78.jpg,0
+ image_103.jpg,0
+ image_104.jpg,0
+ image_105.jpg,0
+ image_106.jpg,0
+ image_107.jpg,0
+ image_110.jpg,1
+ image_111.jpg,1
+ image_112.jpg,1
+ image_113.jpg,1
+ image_114.jpg,1
+ image_115.jpg,1
+ image_120.jpg,0
+ image_121.jpg,0
+ image_122.jpg,0
+ image_123.jpg,0
+ image_124.jpg,0
+ image_125.jpg,0
+ image_126.jpg,0
+ image_143.jpg,0
+ image_144.jpg,0
+ image_150.jpg,0
+ image_151.jpg,0
+ image_152.jpg,0
+ image_153.jpg,0
+ image_154.jpg,0
+ image_163.jpg,0
+ image_164.jpg,0
+ image_170.jpg,0
+ image_171.jpg,0
+ image_193.jpg,0
+ image_194.jpg,0
+ image_199.jpg,1
+ image_200.jpg,1
+ image_201.jpg,1
+ image_202.jpg,1
+ image_214.jpg,0
+ image_215.jpg,0
+ image_216.jpg,0
+ image_217.jpg,0
+ image_218.jpg,0
+ image_219.jpg,0
+ image_222.jpg,0
+ image_223.jpg,0
+ image_226.jpg,0
+ image_227.jpg,0
+ image_230.jpg,0
+ image_231.jpg,0
+ image_232.jpg,1
+ image_233.jpg,1
+ image_234.jpg,0
+ image_235.jpg,0
+ image_236.jpg,0
+ image_237.jpg,0
+ image_244.jpg,0
+ image_245.jpg,0
+ image_246.jpg,0
+ image_247.jpg,0
+ image_248.jpg,0
+ image_249.jpg,0
+ image_250.jpg,0
+ image_255.jpg,0
+ image_256.jpg,0
+ image_261.jpg,0
+ image_262.jpg,0
+ image_264.jpg,0
+ image_265.jpg,0
+ image_266.jpg,0
+ image_292.jpg,0
+ image_293.jpg,0
+ image_295.jpg,0
+ image_304.jpg,0
+ image_305.jpg,0
+ image_315.jpg,1
+ image_316.jpg,1
+ image_327.jpg,0
+ image_328.jpg,0
+ image_334.jpg,1
+ image_335.jpg,1
+ image_336.jpg,0
+ image_337.jpg,0
+ image_338.jpg,0
+ image_339.jpg,0
+ image_353.jpg,0
+ image_354.jpg,0
+ image_356.jpg,0
+ image_360.jpg,1
+ image_361.jpg,1
+ image_363.jpg,0
+ image_366.jpg,0
+ image_367.jpg,0
+ image_372.jpg,0
+ image_373.jpg,0
+ image_384.jpg,1
+ image_385.jpg,1
+ image_386.jpg,1
+ image_387.jpg,1
+ image_420.jpg,1
+ image_421.jpg,1
+ image_436.jpg,0
+ image_437.jpg,0
+ image_444.jpg,0
+ image_445.jpg,0
+ image_446.jpg,0
+ image_453.jpg,0
+ image_454.jpg,0
+ image_457.jpg,0
+ image_458.jpg,0
+ image_459.jpg,0
+ image_460.jpg,0
+ image_467.jpg,0
+ image_468.jpg,0
+ image_469.jpg,0
+ image_470.jpg,0
+ image_482.jpg,0
+ image_483.jpg,0
+ image_484.jpg,0
+ image_485.jpg,0
+ image_486.jpg,0
+ image_487.jpg,0
+ image_488.jpg,0
+ image_489.jpg,0
+ image_490.jpg,0
+ image_491.jpg,0
+ image_492.jpg,0
+ image_493.jpg,0
+ image_494.jpg,0
+ image_496.jpg,0
+ image_498.jpg,0
+ image_505.jpg,0
+ image_512.jpg,0
+ image_513.jpg,0
+ image_518.jpg,0
+ image_519.jpg,0
+ image_526.jpg,0
+ image_527.jpg,0
+ image_532.jpg,0
+ image_533.jpg,0
+ image_534.jpg,0
+ image_535.jpg,0
+ image_536.jpg,0
+ image_537.jpg,0
+ image_542.jpg,0
+ image_543.jpg,0
+ image_545.jpg,1
+ image_550.jpg,0
+ image_551.jpg,0
+ image_560.jpg,0
+ image_561.jpg,0
+ image_562.jpg,0
+ image_563.jpg,0
+ image_566.jpg,0
+ image_567.jpg,0
+ image_570.jpg,0
+ image_571.jpg,0
+ image_580.jpg,0
+ image_581.jpg,0
+ image_583.jpg,1
+ image_584.jpg,1
+ image_585.jpg,0
+ image_595.jpg,0
+ image_596.jpg,0
+ image_597.jpg,0
+ image_598.jpg,0
+ image_599.jpg,0
+ image_600.jpg,0
+ image_601.jpg,0
+ image_602.jpg,0
+ image_607.jpg,1
+ image_608.jpg,1
+ image_609.jpg,0
+ image_610.jpg,0
+ image_613.jpg,0
+ image_614.jpg,0
+ image_626.jpg,0
+ image_627.jpg,0
+ image_628.jpg,0
+ image_630.jpg,0
+ image_631.jpg,0
+ image_642.jpg,0
+ image_643.jpg,0
+ image_646.jpg,0
+ image_647.jpg,0
+ image_654.jpg,0
+ image_655.jpg,0
+ image_657.jpg,1
+ image_658.jpg,1
+ image_659.jpg,1
+ image_698.jpg,0
+ image_699.jpg,0
+ image_700.jpg,0
+ image_703.jpg,0
+ image_704.jpg,0
+ image_708.jpg,0
+ image_709.jpg,0
+ image_726.jpg,1
+ image_727.jpg,1
+ image_745.jpg,0
+ image_746.jpg,0
+ image_747.jpg,1
+ image_748.jpg,1
+ image_756.jpg,0
+ image_757.jpg,0
+ image_764.jpg,0
+ image_765.jpg,0
+ image_768.jpg,0
+ image_769.jpg,0
+ image_772.jpg,1
+ image_773.jpg,1
+ image_774.jpg,0
+ image_775.jpg,0
+ image_776.jpg,1
+ image_777.jpg,1
+ image_778.jpg,0
+ image_779.jpg,0
+ image_780.jpg,1
+ image_781.jpg,1
+ image_791.jpg,0
+ image_805.jpg,0
+ image_806.jpg,0
+ image_807.jpg,1
+ image_808.jpg,1
+ image_815.jpg,0
+ image_816.jpg,0
+ image_819.jpg,0
+ image_820.jpg,0
+ image_821.jpg,0
+ image_840.jpg,1
+ image_841.jpg,1
+ image_856.jpg,1
+ image_857.jpg,1
+ image_860.jpg,0
+ image_861.jpg,0
+ image_868.jpg,0
+ image_869.jpg,0
+ image_874.jpg,0
+ image_875.jpg,0
+ image_878.jpg,0
+ image_879.jpg,0
+ image_880.jpg,0
+ image_881.jpg,0
+ image_882.jpg,0
+ image_883.jpg,0
+ image_896.jpg,0
+ image_897.jpg,0
+ image_903.jpg,1
+ image_904.jpg,1
+ image_910.jpg,0
+ image_911.jpg,0
+ image_912.jpg,0
+ image_915.jpg,1
+ image_916.jpg,1
+ image_924.jpg,0
+ image_930.jpg,0
+ image_931.jpg,0
+ image_936.jpg,0
+ image_940.jpg,1
+ image_941.jpg,1
+ image_942.jpg,0
+ image_943.jpg,0
+ image_944.jpg,0
+ image_945.jpg,0
+ image_952.jpg,0
+ image_953.jpg,0
+ image_954.jpg,0
+ image_955.jpg,0
+ image_956.jpg,0
+ image_957.jpg,0
+ image_958.jpg,0
+ image_959.jpg,0
+ image_964.jpg,0
+ image_965.jpg,0
+ image_980.jpg,0
+ image_981.jpg,0
+ image_1000.jpg,1
+ image_1001.jpg,1
+ image_1002.jpg,1
+ image_1003.jpg,1
+ image_1004.jpg,1
+ image_1032.jpg,1
+ image_1033.jpg,1
+ image_1034.jpg,0
+ image_1035.jpg,0
+ image_1076.jpg,0
+ image_1077.jpg,0
+ image_1080.jpg,1
+ image_1081.jpg,1
+ image_1087.jpg,0
+ image_1088.jpg,0
+ image_1092.jpg,0
+ image_1093.jpg,0
+ image_1094.jpg,0
+ image_1095.jpg,0
+ image_1096.jpg,0
+ image_1112.jpg,1
+ image_1113.jpg,1
+ image_1136.jpg,0
+ image_1137.jpg,0
+ image_1138.jpg,0
+ image_1139.jpg,0
+ image_1140.jpg,0
+ image_1141.jpg,0
+ image_1142.jpg,0
+ image_1143.jpg,0
+ image_1144.jpg,0
+ image_1160.jpg,0
+ image_1161.jpg,0
+ image_1164.jpg,0
+ image_1165.jpg,0
+ image_1187.jpg,0
+ image_1188.jpg,0
+ image_1189.jpg,0
+ image_1190.jpg,0
+ image_1197.jpg,0
+ image_1198.jpg,0
+ image_1199.jpg,0
+ image_1201.jpg,1
+ image_1202.jpg,1
+ image_1203.jpg,0
+ image_1204.jpg,0
+ image_1213.jpg,0
+ image_1214.jpg,0
+ image_1215.jpg,0
+ image_1216.jpg,0
+ image_1217.jpg,0
+ image_1218.jpg,0
+ image_1225.jpg,0
+ image_1226.jpg,1
+ image_1227.jpg,1
+ image_1230.jpg,1
+ image_1231.jpg,1
+ image_1232.jpg,1
+ image_1233.jpg,1
+ image_1234.jpg,1
+ image_1238.jpg,0
+ image_1240.jpg,0
+ image_1241.jpg,0
+ image_1242.jpg,0
+ image_1243.jpg,0
+ image_1246.jpg,1
+ image_1247.jpg,1
+ image_1248.jpg,0
+ image_1250.jpg,0
+ image_1254.jpg,0
+ image_1255.jpg,0
+ image_1260.jpg,0
+ image_1261.jpg,0
+ image_1270.jpg,1
+ image_1271.jpg,1
+ image_1285.jpg,0
+ image_1303.jpg,0
+ image_1304.jpg,0
+ image_1317.jpg,1
+ image_1318.jpg,1
+ image_1319.jpg,1
+ image_1321.jpg,0
+ image_1322.jpg,0
+ image_1325.jpg,0
+ image_1326.jpg,0
+ image_1327.jpg,0
+ image_1328.jpg,0
+ image_1329.jpg,0
+ image_1330.jpg,0
+ image_1331.jpg,0
+ image_1332.jpg,0
+ image_1333.jpg,0
+ image_1334.jpg,1
+ image_1335.jpg,1
+ image_1336.jpg,1
+ image_1337.jpg,1
+ image_1338.jpg,1
+ image_1339.jpg,1
+ image_1344.jpg,0
+ image_1348.jpg,0
+ image_1349.jpg,0
+ image_1350.jpg,0
+ image_1351.jpg,0
+ image_1352.jpg,0
+ image_1355.jpg,1
+ image_1356.jpg,1
+ image_1359.jpg,0
+ image_1360.jpg,0
+ image_1361.jpg,0
+ image_1362.jpg,0
+ image_1366.jpg,0
+ image_1367.jpg,0
+ image_1368.jpg,0
+ image_1377.jpg,1
+ image_1378.jpg,1
+ image_1384.jpg,0
+ image_1386.jpg,0
+ image_1387.jpg,0
+ image_1388.jpg,0
+ image_1389.jpg,0
+ image_1391.jpg,0
+ image_1392.jpg,0
+ image_1401.jpg,0
+ image_1402.jpg,0
+ image_1403.jpg,0
+ image_1404.jpg,0
+ image_1413.jpg,0
+ image_1414.jpg,0
+ image_1415.jpg,0
+ image_1416.jpg,0
+ image_1421.jpg,1
+ image_1422.jpg,1
+ image_1426.jpg,0
+ image_1431.jpg,0
+ image_1432.jpg,0
+ image_1452.jpg,0
+ image_1453.jpg,0
+ image_1462.jpg,0
+ image_1463.jpg,0
+ image_1470.jpg,0
+ image_1471.jpg,0
+ image_1478.jpg,0
+ image_1479.jpg,0
+ image_1480.jpg,0
+ image_1481.jpg,0
+ image_1495.jpg,1
+ image_1496.jpg,1
+ image_1497.jpg,1
+ image_1498.jpg,1
+ image_1504.jpg,0
+ image_1507.jpg,0
+ image_1508.jpg,0
+ image_1515.jpg,1
+ image_1516.jpg,1
+ image_1519.jpg,0
+ image_1520.jpg,0
+ image_1521.jpg,1
+ image_1522.jpg,1
+ image_1524.jpg,1
+ image_1525.jpg,1
+ image_1526.jpg,1
+ image_1543.jpg,0
+ image_1544.jpg,0
+ image_1553.jpg,0
+ image_1554.jpg,0
+ image_1575.jpg,0
+ image_1576.jpg,0
+ image_1579.jpg,1
+ image_1580.jpg,1
+ image_1589.jpg,0
+ image_1590.jpg,0
+ image_1591.jpg,1
+ image_1592.jpg,1
+ image_1594.jpg,1
+ image_1595.jpg,1
+ image_1598.jpg,0
+ image_1599.jpg,0
+ image_1607.jpg,1
+ image_1608.jpg,1
+ image_1609.jpg,1
+ image_1610.jpg,1
+ image_1616.jpg,0
+ image_1623.jpg,0
+ image_1624.jpg,0
+ image_1625.jpg,0
+ image_1626.jpg,0
+ image_1644.jpg,0
+ image_1645.jpg,0
+ image_1652.jpg,0
+ image_1653.jpg,0
+ image_1686.jpg,0
+ image_1687.jpg,0
+ image_1691.jpg,0
+ image_1692.jpg,0
+ image_1693.jpg,0
+ image_1694.jpg,0
+ image_1695.jpg,0
+ image_1696.jpg,0
+ image_1697.jpg,1
+ image_1698.jpg,1
+ image_1699.jpg,1
+ image_1700.jpg,1
+ image_1701.jpg,1
+ image_1702.jpg,1
+ image_1703.jpg,0
+ image_1704.jpg,0
+ image_1710.jpg,1
+ image_1711.jpg,1
+ image_1712.jpg,1
+ image_1713.jpg,1
+ image_1714.jpg,1
+ image_1715.jpg,1
+ image_1716.jpg,0
+ image_1717.jpg,0
+ image_1722.jpg,0
+ image_1723.jpg,0
+ image_1724.jpg,0
+ image_1742.jpg,1
+ image_1745.jpg,0
+ image_1746.jpg,0
+ image_1747.jpg,1
+ image_1748.jpg,1
+ image_1759.jpg,0
+ image_1760.jpg,0
+ image_1765.jpg,0
+ image_1766.jpg,0
+ image_1767.jpg,0
+ image_1768.jpg,0
+ image_1786.jpg,1
+ image_1787.jpg,1
+ image_1788.jpg,1
+ image_1789.jpg,0
+ image_1790.jpg,0
+ image_1791.jpg,0
+ image_1796.jpg,1
+ image_1797.jpg,1
+ image_1798.jpg,1
+ image_1799.jpg,1
+ image_1824.jpg,0
+ image_1825.jpg,0
+ image_1842.jpg,0
+ image_1843.jpg,0
+ image_1844.jpg,0
+ image_1845.jpg,0
+ image_1847.jpg,0
+ image_1860.jpg,1
+ image_1861.jpg,1
+ image_1862.jpg,1
+ image_1865.jpg,0
+ image_1866.jpg,0
+ image_1874.jpg,1
+ image_1875.jpg,1
+ image_1876.jpg,1
+ image_1877.jpg,1
+ image_1892.jpg,1
+ image_1893.jpg,1
+ image_1894.jpg,1
+ image_1895.jpg,1
+ image_1896.jpg,1
+ image_1897.jpg,1
+ image_1898.jpg,1
+ image_1899.jpg,1
+ image_1901.jpg,1
+ image_1902.jpg,1
+ image_1903.jpg,0
+ image_1904.jpg,0
+ image_1912.jpg,0
+ image_1913.jpg,0
+ image_1914.jpg,0
+ image_1915.jpg,0
+ image_1916.jpg,0
+ image_1917.jpg,0
+ image_1922.jpg,0
+ image_1923.jpg,0
+ image_1924.jpg,0
+ image_1925.jpg,0
+ image_1926.jpg,0
+ image_1927.jpg,0
+ image_1931.jpg,1
+ image_1932.jpg,1
+ image_1933.jpg,1
+ image_1934.jpg,1
+ image_1935.jpg,1
+ image_1938.jpg,0
+ image_1939.jpg,0
+ image_1940.jpg,0
+ image_1941.jpg,0
+ image_1956.jpg,1
+ image_1957.jpg,1
+ image_1958.jpg,0
+ image_1959.jpg,0
+ image_1960.jpg,0
+ image_1961.jpg,0
+ image_1977.jpg,0
+ image_1978.jpg,0
+ image_1981.jpg,0
+ image_1982.jpg,0
+ image_1999.jpg,1
+ image_2000.jpg,1
+ image_2001.jpg,1
+ image_2002.jpg,1
+ image_2019.jpg,1
+ image_2020.jpg,1
+ image_2022.jpg,0
+ image_2028.jpg,0
+ image_2030.jpg,0
+ image_2031.jpg,0
+ image_2032.jpg,1
+ image_2033.jpg,1
+ image_2034.jpg,1
+ image_2035.jpg,1
+ image_2036.jpg,1
+ image_2037.jpg,1
+ image_2039.jpg,0
+ image_2041.jpg,1
+ image_2042.jpg,1
+ image_2045.jpg,0
+ image_2046.jpg,0
+ image_2047.jpg,1
+ image_2048.jpg,1
+ image_2049.jpg,1
+ image_2050.jpg,1
+ image_2064.jpg,0
+ image_2065.jpg,0
+ image_2078.jpg,0
+ image_2084.jpg,0
+ image_2085.jpg,0
+ image_2097.jpg,0
+ image_2101.jpg,0
+ image_2102.jpg,0
+ image_2110.jpg,0
+ image_2111.jpg,0
+ image_2116.jpg,0
+ image_2117.jpg,0
+ image_2124.jpg,0
+ image_2125.jpg,0
+ image_2139.jpg,0
+ image_2140.jpg,1
+ image_2141.jpg,1
+ image_2142.jpg,1
+ image_2143.jpg,1
+ image_2144.jpg,1
+ image_2146.jpg,1
+ image_2147.jpg,1
+ image_2148.jpg,1
+ image_2149.jpg,1
+ image_2151.jpg,0
+ image_2152.jpg,0
+ image_2154.jpg,0
+ image_2155.jpg,0
+ image_2156.jpg,0
+ image_2162.jpg,0
+ image_2163.jpg,0
+ image_2164.jpg,0
+ image_2170.jpg,0
+ image_2171.jpg,0
+ image_2172.jpg,0
+ image_2173.jpg,0
+ image_2188.jpg,0
+ image_2189.jpg,0
+ image_2199.jpg,0
+ image_2200.jpg,0
+ image_2201.jpg,1
+ image_2202.jpg,1
+ image_2209.jpg,0
+ image_2210.jpg,0
+ image_2211.jpg,0
+ image_2212.jpg,0
+ image_2215.jpg,0
+ image_2216.jpg,0
+ image_2217.jpg,0
+ image_2218.jpg,0
+ image_2219.jpg,0
+ image_2220.jpg,0
+ image_2223.jpg,0
+ image_2225.jpg,0
+ image_2226.jpg,0
+ image_2229.jpg,1
+ image_2230.jpg,1
+ image_2231.jpg,1
+ image_2232.jpg,1
+ image_2234.jpg,0
+ image_2244.jpg,0
+ image_2245.jpg,0
+ image_2246.jpg,0
+ image_2247.jpg,0
+ image_2248.jpg,0
+ image_2249.jpg,0
+ image_2276.jpg,1
+ image_2277.jpg,1
+ image_2278.jpg,0
+ image_2279.jpg,0
+ image_2289.jpg,0
+ image_2290.jpg,0
+ image_2291.jpg,0
+ image_2292.jpg,0
+ image_2299.jpg,0
+ image_2300.jpg,0
+ image_2303.jpg,0
+ image_2304.jpg,0
+ image_2307.jpg,1
+ image_2308.jpg,1
+ image_2309.jpg,0
+ image_2310.jpg,0
+ image_2313.jpg,1
+ image_2314.jpg,1
+ image_2315.jpg,0
+ image_2316.jpg,0
+ image_2319.jpg,0
+ image_2320.jpg,0
+ image_2321.jpg,1
+ image_2322.jpg,1
+ image_2326.jpg,0
+ image_2327.jpg,0
+ image_2345.jpg,1
+ image_2346.jpg,1
+ image_2354.jpg,0
+ image_2355.jpg,0
+ image_2359.jpg,0
+ image_2360.jpg,0
+ image_2361.jpg,0
+ image_2362.jpg,0
+ image_2363.jpg,0
+ image_2364.jpg,0
+ image_2365.jpg,0
+ image_2366.jpg,0
+ image_2367.jpg,1
+ image_2368.jpg,1
+ image_2375.jpg,0
+ image_2376.jpg,0
+ image_2402.jpg,1
+ image_2403.jpg,1
+ image_2404.jpg,1
+ image_2407.jpg,0
+ image_2408.jpg,0
+ image_2411.jpg,1
+ image_2412.jpg,1
+ image_2413.jpg,0
+ image_2414.jpg,0
+ image_2417.jpg,0
+ image_2418.jpg,0
+ image_2421.jpg,0
+ image_2423.jpg,0
+ image_2424.jpg,0
+ image_2425.jpg,0
+ image_2426.jpg,1
+ image_2430.jpg,0
+ image_2431.jpg,0
+ image_2434.jpg,0
+ image_2435.jpg,0
+ image_2436.jpg,0
+ image_2437.jpg,0
+ image_2455.jpg,1
+ image_2456.jpg,1
+ image_2457.jpg,1
+ image_2458.jpg,1
+ image_2459.jpg,1
+ image_2470.jpg,1
+ image_2471.jpg,1
+ image_2473.jpg,1
+ image_2474.jpg,0
+ image_2475.jpg,0
+ image_2482.jpg,0
+ image_2483.jpg,0
+ image_2503.jpg,0
+ image_2504.jpg,0
+ image_2505.jpg,1
+ image_2507.jpg,0
+ image_2512.jpg,1
+ image_2513.jpg,0
+ image_2514.jpg,0
+ image_2515.jpg,0
+ image_2516.jpg,0
+ image_2523.jpg,0
+ image_2524.jpg,0
+ image_2525.jpg,0
+ image_2526.jpg,0
+ image_2527.jpg,0
+ image_2528.jpg,0
+ image_2529.jpg,1
+ image_2530.jpg,1
+ image_2531.jpg,1
+ image_2532.jpg,1
+ image_2533.jpg,1
+ image_2534.jpg,1
+ image_2550.jpg,0
+ image_2551.jpg,0
+ image_2552.jpg,0
+ image_2553.jpg,0
+ image_2554.jpg,0
+ image_2555.jpg,0
+ image_2558.jpg,0
+ image_2559.jpg,0
+ image_2562.jpg,1
+ image_2563.jpg,1
+ image_2564.jpg,1
+ image_2565.jpg,1
+ image_2566.jpg,1
+ image_2567.jpg,1
+ image_2569.jpg,0
+ image_2570.jpg,0
+ image_2571.jpg,0
+ image_2572.jpg,0
+ image_2573.jpg,0
+ image_2574.jpg,0
+ image_2575.jpg,0
+ image_2576.jpg,0
+ image_2577.jpg,0
+ image_2578.jpg,0
+ image_2579.jpg,0
+ image_2588.jpg,0
+ image_2589.jpg,0
+ image_2602.jpg,1
+ image_2605.jpg,1
+ image_2606.jpg,1
+ image_2607.jpg,0
+ image_2608.jpg,0
+ image_2611.jpg,0
+ image_2612.jpg,0
+ image_2622.jpg,0
+ image_2623.jpg,0
+ image_2651.jpg,0
+ image_2652.jpg,0
+ image_2654.jpg,0
+ image_2655.jpg,1
+ image_2656.jpg,1
+ image_2658.jpg,1
+ image_2661.jpg,1
+ image_2662.jpg,1
+ image_2668.jpg,0
+ image_2669.jpg,0
+ image_2676.jpg,0
+ image_2677.jpg,0
+ image_2685.jpg,0
+ image_2686.jpg,0
+ image_2687.jpg,0
+ image_2688.jpg,0
+ image_2689.jpg,0
+ image_2690.jpg,0
+ image_2691.jpg,0
+ image_2692.jpg,0
+ image_2693.jpg,0
+ image_2694.jpg,0
+ image_2695.jpg,0
+ image_2698.jpg,0
+ image_2699.jpg,0
+ image_2700.jpg,0
+ image_2701.jpg,0
+ image_2702.jpg,0
+ image_2703.jpg,0
+ image_2707.jpg,0
+ image_2708.jpg,0
+ image_2709.jpg,0
+ image_2710.jpg,0
+ image_2711.jpg,0
+ image_2712.jpg,0
+ image_2713.jpg,0
+ image_2714.jpg,0
+ image_2728.jpg,0
+ image_2729.jpg,0
+ image_2730.jpg,1
+ image_2731.jpg,1
+ image_2732.jpg,1
+ image_2733.jpg,1
+ image_2734.jpg,1
+ image_2735.jpg,1
+ image_2736.jpg,0
+ image_2737.jpg,0
+ image_2738.jpg,1
+ image_2739.jpg,1
+ image_2765.jpg,0
+ image_2766.jpg,0
+ image_2767.jpg,0
+ image_2768.jpg,0
+ image_2781.jpg,1
+ image_2782.jpg,1
+ image_2783.jpg,1
+ image_2784.jpg,1
+ image_2785.jpg,0
+ image_2786.jpg,0
+ image_2787.jpg,0
+ image_2788.jpg,0
+ image_2795.jpg,0
+ image_2796.jpg,0
+ image_2805.jpg,0
+ image_2806.jpg,0
+ image_2807.jpg,0
+ image_2808.jpg,0
+ image_2809.jpg,1
+ image_2810.jpg,1
+ image_2823.jpg,0
+ image_2824.jpg,0
+ image_2825.jpg,0
+ image_2826.jpg,0
+ image_2827.jpg,0
+ image_2828.jpg,0
+ image_2839.jpg,0
+ image_2840.jpg,0
+ image_2856.jpg,0
+ image_2857.jpg,0
+ image_2860.jpg,1
+ image_2861.jpg,1
+ image_2880.jpg,0
+ image_2881.jpg,0
+ image_2885.jpg,0
+ image_2886.jpg,0
+ image_2887.jpg,0
+ image_2888.jpg,0
+ image_2891.jpg,0
+ image_2892.jpg,0
+ image_2897.jpg,0
+ image_2898.jpg,0
+ image_2899.jpg,0
+ image_2900.jpg,0
+ image_2901.jpg,0
+ image_2902.jpg,0
+ image_2906.jpg,0
+ image_2912.jpg,0
+ image_2913.jpg,0
+ image_2914.jpg,0
+ image_2915.jpg,0
+ image_2916.jpg,0
+ image_2917.jpg,0
+ image_2935.jpg,0
+ image_2936.jpg,0
+ image_2941.jpg,0
+ image_2942.jpg,0
+ image_2945.jpg,0
+ image_2946.jpg,0
+ image_2947.jpg,0
+ image_2949.jpg,0
+ image_2950.jpg,0
+ image_2956.jpg,0
+ image_2957.jpg,0
+ image_2970.jpg,0
+ image_2971.jpg,0
+ image_2972.jpg,1
+ image_2973.jpg,1
+ image_2974.jpg,0
+ image_2975.jpg,0
+ image_2980.jpg,0
+ image_2981.jpg,0
+ image_2984.jpg,0
+ image_2985.jpg,0
+ image_2991.jpg,0
+ image_2992.jpg,0
+ image_2998.jpg,0
+ image_2999.jpg,0
+ image_3000.jpg,0
+ image_3017.jpg,0
+ image_3018.jpg,0
+ image_3019.jpg,0
+ image_3020.jpg,0
+ image_3021.jpg,0
+ image_3022.jpg,0
+ image_3023.jpg,1
+ image_3024.jpg,1
+ image_3025.jpg,1
+ image_3026.jpg,1
+ image_3029.jpg,0
+ image_3038.jpg,1
+ image_3039.jpg,1
+ image_3040.jpg,0
+ image_3041.jpg,0
+ image_3044.jpg,0
+ image_3045.jpg,0
+ image_3058.jpg,0
+ image_3059.jpg,0
+ image_3064.jpg,1
+ image_3065.jpg,1
+ image_3066.jpg,1
+ image_3067.jpg,1
+ image_3076.jpg,0
+ image_3080.jpg,1
+ image_3081.jpg,1
+ image_3082.jpg,0
+ image_3084.jpg,1
+ image_3085.jpg,1
+ image_3090.jpg,1
+ image_3091.jpg,1
+ image_3116.jpg,0
+ image_3117.jpg,0
+ image_3120.jpg,0
+ image_3121.jpg,0
+ image_3122.jpg,1
+ image_3123.jpg,1
+ image_3124.jpg,0
+ image_3129.jpg,0
+ image_3130.jpg,0
+ image_3131.jpg,0
+ image_3132.jpg,0
+ image_3137.jpg,0
+ image_3138.jpg,0
+ image_3143.jpg,1
+ image_3144.jpg,1
+ image_3159.jpg,0
+ image_3160.jpg,0
+ image_3163.jpg,1
+ image_3164.jpg,1
+ image_3165.jpg,1
+ image_3166.jpg,1
+ image_3167.jpg,1
+ image_3168.jpg,1
+ image_3169.jpg,0
+ image_3170.jpg,0
+ image_3182.jpg,1
+ image_3183.jpg,1
+ image_3184.jpg,1
+ image_3185.jpg,1
+ image_3198.jpg,0
+ image_3199.jpg,0
+ image_3201.jpg,1
+ image_3202.jpg,1
+ image_2568.jpg,0
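G1020.csv pairs each fundus image with a binary label (presumably 0 = non-glaucoma and 1 = glaucoma, following the dataset's usual convention). A quick sanity check of shape and class balance, assuming pandas and the repository layout shown above:

    import pandas as pd

    df = pd.read_csv("dataset/G1020/G1020.csv")
    print(df.shape)                           # (1020, 2): one row per image
    print(df["binaryLabels"].value_counts())  # label counts per class

The visible imbalance toward label 0 is one reason the training cell pairs the classification head with a focal loss.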
dataset/G1020/Images_Cropped/017.jpg ADDED
dataset/G1020/Images_Cropped/022.jpg ADDED
dataset/G1020/Images_Cropped/040.jpg ADDED
dataset/G1020/Images_Cropped/048.jpg ADDED
dataset/G1020/Images_Cropped/062.jpg ADDED
dataset/G1020/Images_Cropped/077.jpg ADDED
dataset/G1020/Images_Cropped/090.jpg ADDED
dataset/G1020/Images_Cropped/117.jpg ADDED
dataset/G1020/Images_Cropped/125.jpg ADDED
dataset/G1020/Images_Cropped/148.jpg ADDED
dataset/G1020/Images_Cropped/162.jpg ADDED
dataset/G1020/Images_Cropped/165.jpg ADDED
dataset/G1020/Images_Cropped/184.jpg ADDED
dataset/G1020/Images_Cropped/188.jpg ADDED
dataset/G1020/Images_Cropped/249.jpg ADDED
dataset/G1020/Images_Cropped/258.jpg ADDED
dataset/G1020/Images_Cropped/322.jpg ADDED
dataset/G1020/Images_Cropped/328.jpg ADDED
dataset/G1020/Images_Cropped/332.jpg ADDED
dataset/G1020/Images_Cropped/334.jpg ADDED
dataset/G1020/Images_Cropped/340.jpg ADDED
dataset/G1020/Images_Cropped/341.jpg ADDED
dataset/G1020/Images_Cropped/374.jpg ADDED
dataset/G1020/Images_Cropped/377.jpg ADDED
dataset/G1020/Images_Cropped/394.jpg ADDED
dataset/G1020/Images_Cropped/418.jpg ADDED
dataset/G1020/Images_Cropped/432.jpg ADDED
dataset/G1020/Images_Cropped/452.jpg ADDED
dataset/G1020/Images_Cropped/454.jpg ADDED
dataset/G1020/Images_Cropped/468.jpg ADDED
dataset/G1020/Images_Cropped/476.jpg ADDED
dataset/G1020/Images_Cropped/493.jpg ADDED
dataset/G1020/Images_Cropped/500.jpg ADDED
dataset/G1020/Images_Cropped/515.jpg ADDED
dataset/G1020/Images_Cropped/575.jpg ADDED
dataset/G1020/Images_Cropped/603.jpg ADDED
dataset/G1020/Images_Cropped/609.jpg ADDED
dataset/G1020/Images_Cropped/613.jpg ADDED
dataset/G1020/Images_Cropped/img/image_1095.jpg ADDED
dataset/G1020/Images_Cropped/img/image_1187.jpg ADDED
dataset/G1020/Images_Cropped/img/image_1201.jpg ADDED
dataset/G1020/Images_Cropped/img/image_1203.jpg ADDED