Abid Ali Awan committed · Commit 87b61df
Parent(s): 826a241
Remove legacy files and configurations in preparation for a new project structure
- Deleted DVC-related files, including .dvcignore, dvc.lock, dvc.yaml, and model files.
- Removed training and evaluation scripts, dataset processing scripts, and associated parameters.
- Cleaned up the project by deleting unused directories and files, including logs and requirements for Heroku deployment.
- Updated the app to streamline the inference process and improve demo mode functionality.
This commit sets the foundation for a simplified deployment and development workflow.
- .dvc/.gitignore +0 -3
- .dvc/config +0 -7
- .dvc/plots/confusion.json +0 -30
- .dvc/plots/default.json +0 -29
- .dvc/plots/scatter.json +0 -27
- .dvc/plots/smooth.json +0 -39
- .dvcignore +0 -3
- LICENSE +0 -8
- Makefile +0 -37
- Notebooks/SavtaDepth_Colab.ipynb +0 -0
- README_HF.md +0 -53
- app/app_savta.py +14 -139
- dvc.lock +0 -75
- dvc.yaml +0 -38
- flagged/log.csv +0 -4
- heroku/.slugignore +0 -0
- heroku/Aptfile +0 -1
- heroku/DVC-heroku-deployment.md +0 -21
- heroku/Dockerfile +0 -19
- heroku/Procfile +0 -1
- heroku/heroku.yml +0 -5
- heroku/requirements_heroku.txt +0 -15
- heroku/setup.sh +0 -2
- logs/test_metrics.csv +0 -10
- logs/train_metrics.csv +0 -0
- logs/train_params.yml +0 -25
- models/model.pth +3 -0
- requirements.txt +0 -4
- run_dev_env.sh +0 -7
- src/.gitignore +0 -2
- src/code/custom_data_loading.py +0 -51
- src/code/eval.py +0 -56
- src/code/eval_metric_calculation.py +0 -79
- src/code/make_dataset.py +0 -121
- src/code/params.yml +0 -13
- src/code/training.py +0 -44
- src/data/.gitignore +0 -1
- src/data/raw/.gitignore +0 -2
- src/data/raw/nyu_depth_v2_labeled.mat.dvc +0 -9
- src/data/raw/splits.mat.dvc +0 -9
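The "streamline the inference process" bullet is concrete in the `app/app_savta.py` diff below: the DagsHub CLI download step, the custom `DataLoaders` subclass, and the `unet_learner` reconstruction are all replaced by a single `load_learner` call on a bundled checkpoint. As a reading aid, here is a minimal sketch of the new startup path in isolation (it mirrors the new code in the diff; treat it as illustrative, not as the full app):

```python
from pathlib import Path

import gradio as gr
from fastai.vision.all import PILImage, PILImageBW, load_learner

# The committed layout keeps the exported learner one level above app/.
MODEL_PATH = Path(__file__).parent.parent / "models" / "model.pth"
if not MODEL_PATH.exists():
    raise FileNotFoundError(f"Model not found at {MODEL_PATH}")
learner = load_learner(MODEL_PATH)

def predict_depth(input_img: PILImage) -> PILImageBW:
    # learner.predict returns a tuple; the first element is the decoded depth map
    depth, *_ = learner.predict(input_img)
    return PILImageBW.create(depth).convert("L")

demo = gr.Interface(
    fn=predict_depth,
    inputs=gr.Image(width=640, height=480, label="Input RGB"),
    outputs=gr.Image(label="Predicted Depth", image_mode="L"),
)

if __name__ == "__main__":
    demo.queue().launch()
```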
.dvc/.gitignore
DELETED
@@ -1,3 +0,0 @@
-/config.local
-/tmp
-/cache
.dvc/config
DELETED
@@ -1,7 +0,0 @@
-[core]
-analytics = false
-remote = origin
-['remote "origin"']
-url = https://dagshub.com/OperationSavta/SavtaDepth.dvc
-['remote "upstream"']
-url = https://dagshub.com/OperationSavta/SavtaDepth.dvc
.dvc/plots/confusion.json
DELETED
@@ -1,30 +0,0 @@
-{
-    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
-    "data": {
-        "values": "<DVC_METRIC_DATA>"
-    },
-    "title": "<DVC_METRIC_TITLE>",
-    "mark": "rect",
-    "encoding": {
-        "x": {
-            "field": "<DVC_METRIC_X>",
-            "type": "nominal",
-            "sort": "ascending",
-            "title": "<DVC_METRIC_X_LABEL>"
-        },
-        "y": {
-            "field": "<DVC_METRIC_Y>",
-            "type": "nominal",
-            "sort": "ascending",
-            "title": "<DVC_METRIC_Y_LABEL>"
-        },
-        "color": {
-            "aggregate": "count",
-            "type": "quantitative"
-        },
-        "facet": {
-            "field": "rev",
-            "type": "nominal"
-        }
-    }
-}
.dvc/plots/default.json
DELETED
@@ -1,29 +0,0 @@
-{
-    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
-    "data": {
-        "values": "<DVC_METRIC_DATA>"
-    },
-    "title": "<DVC_METRIC_TITLE>",
-    "mark": {
-        "type": "line"
-    },
-    "encoding": {
-        "x": {
-            "field": "<DVC_METRIC_X>",
-            "type": "quantitative",
-            "title": "<DVC_METRIC_X_LABEL>"
-        },
-        "y": {
-            "field": "<DVC_METRIC_Y>",
-            "type": "quantitative",
-            "title": "<DVC_METRIC_Y_LABEL>",
-            "scale": {
-                "zero": false
-            }
-        },
-        "color": {
-            "field": "rev",
-            "type": "nominal"
-        }
-    }
-}
.dvc/plots/scatter.json
DELETED
@@ -1,27 +0,0 @@
-{
-    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
-    "data": {
-        "values": "<DVC_METRIC_DATA>"
-    },
-    "title": "<DVC_METRIC_TITLE>",
-    "mark": "point",
-    "encoding": {
-        "x": {
-            "field": "<DVC_METRIC_X>",
-            "type": "quantitative",
-            "title": "<DVC_METRIC_X_LABEL>"
-        },
-        "y": {
-            "field": "<DVC_METRIC_Y>",
-            "type": "quantitative",
-            "title": "<DVC_METRIC_Y_LABEL>",
-            "scale": {
-                "zero": false
-            }
-        },
-        "color": {
-            "field": "rev",
-            "type": "nominal"
-        }
-    }
-}
.dvc/plots/smooth.json
DELETED
@@ -1,39 +0,0 @@
-{
-    "$schema": "https://vega.github.io/schema/vega-lite/v4.json",
-    "data": {
-        "values": "<DVC_METRIC_DATA>"
-    },
-    "title": "<DVC_METRIC_TITLE>",
-    "mark": {
-        "type": "line"
-    },
-    "encoding": {
-        "x": {
-            "field": "<DVC_METRIC_X>",
-            "type": "quantitative",
-            "title": "<DVC_METRIC_X_LABEL>"
-        },
-        "y": {
-            "field": "<DVC_METRIC_Y>",
-            "type": "quantitative",
-            "title": "<DVC_METRIC_Y_LABEL>",
-            "scale": {
-                "zero": false
-            }
-        },
-        "color": {
-            "field": "rev",
-            "type": "nominal"
-        }
-    },
-    "transform": [
-        {
-            "loess": "<DVC_METRIC_Y>",
-            "on": "<DVC_METRIC_X>",
-            "groupby": [
-                "rev"
-            ],
-            "bandwidth": 0.3
-        }
-    ]
-}
.dvcignore
DELETED
@@ -1,3 +0,0 @@
-# Add patterns of files dvc should ignore, which could improve
-# the performance. Learn more at
-# https://dvc.org/doc/user-guide/dvcignore
LICENSE
DELETED
@@ -1,8 +0,0 @@
-MIT License
-Copyright (c) 2021 DAGsHub
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Makefile
DELETED
@@ -1,37 +0,0 @@
-#################################################################################
-# GLOBALS                                                                       #
-#################################################################################
-
-PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
-PROJECT_NAME = savta_depth
-PYTHON_INTERPRETER = python3
-
-ifeq (,$(shell which conda))
-HAS_CONDA=False
-else
-HAS_CONDA=True
-endif
-
-#################################################################################
-# COMMANDS                                                                      #
-#################################################################################
-
-env:
-ifeq (True,$(HAS_CONDA))
-	@echo ">>> Detected conda, creating conda environment."
-	conda create -y --name $(PROJECT_NAME) python=3.7.6
-	@echo ">>> New conda env created. Activate with:\nconda activate $(PROJECT_NAME)"
-else
-	@echo ">>> No conda detected, creating venv environment."
-	$(PYTHON_INTERPRETER) -m venv env
-	@echo ">>> New virtual env created. Activate with:\nsource env/bin/activate ."
-endif
-
-load_requirements:
-	@echo ">>> Installing requirements. Make sure your virtual environment is activated."
-	$(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel
-	$(PYTHON_INTERPRETER) -m pip install -r requirements.txt
-
-save_requirements:
-	@echo ">>> Saving requirements."
-	pip list --format=freeze > requirements.txt
Notebooks/SavtaDepth_Colab.ipynb
DELETED
The diff for this file is too large to render. See raw diff.
README_HF.md
DELETED
@@ -1,53 +0,0 @@
-# SavtaDepth - Hugging Face Spaces
-
-This is a Gradio demo for monocular depth estimation.
-
-## 🚀 Quick Start
-
-The Space is configured to run without a model by default. To enable full depth estimation functionality:
-
-### Option 1: Upload via Hugging Face Space Settings
-1. Go to your Space settings
-2. Upload your trained `model.pth` file to the `models/` directory
-3. Restart the Space
-
-### Option 2: For Local Development
-1. Clone this repository
-2. Place your trained model at `models/model.pth`
-3. Run locally: `python app/app_savta.py`
-
-## 📋 Requirements
-
-The model should be:
-- A PyTorch model file (.pth extension)
-- Compatible with UNet architecture with ResNet34 backbone
-- Trained for depth estimation
-
-## 🎯 Demo Mode
-
-When no model is present, the app will:
-- Show a warning message in orange
-- Convert uploaded images to grayscale instead of depth prediction
-- Allow you to test the UI functionality
-
-## 🛠️ Model Training
-
-To train your own model, refer to the original project repository at:
-https://dagshub.com/OperationSavta/SavtaDepth
-
-## 📊 Model Architecture
-
-- **Backbone**: ResNet34
-- **Architecture**: UNet
-- **Input**: RGB images (any size)
-- **Output**: Depth maps (single channel grayscale)
-
-## ⚡ Performance
-
-- **Inference Time**: ~100ms per image (depending on hardware)
-- **Memory Usage**: ~500MB VRAM
-- **Supported Formats**: JPG, PNG
-
----
-
-*Built with [Gradio](https://gradio.app/) and [FastAI](https://www.fast.ai/)*
app/app_savta.py
CHANGED
@@ -1,157 +1,37 @@
-import os, sys, tempfile, subprocess
 from pathlib import Path
 
-import torch
-from fastai.vision.all import *
 import gradio as gr
+from fastai.vision.all import *
 
-
-#######################
-
-HF_TOKEN = os.getenv("HF_TOKEN")
-
-try:
-    from gradio.flagging import HuggingFaceDatasetSaver  # type: ignore
-
-    hf_writer: gr.FlaggingCallback | None = HuggingFaceDatasetSaver(
-        repo_id="savtadepth-flags-V2", token=HF_TOKEN
-    )
-    allow_flagging: str | bool = "manual"
-except (ImportError, AttributeError):
-    hf_writer = None
-    allow_flagging = "never"  # hide flag button if callback unavailable
-
-############
-#   DVC    #
-############
-
-DAGSHUB_USER = os.getenv("DAGSHUB_USER")
-DAGSHUB_TOKEN = os.getenv("DAGSHUB_USER_TOKEN")
-
-if not DAGSHUB_USER or not DAGSHUB_TOKEN:
-    sys.exit("❌ Please set DAGSHUB_USER and DAGSHUB_USER_TOKEN environment variables.")
-
-REPO = "kingabzpro/SavtaDepth"
-PROD_MODEL_PATH = Path("src/models/model.pth")
-TRAIN_DIR = Path("src/data/processed/train/bathroom")
-TEST_DIR = Path("src/data/processed/test/bathroom")
-
-def _run(cmd: list[str]):
-    return subprocess.run(cmd, text=True, capture_output=True)
-
-def dagshub_download(remote: str, local: Path):
-    local.parent.mkdir(parents=True, exist_ok=True)
-    cmd = ["dagshub", "download", REPO, remote, str(local)]
-    res = _run(cmd)
-    if res.returncode != 0:
-        sys.exit(f"❌ dagshub download failed\nRemote: {remote}\nSTDERR:\n{res.stderr}\nSTDOUT:\n{res.stdout}")
-    print(f"✅ Downloaded {remote} → {local}")
-
-# 1) Model file
-if not PROD_MODEL_PATH.exists():
-    dagshub_download("src/models/model.pth", PROD_MODEL_PATH)
-
-# 2) Train/Test directories (recursive)
-if not TRAIN_DIR.exists():
-    dagshub_download("src/data/processed/train/bathroom", TRAIN_DIR)
-if not TEST_DIR.exists():
-    dagshub_download("src/data/processed/test/bathroom", TEST_DIR)
-
-#######################
-#   Data & Learner    #
-#######################
-
-class ImageImageDataLoaders(DataLoaders):
-    """Create DataLoaders for image→image tasks."""
-
-    @classmethod
-    @delegates(DataLoaders.from_dblock)
-    def from_label_func(
-        cls,
-        path: Path,
-        filenames,
-        label_func,
-        valid_pct: float = 0.2,
-        seed: int | None = None,
-        item_transforms=None,
-        batch_transforms=None,
-        **kwargs,
-    ):
-        dblock = DataBlock(
-            blocks=(ImageBlock(cls=PILImage), ImageBlock(cls=PILImageBW)),
-            get_y=label_func,
-            splitter=RandomSplitter(valid_pct, seed=seed),
-            item_tfms=item_transforms,
-            batch_tfms=batch_transforms,
-        )
-        return cls.from_dblock(dblock, filenames, path=path, **kwargs)
-
-
-def get_y_fn(x: Path) -> Path:
-    return Path(str(x).replace(".jpg", "_depth.png"))
-
-
-def create_data(data_path: Path):
-    fnames = get_files(data_path / "train", extensions=".jpg")
-    return ImageImageDataLoaders.from_label_func(
-        data_path / "train",
-        seed=42,
-        bs=4,
-        num_workers=0,
-        filenames=fnames,
-        label_func=get_y_fn,
-    )
-
-
-learner = unet_learner(
-    data,
-    resnet34,
-    metrics=rmse,
-    wd=1e-2,
-    n_out=3,
-    loss_func=MSELossFlat(),
-    path="src/",
-)
-learner.load("model")
+# Model setup
+MODEL_PATH = Path(__file__).parent.parent / "models" / "model.pth"
+
+if not MODEL_PATH.exists():
+    raise FileNotFoundError(f"Model not found at {MODEL_PATH}")
+
+learner = load_learner(MODEL_PATH)
 
-#####################
-#  Inference Logic  #
-#####################
+# Inference function
 def predict_depth(input_img: PILImage) -> PILImageBW:
     depth, *_ = learner.predict(input_img)
     return PILImageBW.create(depth).convert("L")
 
-#####################
-#     Gradio UI     #
-#####################
+# Gradio UI
+title = "📷 SavtaDepth WebApp"
 
-description_md = (
-    """
+description_md = """
 <p style="text-align:center;font-size:1.05rem;max-width:760px;margin:auto;">
 Upload an RGB image on the left and get a grayscale depth map on the right.
 </p>
 """
-)
 
-footer_html = (
-    """
-<p style='text-align:center;font-size:0.9rem;'>
-<a href='https://dagshub.com/OperationSavta/SavtaDepth' target='_blank'>Project on DAGsHub</a> •
-<a href='https://colab.research.google.com/drive/1XU4DgQ217_hUMU1dllppeQNw3pTRlHy1?usp=sharing' target='_blank'>Google Colab Demo</a>
-</p>
-"""
-)
-
-examples = [["examples/00008.jpg"], ["examples/00045.jpg"]]
+examples_dir = Path(__file__).parent.parent / "examples"
+examples = [[str(examples_dir / "00008.jpg")], [str(examples_dir / "00045.jpg")]]
 
-input_component = gr.Image(width=640, height=480, label="Input
-output_component = gr.Image(label="Predicted
+input_component = gr.Image(width=640, height=480, label="Input RGB")
+output_component = gr.Image(label="Predicted Depth", image_mode="L")
 
 with gr.Blocks(title=title, theme=gr.themes.Soft()) as demo:
     gr.Markdown(f"<center><h1>{title}</h1></center>")
@@ -161,14 +41,9 @@ with gr.Blocks(title=title, theme=gr.themes.Soft()) as demo:
         fn=predict_depth,
         inputs=input_component,
        outputs=output_component,
-        allow_flagging=allow_flagging,
-        flagging_options=["incorrect", "worst", "ambiguous"],
-        flagging_callback=hf_writer,
         examples=examples,
         cache_examples=False,
     )
 
-    gr.HTML(footer_html)
-
 if __name__ == "__main__":
     demo.queue().launch()
dvc.lock
DELETED
@@ -1,75 +0,0 @@
-process_data:
-  cmd: python3 src/code/make_dataset.py src/data/raw/nyu_depth_v2_labeled.mat src/data/raw/splits.mat
-    src/data/processed
-  deps:
-  - path: src/code/make_dataset.py
-    md5: e069c7323c9be16baedd8f988375e145
-    size: 5256
-  - path: src/data/raw/nyu_depth_v2_labeled.mat
-    md5: 520609c519fba3ba5ac58c8fefcc3530
-    size: 2972037809
-  - path: src/data/raw/splits.mat
-    md5: 08e3c3aea27130ac7c01ffd739a4535f
-    size: 2626
-  outs:
-  - path: src/data/processed/
-    md5: 9a1f43f46e8b1c387532e994e721d57d.dir
-    size: 197717291
-    nfiles: 2898
-train:
-  cmd: python3 src/code/training.py src/data/processed/train
-  deps:
-  - path: src/code/custom_data_loading.py
-    md5: c94ea029ed76ca94bb1ad4c1655e5e68
-    size: 1916
-  - path: src/code/params.yml
-    md5: 88b982495a09b6d9355903e31e0b3b3f
-    size: 218
-  - path: src/code/training.py
-    md5: e3dff7f4b59e4ebf818d7631d3e6803a
-    size: 1683
-  - path: src/data/processed/train
-    md5: 9956d748dcadc3abadd1ff966a6e2b92.dir
-    size: 109120921
-    nfiles: 1590
-  outs:
-  - path: logs/train_metrics.csv
-    md5: 15b14c6b8a3c310a7149b3f1bac5d86f
-    size: 5920790
-  - path: logs/train_params.yml
-    md5: 4d148d75cab3dbaa91ec5fccb3382541
-    size: 887
-  - path: src/models/
-    md5: 8586da76f372efa83d832a9d0e664817.dir
-    size: 494927324
-    nfiles: 1
-eval:
-  cmd: python3 src/code/eval.py src/data/processed/test
-  deps:
-  - path: src/code/custom_data_loading.py
-    md5: c94ea029ed76ca94bb1ad4c1655e5e68
-    size: 1916
-  - path: src/code/eval.py
-    md5: 9ea6a6624fa14f15b4d51f9139395663
-    size: 1893
-  - path: src/code/eval_metric_calculation.py
-    md5: 2fc866e1107042a996087d5716d44bf0
-    size: 2999
-  - path: src/code/params.yml
-    md5: 88b982495a09b6d9355903e31e0b3b3f
-    size: 218
-  - path: src/data/processed/test
-    md5: bcccd66f3f561b53ba97c89a558c08a0.dir
-    size: 88596370
-    nfiles: 1308
-  - path: src/models/model.pth
-    md5: f421fb113498c7186fb734928484e013
-    size: 494927324
-  outs:
-  - path: logs/test_metrics.csv
-    md5: ee70c01208cdd018a567debd0abb1643
-    size: 340
-  - path: src/eval/examples/
-    md5: 70fdd803300cbdc6dd76dec2148e0e0c.dir
-    size: 425678
-    nfiles: 10
dvc.yaml
DELETED
@@ -1,38 +0,0 @@
-stages:
-  process_data:
-    cmd: python3 src/code/make_dataset.py src/data/raw/nyu_depth_v2_labeled.mat src/data/raw/splits.mat
-      src/data/processed
-    deps:
-    - src/code/make_dataset.py
-    - src/data/raw/nyu_depth_v2_labeled.mat
-    - src/data/raw/splits.mat
-    outs:
-    - src/data/processed/
-  train:
-    cmd: python3 src/code/training.py src/data/processed/train
-    deps:
-    - src/code/custom_data_loading.py
-    - src/code/params.yml
-    - src/code/training.py
-    - src/data/processed/train
-    outs:
-    - src/models/
-    - logs/train_params.yml:
-        cache: false
-    metrics:
-    - logs/train_metrics.csv:
-        cache: false
-  eval:
-    cmd: python3 src/code/eval.py src/data/processed/test
-    deps:
-    - src/code/params.yml
-    - src/code/custom_data_loading.py
-    - src/code/eval_metric_calculation.py
-    - src/code/eval.py
-    - src/models/model.pth
-    - src/data/processed/test
-    outs:
-    - src/eval/examples/
-    metrics:
-    - logs/test_metrics.csv:
-        cache: false
flagged/log.csv
DELETED
@@ -1,4 +0,0 @@
-name,Output,timestamp
-Google,Hello Google!,2022-02-09 18:25:43.892129
-Google,Hello Google!,2022-02-09 18:25:45.867548
-Google,Hello Google!,2022-02-09 18:25:52.447643
heroku/.slugignore
DELETED
File without changes.
heroku/Aptfile
DELETED
@@ -1 +0,0 @@
-https://github.com/iterative/dvc/releases/download/2.9.3/dvc_2.9.3_amd64.deb
heroku/DVC-heroku-deployment.md
DELETED
@@ -1,21 +0,0 @@
-We need to give Heroku the ability to pull in data from DVC upon app start up. We will install a [buildpack](https://elements.heroku.com/buildpacks/heroku/heroku-buildpack-apt) that allows the installation of apt-files and then define the Aptfile that contains a path to DVC. I.e., in the CLI run:
-
-```
-heroku buildpacks:add --index 1 heroku-community/apt
-```
-
-Then in your root project folder create a file called `Aptfile` that specifies the release of DVC you want installed, https://github.com/iterative/dvc/releases/download/2.8.3/dvc_2.8.3_amd64.deb
-
-Add the following code block to your **streamlit_app.py**:
-
-```python
-import os
-
-if "DYNO" in os.environ and os.path.isdir(".dvc"):
-    os.system("dvc config core.no_scm true")
-    if os.system(f"dvc pull {model} {image}") != 0:
-        exit("dvc pull failed")
-    os.system("rm -r .dvc .apt/usr/lib/dvc")
-```
-
-Reference: [Heroku ML](https://github.com/GuilhermeBrejeiro/deploy_ML_model_Heroku_FastAPI)
heroku/Dockerfile
DELETED
@@ -1,19 +0,0 @@
-FROM python:3.9.10
-
-EXPOSE 7860
-
-WORKDIR /app
-
-COPY requirements_heroku.txt requirements_heroku.txt
-
-RUN pip install -r requirements_heroku.txt
-
-COPY /app /app
-COPY /examples /examples
-
-COPY setup.sh setup.sh
-
-#ENV GRADIO_SERVER_NAME=0.0.0.0
-#ENV GRADIO_SERVER_PORT="$PORT"
-
-CMD ["python", "/app/app_savta.py"]
heroku/Procfile
DELETED
@@ -1 +0,0 @@
-web: source setup.sh && python app/app_savta.py
heroku/heroku.yml
DELETED
@@ -1,5 +0,0 @@
-build:
-  docker:
-    web: Dockerfile
-run:
-  web: python /app/app_savta.py
heroku/requirements_heroku.txt
DELETED
@@ -1,15 +0,0 @@
--f https://download.pytorch.org/whl/cpu/torch_stable.html
-dvc
-fastai
-torch==1.10.2+cpu
-h5py
-opencv-python-headless
-tqdm
-numpy
-scikit-learn
-dagshub
-tables
-fastapi
-gradio
-uvicorn
-jinja2
heroku/setup.sh
DELETED
@@ -1,2 +0,0 @@
-export GRADIO_SERVER_NAME=0.0.0.0
-export GRADIO_SERVER_PORT="$PORT"
logs/test_metrics.csv
DELETED
@@ -1,10 +0,0 @@
-Name,Value,Timestamp,Step
-"a1",0.6736493,1615292013951,1
-"a2",0.9112526,1615292013951,1
-"a3",0.975165,1615292013951,1
-"abs_rel",0.21377242,1615292013951,1
-"sq_rel",0.18530104,1615292013951,1
-"rmse",0.6518149,1615292013951,1
-"rmse_log",0.24310246,1615292013951,1
-"log10",0.08433308,1615292013951,1
-"silog",20.038778,1615292013951,1
logs/train_metrics.csv
DELETED
The diff for this file is too large to render. See raw diff.
logs/train_params.yml
DELETED
@@ -1,25 +0,0 @@
-DAGsHubLogger: true
-Learner: <fastai.learner.Learner object at 0x7fcf56717fd0>
-ParamScheduler: true
-ProgressCallback: true
-Recorder: {add_time: true, train_metrics: false, valid_metrics: true}
-TrainEvalCallback: true
-batch per epoch: 159
-batch size: 4
-dataset.tfms: '[Pipeline: PILBase.create, Pipeline: get_y_fn -> PILBase.create]'
-device: cuda
-dls.after_batch: "Pipeline: IntToFloatTensor -- {'div': 255.0, 'div_mask': 1} -> Normalize\
-  \ -- {'mean': tensor([[[[0.4850]],\n\n [[0.4560]],\n\n [[0.4060]]]],\
-  \ device='cuda:0'), 'std': tensor([[[[0.2290]],\n\n [[0.2240]],\n\n \
-  \ [[0.2250]]]], device='cuda:0'), 'axes': (0, 2, 3)}"
-dls.after_item: 'Pipeline: ToTensor'
-dls.before_batch: 'Pipeline: '
-frozen: false
-frozen idx: 0
-input 1 dim 1: 4
-input 1 dim 2: 3
-input 1 dim 3: 480
-input 1 dim 4: 640
-model parameters: 41221268
-n_inp: 1
-success: true
models/model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bc9733d71d12a9cd8996f72959b280691fddd8b503be6eef60b56f022492196
+size 494927324
requirements.txt
CHANGED
@@ -1,7 +1,3 @@
-# -------- data & version locks -----------
-# Drop heavy DVC stack, switch to DagsHub CLI for downloads
-dagshub==0.3.34  # CLI + streaming support
-pathspec==0.9.0
 
 # -------- deep-learning stack ------------
 fastai==2.7.14  # works with torch < 2.3
run_dev_env.sh
DELETED
@@ -1,7 +0,0 @@
-docker run -d \
-    -p 8080:8080 \
-    --name "dags-ml-workspace" -v "/${PWD}:/workspace" \
-    --env AUTHENTICATE_VIA_JUPYTER="dagshub_savta" \
-    --shm-size 2G \
-    --restart always \
-    dagshub/ml-workspace-minimal:latest
src/.gitignore
DELETED
@@ -1,2 +0,0 @@
-/models
-/eval
src/code/custom_data_loading.py
DELETED
@@ -1,51 +0,0 @@
-import yaml
-from fastai.vision.all import \
-    DataLoaders, \
-    delegates, \
-    DataBlock, \
-    ImageBlock, \
-    PILImage, \
-    PILImageBW, \
-    RandomSplitter, \
-    Path, \
-    get_files
-
-
-class ImageImageDataLoaders(DataLoaders):
-    """Basic wrapper around several `DataLoader`s with factory methods for Image to Image problems"""
-    @classmethod
-    @delegates(DataLoaders.from_dblock)
-    def from_label_func(cls, path, filenames, label_func, valid_pct=0.2, seed=None, item_transforms=None,
-                        batch_transforms=None, **kwargs):
-        """Create from list of `fnames` in `path`s with `label_func`."""
-        datablock = DataBlock(blocks=(ImageBlock(cls=PILImage), ImageBlock(cls=PILImageBW)),
-                              get_y=label_func,
-                              splitter=RandomSplitter(valid_pct, seed=seed),
-                              item_tfms=item_transforms,
-                              batch_tfms=batch_transforms)
-        res = cls.from_dblock(datablock, filenames, path=path, **kwargs)
-        return res
-
-
-def get_y_fn(x):
-    y = str(x.absolute()).replace('.jpg', '_depth.png')
-    y = Path(y)
-
-    return y
-
-
-def create_data(data_path):
-    with open(r"./src/code/params.yml") as f:
-        params = yaml.safe_load(f)
-
-    filenames = get_files(data_path, extensions='.jpg')
-    if len(filenames) == 0:
-        raise ValueError("Could not find any files in the given path")
-    dataset = ImageImageDataLoaders.from_label_func(data_path,
-                                                    seed=int(params['seed']),
-                                                    bs=int(params['batch_size']),
-                                                    num_workers=int(params['num_workers']),
-                                                    filenames=filenames,
-                                                    label_func=get_y_fn)
-
-    return dataset
src/code/eval.py
DELETED
@@ -1,56 +0,0 @@
-import sys
-import yaml
-import torch
-from torchvision import transforms
-from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, PILImageBW
-from custom_data_loading import create_data
-from eval_metric_calculation import compute_eval_metrics
-from dagshub import dagshub_logger
-from tqdm import tqdm
-
-
-if __name__ == "__main__":
-    if len(sys.argv) < 2:
-        print("usage: %s <test_data_path>" % sys.argv[0], file=sys.stderr)
-        sys.exit(0)
-
-    with open(r"./src/code/params.yml") as f:
-        params = yaml.safe_load(f)
-
-    data_path = Path(sys.argv[1])
-    data = create_data(data_path)
-
-    arch = {'resnet34': resnet34}
-    loss = {'MSELossFlat': MSELossFlat()}
-
-    learner = unet_learner(data,
-                           arch.get(params['architecture']),
-                           n_out=int(params['num_outs']),
-                           loss_func=loss.get(params['loss_func']),
-                           path='src/',
-                           model_dir='models')
-    learner = learner.load('model')
-
-    filenames = get_files(Path(data_path), extensions='.jpg')
-    test_files = L([Path(i) for i in filenames])
-
-    for i, sample in tqdm(enumerate(test_files.items),
-                          desc="Predicting on test images",
-                          total=len(test_files.items)):
-        pred = learner.predict(sample)[0]
-        pred = PILImageBW.create(pred).convert('L')
-        pred.save("src/eval/" + str(sample.stem) + "_pred.png")
-        if i < 10:
-            pred.save("src/eval/examples/" + str(sample.stem) + "_pred.png")
-
-    print("Calculating metrics...")
-    metrics = compute_eval_metrics(test_files)
-
-    with dagshub_logger(
-            metrics_path="logs/test_metrics.csv",
-            should_log_hparams=False
-    ) as logger:
-        # Metric logging
-        logger.log_metrics(metrics)
-
-    print("Evaluation Done!")
src/code/eval_metric_calculation.py
DELETED
@@ -1,79 +0,0 @@
-import numpy as np
-from PIL import Image
-from tqdm import tqdm
-
-def compute_errors(target, prediction):
-    thresh = np.maximum((target / prediction), (prediction / target))
-    a1 = (thresh < 1.25).mean()
-    a2 = (thresh < 1.25 ** 2).mean()
-    a3 = (thresh < 1.25 ** 3).mean()
-
-    abs_rel = np.mean(np.abs(target - prediction) / target)
-    sq_rel = np.mean(((target - prediction) ** 2) / target)
-
-    rmse = (target - prediction) ** 2
-    rmse = np.sqrt(rmse.mean())
-
-    rmse_log = (np.log(target) - np.log(prediction)) ** 2
-    rmse_log = np.sqrt(rmse_log.mean())
-
-    err = np.log(prediction) - np.log(target)
-    silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
-
-    log_10 = (np.abs(np.log10(target) - np.log10(prediction))).mean()
-
-    return a1, a2, a3, abs_rel, sq_rel, rmse, rmse_log, silog, log_10
-
-
-def compute_eval_metrics(test_files):
-    min_depth_eval = 1e-3
-    max_depth_eval = 10
-
-    num_samples = len(test_files)
-
-    a1 = np.zeros(num_samples, np.float32)
-    a2 = np.zeros(num_samples, np.float32)
-    a3 = np.zeros(num_samples, np.float32)
-    abs_rel = np.zeros(num_samples, np.float32)
-    sq_rel = np.zeros(num_samples, np.float32)
-    rmse = np.zeros(num_samples, np.float32)
-    rmse_log = np.zeros(num_samples, np.float32)
-    silog = np.zeros(num_samples, np.float32)
-    log10 = np.zeros(num_samples, np.float32)
-
-    for i in tqdm(range(num_samples), desc="Calculating metrics for test data", total=num_samples):
-        sample_path = test_files[i]
-        target_path = str(sample_path.parent/(sample_path.stem + "_depth.png"))
-        pred_path = "src/eval/" + str(sample_path.stem) + "_pred.png"
-
-        target_image = Image.open(target_path)
-        pred_image = Image.open(pred_path)
-
-        target = np.asarray(target_image)
-        pred = np.asarray(pred_image)
-
-        target = target / 25.0
-        pred = pred / 25.0
-
-        pred[pred < min_depth_eval] = min_depth_eval
-        pred[pred > max_depth_eval] = max_depth_eval
-        pred[np.isinf(pred)] = max_depth_eval
-
-        target[np.isinf(target)] = 0
-        target[np.isnan(target)] = 0
-
-        valid_mask = np.logical_and(target > min_depth_eval, target < max_depth_eval)
-
-        a1[i], a2[i], a3[i], abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], silog[i], log10[i] = \
-            compute_errors(target[valid_mask], pred[valid_mask])
-
-    print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
-        'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10'))
-    print("{:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}, {:7.3f}".format(
-        a1.mean(), a2.mean(), a3.mean(),
-        abs_rel.mean(), sq_rel.mean(), rmse.mean(), rmse_log.mean(), silog.mean(), log10.mean()))
-
-    return dict(a1=a1.mean(), a2=a2.mean(), a3=a3.mean(),
-                abs_rel=abs_rel.mean(), sq_rel=sq_rel.mean(),
-                rmse=rmse.mean(), rmse_log=rmse_log.mean(),
-                log10=log10.mean(), silog=silog.mean())
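For reference, the depth-metric conventions that the deleted `compute_errors` implemented (δ-threshold accuracy, AbsRel, RMSE, and friends) are easy to sanity-check on synthetic data. A minimal self-contained sketch reusing three of the removed formulas (the toy depth values are invented for illustration):

```python
import numpy as np

def compute_errors(target, prediction):
    # delta accuracy: share of pixels whose ratio to ground truth is under 1.25
    thresh = np.maximum(target / prediction, prediction / target)
    a1 = (thresh < 1.25).mean()
    # mean absolute relative error and root-mean-square error, as in the removed file
    abs_rel = np.mean(np.abs(target - prediction) / target)
    rmse = np.sqrt(((target - prediction) ** 2).mean())
    return a1, abs_rel, rmse

# Invented toy depths within the 0.001-10 m range the evaluator clips to
target = np.array([1.0, 2.0, 4.0, 8.0])
prediction = np.array([1.1, 1.8, 4.4, 7.0])
a1, abs_rel, rmse = compute_errors(target, prediction)
print(f"d1={a1:.3f}  AbsRel={abs_rel:.3f}  RMSE={rmse:.3f}")
```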
src/code/make_dataset.py
DELETED
@@ -1,121 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#######################################################################################
-# The MIT License
-
-# Copyright (c) 2014 Hannes Schulz, University of Bonn <schulz@ais.uni-bonn.de>
-# Copyright (c) 2013 Benedikt Waldvogel, University of Bonn <mail@bwaldvogel.de>
-# Copyright (c) 2008-2009 Sebastian Nowozin <nowozin@gmail.com>
-
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-#######################################################################################
-#
-# See https://github.com/deeplearningais/curfil/wiki/Training-and-Prediction-with-the-NYU-Depth-v2-Dataset
-
-
-"""Helper script to convert the NYU Depth v2 dataset Matlab file into a set of PNG and JPEG images.
-Receives 3 Files from argparse:
-    <h5_file> - Contains the original images, depths maps, and scene types
-    <train_test_split> - contains two numpy arrays with the index of the
-        images based on the split to train and test sets.
-    <out_folder> - Name of the folder to save the original and depth images.
-
-Every image in the DB will have it's twine B&W image that indicates the depth
-in the image. the images will be read, converted by the convert_image function
-and finally saved to path based on train test split and Scene types.
-"""
-
-from __future__ import print_function
-
-import h5py
-import numpy as np
-import os
-import scipy.io
-import sys
-import cv2
-from tqdm import tqdm
-
-
-def convert_image(index, depth_map, img, output_folder):
-    """Processes data images and depth maps
-    :param index: int, image index
-    :param depth_map: numpy array, image depth - 2D array.
-    :param img: numpy array, the original RGB image - 3D array.
-    :param output_folder: path to save the image in.
-
-    Receives an image with it's relevant depth map.
-    Normalizes the depth map, and adds a 7 px boundary to the original image.
-    Saves both image and depth map to the appropriate processed data folder.
-    """
-
-    # Normalize the depth image
-    # normalized_depth = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX)
-    img_depth = depth_map * 25.0
-    cv2.imwrite("%s/%05d_depth.png" % (output_folder, index), img_depth)
-
-    # Adding black frame to original image
-    img = img[:, :, ::-1]  # Flipping the image from RGB to BGR for opencv
-    image_black_boundary = np.zeros(img.shape, dtype=np.uint8)
-    image_black_boundary[7:image_black_boundary.shape[0] - 6, 7:image_black_boundary.shape[1] - 6, :] = \
-        img[7:img.shape[0] - 6, 7:img.shape[1] - 6, :]
-    cv2.imwrite("%s/%05d.jpg" % (output_folder, index), image_black_boundary)
-
-
-if __name__ == "__main__":
-
-    # Check if got all needed input for argparse
-    if len(sys.argv) != 4:
-        print("usage: %s <h5_file> <train_test_split> <out_folder>" % sys.argv[0], file=sys.stderr)
-        sys.exit(0)
-
-    # load arguments to variables
-    h5_file = h5py.File(sys.argv[1], "r")
-    train_test = scipy.io.loadmat(sys.argv[2])  # h5py is not able to open that file. but scipy is
-    out_folder = sys.argv[3]
-
-    # Extract images *indexes* for train and test data sets
-    test_images = set([int(x) for x in train_test["testNdxs"]])
-    train_images = set([int(x) for x in train_test["trainNdxs"]])
-    print("%d training images" % len(train_images))
-    print("%d test images" % len(test_images))
-
-    # Grayscale
-    depth = h5_file['depths']
-    print("Reading", sys.argv[1])
-    images = h5_file['images']  # (num_channels, height, width)
-
-    # Extract all sceneTypes per image - "office", "classroom", etc.
-    scenes = [u''.join(chr(c[0]) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
-
-    for i, image in tqdm(enumerate(images), desc="Processing images", total=len(images)):
-        idx = int(i) + 1
-        if idx in train_images:
-            train_test = "train"
-        else:
-            assert idx in test_images, "index %d neither found in training set nor in test set" % idx
-            train_test = "test"
-
-        # Create path to save image in
-        folder = "%s/%s/%s" % (out_folder, train_test, scenes[i])
-        if not os.path.exists(folder):
-            os.makedirs(folder)
-
-        convert_image(i, depth[i, :, :].T, image.T, folder)
-
-    print("Finished")
src/code/params.yml
DELETED
@@ -1,13 +0,0 @@
-seed: 42
-data: nyu_depth_v2
-batch_size: 4
-num_workers: 0
-weight_decay: 1e-2
-learning_rate: 1e-3
-epochs: 50
-num_outs: 3
-source_dir: src
-model_dir: models
-architecture: resnet34
-loss_func: MSELossFlat
-train_metric: rmse
src/code/training.py
DELETED
@@ -1,44 +0,0 @@
-"""Trains or fine-tunes a model for the task of monocular depth estimation
-Receives 1 arguments from argparse:
-    <data_path> - Path to the dataset which is split into 2 folders - train and test.
-"""
-import sys
-import yaml
-from fastai.vision.all import unet_learner, Path, resnet34, rmse, MSELossFlat
-from custom_data_loading import create_data
-from dagshub.fastai import DAGsHubLogger
-
-
-if __name__ == "__main__":
-    # Check if got all needed input for argparse
-    if len(sys.argv) != 2:
-        print("usage: %s <data_path>" % sys.argv[0], file=sys.stderr)
-        sys.exit(0)
-
-    with open(r"./src/code/params.yml") as f:
-        params = yaml.safe_load(f)
-
-    data = create_data(Path(sys.argv[1]))
-
-    metrics = {'rmse': rmse}
-    arch = {'resnet34': resnet34}
-    loss = {'MSELossFlat': MSELossFlat()}
-
-    learner = unet_learner(data,
-                           arch.get(params['architecture']),
-                           metrics=metrics.get(params['train_metric']),
-                           wd=float(params['weight_decay']),
-                           n_out=int(params['num_outs']),
-                           loss_func=loss.get(params['loss_func']),
-                           path=params['source_dir'],
-                           model_dir=params['model_dir'],
-                           cbs=DAGsHubLogger(
-                               metrics_path="logs/train_metrics.csv",
-                               hparams_path="logs/train_params.yml"))
-
-    print("Training model...")
-    learner.fine_tune(epochs=int(params['epochs']),
-                      base_lr=float(params['learning_rate']))
-    print("Saving model...")
-    learner.save('model')
-    print("Done!")
src/data/.gitignore
DELETED
@@ -1 +0,0 @@
-/processed
src/data/raw/.gitignore
DELETED
@@ -1,2 +0,0 @@
-/nyu_depth_v2_labeled.mat
-/splits.mat
src/data/raw/nyu_depth_v2_labeled.mat.dvc
DELETED
@@ -1,9 +0,0 @@
-md5: d27a0ba6c898f981797a3388c26c2d0f
-frozen: true
-deps:
-- etag: '"b125b2b1-5aa5b95864fc7"'
-  path: http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat
-outs:
-- md5: 520609c519fba3ba5ac58c8fefcc3530
-  path: nyu_depth_v2_labeled.mat
-  size: 2972037809
src/data/raw/splits.mat.dvc
DELETED
@@ -1,9 +0,0 @@
-md5: 26011289311c18b92781de66654223a4
-frozen: true
-deps:
-- etag: '"a42-4cb6a5fad2fc0"'
-  path: http://horatio.cs.nyu.edu/mit/silberman/indoor_seg_sup/splits.mat
-outs:
-- md5: 08e3c3aea27130ac7c01ffd739a4535f
-  path: splits.mat
-  size: 2626