Update app.py
app.py
CHANGED
@@ -17,8 +17,11 @@ from ultralytics import YOLO
 # Keras application imports
 from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input as vgg16_preprocess
 from tensorflow.keras.applications.efficientnet import EfficientNetB0, preprocess_input as effnet_preprocess
-
-# ------------------------------------------------------------
+from pathlib import Path
+# ------------------------------------------------------------
+# GLOBAL CONFIG
+# ------------------------------------------------------------
+# ------------------------------------------------------------
 # GLOBAL CONFIG
 # ------------------------------------------------------------
 st.set_page_config(
@@ -27,29 +30,43 @@ st.set_page_config(
     layout="wide",
 )
 
-
-
-
-
-
-
-
-</h3>
-""",
-unsafe_allow_html=True
-)
+# ---- Compact Header Styling ----
+st.markdown("""
+<style>
+/* Reduce Streamlit's default top padding */
+.block-container {
+    padding-top: 1rem !important;
+}
 
+/* Tighten spacing between header lines */
+h1 {
+    margin-top: 0.2rem !important;
+    margin-bottom: 0.1rem !important;
+}
 
+h3 {
+    margin-top: -0.3rem !important;
+    margin-bottom: 0.1rem !important;
+}
 
-
-
-
-
+/* Center text utility */
+.center-text {
+    text-align: center !important;
+}
+</style>
+""", unsafe_allow_html=True)
+
+# ---- Compact Header ----
+st.markdown("""
+<h1 class='center-text'>🤖⚡ <b>SmartVision AI</b> ⚡🤖</h1>
+<h3 class='center-text'>🚀🎯 Intelligent Multi-Class Object Recognition System 🎯🚀</h3>
+<p class='center-text' style='color: gray; margin-top:-6px;'>
+End-to-end computer vision pipeline on a COCO subset of 25 everyday object classes
+</p>
+""", unsafe_allow_html=True)
 
 st.divider()
 
-from pathlib import Path
-
 # Resolve repository root relative to this file (streamlit_app/app.py)
 THIS_FILE = Path(__file__).resolve()
 REPO_ROOT = THIS_FILE.parent  # repo/
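A note on the path handling above: `from pathlib import Path` now sits with the other imports, and the unchanged context lines resolve paths relative to this file. The comment says the file lives at `streamlit_app/app.py` while `REPO_ROOT` is annotated as `# repo/`, yet `THIS_FILE.parent` is the directory that contains `app.py`, not its parent. A minimal sketch of the distinction, assuming the file really is at `streamlit_app/app.py` (the committed code keeps `THIS_FILE.parent`):

```python
from pathlib import Path

THIS_FILE = Path(__file__).resolve()   # e.g. .../repo/streamlit_app/app.py
APP_DIR = THIS_FILE.parent             # .../repo/streamlit_app  (what the diff assigns to REPO_ROOT)
REPO_ROOT = THIS_FILE.parent.parent    # .../repo                (one level up, matching the "# repo/" comment)
```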
@@ -539,7 +556,7 @@ page = st.sidebar.radio("Navigate", PAGES)
 # ------------------------------------------------------------
 # PAGE 1 — HOME
 # ------------------------------------------------------------
-
+if page == "🏠 Home":
     col1, col2 = st.columns([1.2, 1])
 
     with col1:
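The hunk header shows the sidebar navigation (`page = st.sidebar.radio("Navigate", PAGES)`), and the added line guards the Home content behind it. A minimal sketch of that routing pattern; the `PAGES` entries other than "🏠 Home" are assumptions, not taken from the diff:

```python
import streamlit as st

# Hypothetical page list -- only "🏠 Home" is confirmed by this diff.
PAGES = ["🏠 Home", "🔍 Detection"]

page = st.sidebar.radio("Navigate", PAGES)

if page == "🏠 Home":
    st.write("Home page layout goes here")
elif page == "🔍 Detection":
    st.write("Detection page layout goes here")
```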
@@ -558,6 +575,34 @@ of **25 COCO classes**. It brings together:
         """
     )
 
+
+    st.markdown("""
+    ### 🏷️ COCO Subset — 25 Classes Used for Training
+
+    <style>
+    .badge {
+        display: inline-block;
+        padding: 6px 12px;
+        margin: 4px;
+        background-color: #f0f2f6;
+        border-radius: 12px;
+        font-size: 14px;
+    }
+    </style>
+    """, unsafe_allow_html=True)
+
+    classes = [
+        'person','bicycle','car','motorcycle','airplane','bus','truck','traffic light',
+        'stop sign','bench','bird','cat','dog','horse','cow','elephant','bottle','cup',
+        'bowl','pizza','cake','chair','couch','bed','potted plant'
+    ]
+
+    # Capitalize first letter of each word
+    html = "".join([f"<span class='badge'>{c.title()}</span>" for c in classes])
+
+    st.markdown(html, unsafe_allow_html=True)
+
+
     with col2:
         st.subheader("🕹️ How to Use This App")
         st.markdown(
@@ -597,7 +642,7 @@ of **25 COCO classes**. It brings together:
             cols = st.columns(min(3, len(imgs)))
             for i, img_path in enumerate(imgs[:3]):
                 with cols[i]:
-                    st.image(img_path, caption=os.path.basename(img_path), width=520)
+                    st.image(img_path, caption=os.path.basename(img_path), width= 520)
         else:
             st.info("No sample images found in `inference_outputs/` yet.")
     else:
@@ -660,14 +705,11 @@ YOLOv8 will detect all objects and optionally verify them with the best classifi
         """
     )
 
-
-
     # ---- Replace your current detection block with this ----
     uploaded_file = None
 
     with st.form("detection_form"):
-
-
+
         # Put uploader inside the form so uploading doesn't trigger detection
         uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
 
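The comment in the last hunk explains why the uploader moves inside `st.form`: widgets inside a form do not rerun the script on their own, so uploading an image no longer kicks off detection; the app only reacts when the form's submit button is pressed. A minimal sketch of the complete pattern, assuming a `st.form_submit_button` and a detection call that are not shown in this hunk:

```python
import streamlit as st

with st.form("detection_form"):
    # Uploading a file only stores it in the form; nothing runs yet.
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
    submitted = st.form_submit_button("Run detection")  # assumed, not shown in the hunk

if submitted and uploaded_file is not None:
    # Detection happens only after an explicit submit, not on every upload or rerun.
    st.write(f"Running YOLOv8 on {uploaded_file.name} ...")  # placeholder for the real detection call
```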