Update src/streamlit_app.py
src/streamlit_app.py (CHANGED): +32 -33
```diff
@@ -596,43 +596,42 @@ if selected_ram_key in LLM_DATABASE and selected_category in LLM_DATABASE[selected_ram_key]:
     st.subheader(f"🎯 {selected_category.title()} Models for {selected_ram_range}")
 
     # Display models in a detailed table
-    with st.container():
-    … (the rest of the old model-display block is not recoverable from the rendered page)
+    for model in models:
+        with st.container():
+            col1, col2, col3 = st.columns([3, 1, 3])
+            with col1:
+                st.markdown(f"### {model['name']}")
+            with col2:
+                st.markdown(f"**{model['size']}**")
+                st.caption("Download Size")
+            with col3:
+                st.markdown(f"*{model['description']}*")
+                if "Llama" in model['name']:
+                    st.caption("🔗 Available on Hugging Face & Ollama")
+                elif "Mistral" in model['name']:
+                    st.caption("🔗 Available on Hugging Face & Mistral AI")
+                elif "Gemma" in model['name']:
+                    st.caption("🔗 Available on Hugging Face & Google")
+                else:
+                    st.caption("🔗 Available on Hugging Face")
+
+            # 🔽 Quantization size details
+            fp16, q8, q4 = calculate_quantized_sizes(model['size'])
+            with st.expander("💾 Quantized Size Estimates"):
+                st.markdown(f"""
+                | Format | Size |
+                |--------|------|
+                | FP16 (Full Precision) | **{fp16}** |
+                | 8-bit Quantized | **{q8}** |
+                | 4-bit Quantized | **{q4}** |
+                """)
+
+            st.markdown("---")  # ← this belongs inside the if block
 
-    st.markdown("---")
 else:
     st.info(f"No {selected_category} models available for {selected_ram_range}")
 
+
 # Enhanced reference guide
 with st.expander("📚 Model Guide & Download Information"):
     st.markdown("""
```
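The new quantization expander calls `calculate_quantized_sizes`, which is defined elsewhere in `src/streamlit_app.py` and does not appear in this diff. For context, here is a minimal sketch of a helper with the shape the call site implies, assuming the model database stores FP16 sizes as strings like `"13GB"` and that 8-bit and 4-bit quantization cut the footprint to roughly one half and one quarter; apart from the function name and the three-string return, everything below is an assumption:

```python
import re


def calculate_quantized_sizes(size_str: str) -> tuple[str, str, str]:
    """Estimate (FP16, 8-bit, 4-bit) sizes from a string like '13GB'.

    Hypothetical stand-in for the helper called in the diff above; only
    the name and the return shape are taken from the call site.
    """
    match = re.search(r"([\d.]+)\s*([A-Za-z]+)", size_str)
    if not match:
        # Unparseable size string: show it unchanged for every format.
        return size_str, size_str, size_str
    value, unit = float(match.group(1)), match.group(2)

    def fmt(v: float) -> str:
        return f"{v:g}{unit}"

    # Treat the listed size as the FP16 checkpoint; 8-bit weights land at
    # roughly half of it and 4-bit weights at roughly a quarter.
    return fmt(value), fmt(value / 2), fmt(value / 4)
```

Under those assumptions, `calculate_quantized_sizes("13GB")` returns `("13GB", "6.5GB", "3.25GB")`, which the expander's table then renders.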