Spaces:
Running
Running
add labels about relaxation
Browse files
app.py
CHANGED
|
@@ -55,12 +55,27 @@ def format_dataframe(df, show_percentage=False, selected_groups=None, compact_vi
|
|
| 55 |
# Create a copy with selected columns
|
| 56 |
display_df = df[selected_cols].copy()
|
| 57 |
|
| 58 |
-
# Add
|
| 59 |
-
if '
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
|
| 65 |
# Convert count-based metrics to percentages if requested
|
| 66 |
if show_percentage and 'n_structures' in df.columns:
|
|
@@ -237,7 +252,15 @@ def generate_metric_legend_html():
|
|
| 237 |
|
| 238 |
def gradio_interface() -> gr.Blocks:
|
| 239 |
with gr.Blocks() as demo:
|
| 240 |
-
gr.Markdown("
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 241 |
with gr.Tabs(elem_classes="tab-buttons"):
|
| 242 |
with gr.TabItem("🏆 Leaderboard", elem_id="boundary-benchmark-tab-table"):
|
| 243 |
gr.Markdown("# LeMat-GenBench")
|
|
@@ -327,17 +350,14 @@ def gradio_interface() -> gr.Blocks:
|
|
| 327 |
except Exception as e:
|
| 328 |
gr.Markdown(f"Leaderboard is empty or error loading: {str(e)}")
|
| 329 |
|
| 330 |
-
gr.Markdown("
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
## About LeMat-Gen-Bench
|
| 336 |
-
|
| 337 |
-
**Welcome to the LeMat-Bench Leaderboard!** This leaderboard showcases generative models for materials discovery evaluated on the LeMat-Bench benchmark. Read more in our pre-print.
|
| 338 |
|
| 339 |
-
|
| 340 |
-
|
| 341 |
|
| 342 |
with gr.TabItem("⚙️ Submit", elem_id="boundary-benchmark-tab-table"):
|
| 343 |
gr.Markdown(
|
|
|
|
| 55 |
# Create a copy with selected columns
|
| 56 |
display_df = df[selected_cols].copy()
|
| 57 |
|
| 58 |
+
# Add symbols to model names based on various properties
|
| 59 |
+
if 'model_name' in display_df.columns:
|
| 60 |
+
def add_model_symbols(row):
|
| 61 |
+
name = row['model_name']
|
| 62 |
+
symbols = []
|
| 63 |
+
|
| 64 |
+
# Add relaxed symbol
|
| 65 |
+
if 'relaxed' in df.columns and row.get('relaxed', False):
|
| 66 |
+
symbols.append('⚡')
|
| 67 |
+
|
| 68 |
+
# Add reference dataset symbols
|
| 69 |
+
# ✅ for Alexandria and OQMD (in-distribution, part of reference dataset)
|
| 70 |
+
if name in ['Alexandria', 'OQMD']:
|
| 71 |
+
symbols.append('β
')
|
| 72 |
+
# ❌ for AFLOW (out-of-distribution relative to reference dataset)
|
| 73 |
+
elif name == 'AFLOW':
|
| 74 |
+
symbols.append('❌')
|
| 75 |
+
|
| 76 |
+
return f"{name} {' '.join(symbols)}" if symbols else name
|
| 77 |
+
|
| 78 |
+
display_df['model_name'] = df.apply(add_model_symbols, axis=1)
|
| 79 |
|
| 80 |
# Convert count-based metrics to percentages if requested
|
| 81 |
if show_percentage and 'n_structures' in df.columns:
|
|
|
|
| 252 |
|
| 253 |
def gradio_interface() -> gr.Blocks:
|
| 254 |
with gr.Blocks() as demo:
|
| 255 |
+
gr.Markdown("""
|
| 256 |
+
# 🔬 LeMat-GenBench: A Unified Benchmark for Generative Models of Crystalline Materials
|
| 257 |
+
|
| 258 |
+
Generative machine learning models hold great promise for accelerating materials discovery, particularly through the inverse design of inorganic crystals, enabling an unprecedented exploration of chemical space. Yet, the lack of standardized evaluation frameworks makes it difficult to evaluate, compare and further develop these ML models meaningfully.
|
| 259 |
+
|
| 260 |
+
**LeMat-GenBench** introduces a unified benchmark for generative models of crystalline materials, with **standardized evaluation metrics** for meaningful model comparison, diverse tasks, and this leaderboard to encourage and track community progress.
|
| 261 |
+
|
| 262 |
+
📄 **Paper**: [arXiv preprint](https://arxiv.org/abs/XXXX.XXXXX) | 💻 **Code**: [GitHub](https://github.com/LeMaterial/lemat-genbench) | 📧 **Contact**: siddharth.betala-ext [at] entalpic.ai, alexandre.duval [at] entalpic.ai
|
| 263 |
+
""")
|
| 264 |
with gr.Tabs(elem_classes="tab-buttons"):
|
| 265 |
with gr.TabItem("🏆 Leaderboard", elem_id="boundary-benchmark-tab-table"):
|
| 266 |
gr.Markdown("# LeMat-GenBench")
|
|
|
|
| 350 |
except Exception as e:
|
| 351 |
gr.Markdown(f"Leaderboard is empty or error loading: {str(e)}")
|
| 352 |
|
| 353 |
+
gr.Markdown("""
|
| 354 |
+
**Symbol Legend:**
|
| 355 |
+
- ⚡ Structures were already relaxed
|
| 356 |
+
- ✅ Contributes to LeMat-Bulk reference dataset (in-distribution)
|
| 357 |
+
- ❌ Out-of-distribution relative to LeMat-Bulk reference dataset
|
|
|
|
|
|
|
|
|
|
| 358 |
|
| 359 |
+
Verified submissions mean the results came from a model submission rather than a CIF submission.
|
| 360 |
+
""")
|
| 361 |
|
| 362 |
with gr.TabItem("⚙️ Submit", elem_id="boundary-benchmark-tab-table"):
|
| 363 |
gr.Markdown(
|