bug fix: wrap mutable ColumnContent column defaults in field(default_factory=...)
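The commit title doesn't name the failure, but the shape of the fix matches the dataclass default check tightened in Python 3.11: an instance of a plain (non-frozen) @dataclass is unhashable, and unhashable defaults now raise at class-creation time. A minimal sketch of that failure mode and of the fix pattern, assuming ColumnContent is the plain @dataclass defined earlier in this file (a stand-in is declared below):

from dataclasses import dataclass, field, make_dataclass

# Stand-in for the real ColumnContent defined earlier in src/display/utils.py;
# field names are taken from the diff below.
@dataclass
class ColumnContent:
    name: str
    type: str
    displayed_by_default: bool
    hidden: bool = False
    never_hidden: bool = False

# Old pattern: a ColumnContent instance as a raw default.
# On Python >= 3.11 this raises:
#   ValueError: mutable default <class 'ColumnContent'> for field
#   model_type_symbol is not allowed: use default_factory
try:
    make_dataclass(
        "Broken",
        [["model_type_symbol", ColumnContent, ColumnContent("T", "str", True)]],
        frozen=True,
    )
except ValueError as err:
    print(err)

# New pattern: defer construction with field(default_factory=...),
# which is exactly what the column_field() helper in this diff does.
Fixed = make_dataclass(
    "Fixed",
    [["model_type_symbol", ColumnContent,
      field(default_factory=lambda: ColumnContent("T", "str", True))]],
    frozen=True,
)
print(Fixed().model_type_symbol.name)  # "T"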
src/display/utils.py CHANGED (+33, -14)
@@ -1,4 +1,4 @@
-from dataclasses import dataclass, make_dataclass
+from dataclasses import dataclass, field, make_dataclass
 from enum import Enum
 
 from src.about import Tasks
@@ -21,24 +21,43 @@ class ColumnContent:
 
 ## Leaderboard columns
 auto_eval_column_dict = []
+
+
+def column_field(name: str, type: str, displayed_by_default: bool, hidden: bool = False, never_hidden: bool = False):
+    return (
+        ColumnContent,
+        field(
+            default_factory=lambda: ColumnContent(
+                name,
+                type,
+                displayed_by_default,
+                hidden,
+                never_hidden,
+            )
+        ),
+    )
+
+
 # Init
-auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)])
-auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)])
-auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Average ⬆️", "number", True)])
+auto_eval_column_dict.append(["model_type_symbol", *column_field("T", "str", True, never_hidden=True)])
+auto_eval_column_dict.append(["model", *column_field("Model", "markdown", True, never_hidden=True)])
+auto_eval_column_dict.append(["average", *column_field("Average ⬆️", "number", True)])
 for task in Tasks:
-    auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)])
-auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)])
-auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)])
-auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)])
-auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)])
-auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)])
-auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)])
-auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", False)])
-auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)])
-auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)])
+    auto_eval_column_dict.append([task.name, *column_field(task.value.col_name, "number", True)])
+auto_eval_column_dict.append(["model_type", *column_field("Type", "str", False)])
+auto_eval_column_dict.append(["architecture", *column_field("Architecture", "str", False)])
+auto_eval_column_dict.append(["weight_type", *column_field("Weight type", "str", False, True)])
+auto_eval_column_dict.append(["precision", *column_field("Precision", "str", False)])
+auto_eval_column_dict.append(["license", *column_field("Hub License", "str", False)])
+auto_eval_column_dict.append(["params", *column_field("#Params (B)", "number", False)])
+auto_eval_column_dict.append(["likes", *column_field("Hub ❤️", "number", False)])
+auto_eval_column_dict.append(["still_on_hub", *column_field("Available on the hub", "bool", False)])
+auto_eval_column_dict.append(["revision", *column_field("Model sha", "str", False, False)])
 
 # We use make dataclass to dynamically fill the scores from Tasks
 AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True)
+for field_name, _, field_def in auto_eval_column_dict:
+    setattr(AutoEvalColumn, field_name, field_def.default_factory())
 
 ## For the queue columns in the submission tab
 @dataclass(frozen=True)
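A note on the trailing setattr loop: once a field uses default_factory, dataclasses delete the class-level attribute, so AutoEvalColumn.model would raise AttributeError even though instances still receive defaults. Re-attaching one ColumnContent per field preserves class-level lookups such as AutoEvalColumn.model.name, which is how the stock leaderboard template reads its columns; the callers sketched here are illustrative, not part of this diff:

# Class-level reads keep working after the setattr loop:
print(AutoEvalColumn.model.name)                    # "Model"
print(AutoEvalColumn.average.displayed_by_default)  # True

# Instances are still constructible; each field gets a fresh
# ColumnContent from its default_factory:
row = AutoEvalColumn()
print(row.model_type_symbol.name)                   # "T"

Unpacking *column_field(...) keeps each append a one-liner, while the lambda defers ColumnContent construction until instantiation, which is what satisfies the Python 3.11 default check.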