{
"schema_version": "0.2.0",
"evaluation_id": "helm_instruct/cohere_command-xlarge-beta/1770834858.3559701",
"retrieved_timestamp": "1770834858.3559701",
"source_metadata": {
"source_name": "helm_instruct",
"source_type": "documentation",
"source_organization_name": "crfm",
"evaluator_relationship": "third_party"
},
"model_info": {
"name": "Cohere Command beta 52.4B",
"id": "cohere/command-xlarge-beta",
"developer": "cohere",
"inference_platform": "unknown"
},
"evaluation_results": [
{
"evaluation_name": "Mean win rate",
"source_data": {
"dataset_name": "helm_instruct",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "How many models this model outperforms on average (over columns).",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 1.0
},
"score_details": {
"score": 0.089,
"details": {
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {}
}
},
{
"evaluation_name": "Anthropic RLHF dataset",
"source_data": {
"dataset_name": "Anthropic RLHF dataset",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "Harmlessness on Anthropic RLHF dataset",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 5.0
},
"score_details": {
"score": 4.214,
"details": {
"description": "min=3.38, mean=4.214, max=4.92, sum=33.715 (8)",
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {
"subset": [
"hh",
"hh",
"hh",
"hh",
"red_team",
"red_team",
"red_team",
"red_team"
],
"evaluator": [
"claude",
"gpt4",
"mturk",
"scale",
"claude",
"gpt4",
"mturk",
"scale"
]
}
}
},
{
"evaluation_name": "Best ChatGPT Prompts",
"source_data": {
"dataset_name": "Best ChatGPT Prompts",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "Harmlessness on Best ChatGPT Prompts",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 5.0
},
"score_details": {
"score": 4.988,
"details": {
"description": "min=4.98, mean=4.988, max=5, sum=19.95 (4)",
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {
"path": "src_helm_benchmark_scenarios_best_chatgpt_prompts.yaml",
"tags": "",
"evaluator": [
"claude",
"gpt4",
"mturk",
"scale"
]
}
}
},
{
"evaluation_name": "Koala test dataset",
"source_data": {
"dataset_name": "Koala test dataset",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "Harmlessness on Koala test dataset",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 5.0
},
"score_details": {
"score": 4.969,
"details": {
"description": "min=4.936, mean=4.969, max=5, sum=19.874 (4)",
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {
"evaluator": [
"claude",
"gpt4",
"mturk",
"scale"
]
}
}
},
{
"evaluation_name": "Open Assistant",
"source_data": {
"dataset_name": "Open Assistant",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "Harmlessness on Open Assistant",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 5.0
},
"score_details": {
"score": 4.967,
"details": {
"description": "min=4.955, mean=4.967, max=5, sum=19.87 (4)",
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {
"language": "en",
"evaluator": [
"claude",
"gpt4",
"mturk",
"scale"
]
}
}
},
{
"evaluation_name": "Self Instruct",
"source_data": {
"dataset_name": "Self Instruct",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "Harmlessness on Self Instruct",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 5.0
},
"score_details": {
"score": 4.971,
"details": {
"description": "min=4.955, mean=4.971, max=5, sum=19.885 (4)",
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {
"evaluator": [
"claude",
"gpt4",
"mturk",
"scale"
]
}
}
},
{
"evaluation_name": "Vicuna",
"source_data": {
"dataset_name": "Vicuna",
"source_type": "url",
"url": [
"https://storage.googleapis.com/crfm-helm-public/instruct/benchmark_output/releases/v1.0.0/groups/instruction_following.json"
]
},
"metric_config": {
"evaluation_description": "Harmlessness on Vicuna",
"lower_is_better": false,
"score_type": "continuous",
"min_score": 0.0,
"max_score": 5.0
},
"score_details": {
"score": 4.995,
"details": {
"description": "min=4.981, mean=4.995, max=5, sum=19.981 (4)",
"tab": "Instruction Following"
}
},
"generation_config": {
"additional_details": {
"category": "all",
"evaluator": [
"claude",
"gpt4",
"mturk",
"scale"
]
}
}
}
]
}