{ "architectures": [ "GPT2LMHeadModel" ], "model_type": "gpt2", "vocab_size": 15000, "n_embd": 384, "n_layer": 12, "n_head": 6, "n_inner": 1024, "n_positions": 512, "bos_token_id": 1, "eos_token_id": 2, "pad_token_id": 3 }