import os
import shutil
import time
from pathlib import Path

import pytest
import torch
from einops import rearrange
from flash_attn.models.gpt import GPTLMHeadModel, combine_state_dicts_tp, shard_state_dict_tp
from flash_attn.models.llama import (
    config_from_checkpoint,
    inv_remap_state_dict_hf_llama,
    llama_config_to_gpt2_config,
    remap_state_dict_hf_llama,
    remap_state_dict_meta_llama,
    state_dicts_from_checkpoint,
)
from flash_attn.utils.distributed import all_gather_raw
from flash_attn.utils.generation import update_graph_cache
from flash_attn.utils.pretrained import state_dict_from_pretrained
from transformers import AutoConfig, LlamaConfig, LlamaTokenizer
from transformers.models.llama.modeling_llama import LlamaForCausalLM

current_dir = Path(__file__).parent.absolute()
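

# Helper shared by the tests below: load a LLaMa checkpoint in either the Meta layout
# (one state dict per tensor-parallel shard, recombined here) or the HF layout, remapped
# to the GPT naming scheme that GPTLMHeadModel expects.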
def _pretrained_state_dict_from_checkpoint(checkpoint_path, model_name, config, checkpoint_format):
    if checkpoint_format == "meta":
        ckpt_state_dicts = state_dicts_from_checkpoint(checkpoint_path, model_name)
        pretrained_state_dicts = [remap_state_dict_meta_llama(s, config) for s in ckpt_state_dicts]
        pretrained_state_dict = combine_state_dicts_tp(pretrained_state_dicts, config)
    else:
        pretrained_state_dict = state_dict_from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf"
        )
        pretrained_state_dict = remap_state_dict_hf_llama(pretrained_state_dict, config)
    return pretrained_state_dict
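

# Check that remapping a single Meta checkpoint shard produces exactly the parameter
# names and shapes that GPTLMHeadModel expects.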
@pytest.mark.parametrize("model_name", ["7B"])
def test_llama_state_dict(model_name):
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )
    config = llama_config_to_gpt2_config(config_from_checkpoint(checkpoint_path, model_name))
    ckpt_state_dicts = state_dicts_from_checkpoint(checkpoint_path, model_name)
    pretrained_state_dict = remap_state_dict_meta_llama(ckpt_state_dicts[0], config)
    # device="meta" builds the module structure without allocating real weights,
    # so keys and shapes can be compared cheaply.
    model = GPTLMHeadModel(config, device="meta")
    state_dict = model.state_dict()
    assert state_dict.keys() == pretrained_state_dict.keys()
    for k in state_dict.keys():
        assert state_dict[k].shape == pretrained_state_dict[k].shape
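

# Round trip: inv_remap_state_dict_hf_llama should be the exact inverse of
# remap_state_dict_hf_llama on an HF checkpoint.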
@pytest.mark.parametrize(
    "model_name", ["meta-llama/Llama-2-7b-hf", "PY007/TinyLlama-1.1B-step-50K-105b"]
)
def test_inv_remap_state_dict_hf_llama(model_name):
    config = llama_config_to_gpt2_config(
        AutoConfig.from_pretrained(model_name, trust_remote_code=True)
    )
    state_dict = state_dict_from_pretrained(model_name)
    # Drop the rotary inv_freq buffers: they are derived quantities, not learned weights,
    # and are not part of the remapped state dict.
    state_dict = {key: val for key, val in state_dict.items() if "rotary_emb.inv_freq" not in key}
    pretrained_state_dict = remap_state_dict_hf_llama(state_dict, config)
    state_dict_recover = inv_remap_state_dict_hf_llama(pretrained_state_dict, config)
    assert set(state_dict_recover.keys()) == set(state_dict.keys())
    for key in state_dict_recover.keys():
        torch.testing.assert_close(state_dict_recover[key], state_dict[key])
@pytest.mark.parametrize(
    "model_name",
    [
        "7B",
        "13B",
        "meta-llama/Llama-2-13b-hf",
        "codellama/CodeLlama-7b-hf",
        "codellama/CodeLlama-13b-hf",
        "codellama/CodeLlama-34b-hf",
        "PY007/TinyLlama-1.1B-step-50K-105b",
    ],
)
def test_llama_optimized(model_name):
    """Check that our implementation of LLaMa (with all optimizations enabled) matches the
    HF implementation: the output of our forward pass in fp16 should be around the same as the HF
    forward pass in fp16, when compared to the HF forward pass in fp32.
    """
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    device = "cuda"
    # Model names containing "/" are HF hub ids; bare names like "7B" refer to local
    # Meta checkpoints under checkpoint_path.
    if "/" in model_name:
        config = llama_config_to_gpt2_config(
            AutoConfig.from_pretrained(model_name, trust_remote_code=True)
        )
    else:
        config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format="meta")
        config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # no fused kernel here for LLaMa's gated (SiLU) MLP
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True
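
    # Load and remap the pretrained weights into the flash-attn model.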
    if "/" in model_name:
        pretrained_state_dict = remap_state_dict_hf_llama(
            state_dict_from_pretrained(model_name), config
        )
    else:
        pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
            checkpoint_path, model_name, config, checkpoint_format="meta"
        )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device)
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        out = model.transformer(input_ids)
        logits = model(input_ids).logits
    del model  # free GPU memory before loading the HF reference models
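
    # fp32 reference. device_map="auto" lets HF place the fp32 weights across the
    # available devices (and CPU), since the larger models don't fit on one GPU in fp32.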
    model_ref = LlamaForCausalLM.from_pretrained(
        model_name if "/" in model_name else Path(checkpoint_path) / f"{model_name}-hf",
        device_map="auto",
    )
    model_ref.eval()
    with torch.no_grad():
        out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
        logits_ref = model_ref(input_ids).logits.to(device=device)
    del model_ref
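
    # fp16 HF baseline on the same device; its error vs fp32 sets the tolerance below.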
    model_hf = LlamaForCausalLM.from_pretrained(
        model_name if "/" in model_name else Path(checkpoint_path) / f"{model_name}-hf",
        torch_dtype=dtype,
        device_map={"": device},
    )
    model_hf.eval()
    with torch.no_grad():
        out_hf = model_hf.model(input_ids).last_hidden_state
        logits_hf = model_hf(input_ids).logits
    del model_hf

    print(f"Output max diff: {(out - out_ref).abs().max().item()}")
    print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
    # Our fp16 error vs the fp32 reference must stay within 3x of HF fp16's own error.
    assert (out - out_ref).abs().max().item() < 3 * (out_hf - out_ref).abs().max().item()

    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
    print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
    print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
    assert (logits - logits_ref).abs().max().item() < 3 * (
        logits_hf - logits_ref
    ).abs().max().item()
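

# The parallel tests below need one process per GPU and apex's tensor model parallel
# utilities. A typical launch (hypothetical invocation; adjust paths to your setup):
#   torchrun --no_python --nproc_per_node=2 pytest -q -s tests/models/test_llama.py -k parallel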
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize(
    "model_name", ["13B", "meta-llama/Llama-2-13b-hf", "codellama/CodeLlama-34b-hf"]
)
def test_llama_parallel(model_name, world_size):
    """Check that our implementation of LLaMa (with all optimizations enabled) matches the
    HF implementation: the output of our forward pass in fp16 should be around the same as the HF
    forward pass in fp16, when compared to the HF forward pass in fp32.
    """
    from apex.transformer import parallel_state

    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    if "/" in model_name:
        config = llama_config_to_gpt2_config(
            AutoConfig.from_pretrained(model_name, trust_remote_code=True)
        )
    else:
        config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format="meta")
        config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # no fused kernel here for LLaMa's gated (SiLU) MLP
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True
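
    # One GPU per process: NCCL init plus apex tensor-model-parallel state.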
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    if "/" in model_name:
        pretrained_state_dict = remap_state_dict_hf_llama(
            state_dict_from_pretrained(model_name), config
        )
    else:
        pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
            checkpoint_path, model_name, config, checkpoint_format="meta"
        )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    # Each rank loads only its shard of the tensor-parallel weights.
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device)
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )
    with torch.no_grad():
        # With sequence parallelism, the hidden states come back sharded along
        # (batch * seqlen): gather across ranks, then restore the (b, s, d) shape.
        out = model.transformer(input_ids)
        out, _ = all_gather_raw(out, process_group=process_group)
        out = rearrange(out, "(b s) d -> b s d", b=batch_size)
        # Logits are sharded along the vocab dimension: gather and re-concatenate.
        logits = model(input_ids).logits
        logits = rearrange(logits, "(b s) d -> b s d", b=batch_size)
        logits, _ = all_gather_raw(logits, process_group)
        logits = rearrange(logits, "(n b) ... d -> b ... (n d)", b=batch_size)
    del model
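
    # Only rank 0 loads the HF reference models and runs the comparison.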
    if rank == 0:
        model_ref = LlamaForCausalLM.from_pretrained(
            model_name if "/" in model_name else Path(checkpoint_path) / f"{model_name}-hf",
            device_map="auto",
        )
        model_ref.eval()
        with torch.no_grad():
            out_ref = model_ref.model(input_ids).last_hidden_state.to(device=device)
            logits_ref = model_ref(input_ids).logits.to(device=device)
        del model_ref

        model_hf = LlamaForCausalLM.from_pretrained(
            model_name if "/" in model_name else Path(checkpoint_path) / f"{model_name}-hf",
            torch_dtype=dtype,
            device_map="auto",
        )
        model_hf.eval()
        with torch.no_grad():
            out_hf = model_hf.model(input_ids).last_hidden_state.to(device=device)
            logits_hf = model_hf(input_ids).logits.to(device=device)
        del model_hf

        print(f"Output max diff: {(out - out_ref).abs().max().item()}")
        print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
        assert (out - out_ref).abs().max().item() < 2 * (out_hf - out_ref).abs().max().item()

        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * (
            logits_hf - logits_ref
        ).abs().max().item()
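

# Greedy-decoding test: our generate (eager and CUDA-graph) is compared against HF
# generation; teacher_outputs forces our decoder to score the HF-generated tokens so
# the per-step logits are directly comparable.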
@pytest.mark.parametrize("model_name", ["7B"])
@pytest.mark.parametrize("checkpoint_format", ["meta", "hf"])
def test_llama_generation(model_name, checkpoint_format):
    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    device = "cuda"
    config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format)
    config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # no fused kernel here for LLaMa's gated (SiLU) MLP
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    tokenizer = LlamaTokenizer.from_pretrained(Path(checkpoint_path) / f"{model_name}-hf")
    eos_token_id = tokenizer.eos_token_id

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 100
    max_length = 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )
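
    # HF fp16 generation runs first; its sequences are reused as teacher outputs and
    # its scores as the fp16 baseline.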
    model_hf = LlamaForCausalLM.from_pretrained(
        Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map={"": device}
    )
    model_hf.eval()
    print("HF fp16")
    torch.cuda.synchronize()
    start = time.time()
    out_hf = model_hf.generate(
        input_ids=input_ids, max_length=max_length, return_dict_in_generate=True, output_scores=True
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
    del model_hf

    # fp32 reference logits, scored on the HF-generated sequences. device_map="auto"
    # because the fp32 weights may not fit on a single GPU.
    model_ref = LlamaForCausalLM.from_pretrained(
        Path(checkpoint_path) / f"{model_name}-hf", device_map="auto"
    )
    model_ref.eval()
    with torch.no_grad():
        logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1].to(device=device)
    del model_ref

    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format
    )
    model = GPTLMHeadModel(config, device=device, dtype=dtype)
    model.load_state_dict(pretrained_state_dict)
    model.eval()

    print("Without CUDA graph")
    torch.cuda.synchronize()
    start = time.time()
    out = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        eos_token_id=eos_token_id,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
        # Decode the HF-generated tokens so both models score identical sequences.
        teacher_outputs=out_hf.sequences,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    # Capture the decoding CUDA graphs outside the timing loop.
    batch_size, seqlen_og = input_ids.shape
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
    print("With CUDA graph")
    torch.cuda.synchronize()
    start = time.time()
    out_cg = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        cg=True,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
        teacher_outputs=out_hf.sequences,
    )
    torch.cuda.synchronize()
    print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")

    with torch.no_grad():
        logits_parallel = model(out_hf.sequences).logits[:, (seqlen - 1) : -1]
    logits_hf = torch.stack(out_hf.scores, dim=1)
    logits = torch.stack(out.scores, dim=1)
    logits_cg = torch.stack(out_cg.scores, dim=1)
    del model

    hf_error = (logits_hf - logits_ref).abs().max().item()
    print(f"HF fp16 logits max diff: {hf_error}")
    print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
    print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item()}")

    assert (logits_parallel - logits_ref).abs().max().item() < 2 * hf_error
    assert (logits - logits_ref).abs().max().item() < 2 * hf_error
    # CUDA-graph decoding must be bit-for-bit identical to eager decoding.
    assert torch.equal(logits_cg, logits)
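

# Tensor-parallel generation: rank 0 compares TP decoding scores (eager and CUDA-graph)
# against HF fp16 and fp32, with the same 2x-of-HF-fp16-error tolerance as above.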
@pytest.mark.parametrize("world_size", [2])
@pytest.mark.parametrize(
    "model_name", ["13B", "meta-llama/Llama-2-13b-hf", "codellama/CodeLlama-34b-hf"]
)
def test_llama_parallel_generation(model_name, world_size):
    """Check that our implementation matches the HF implementation:
    the scores in fp16 should be around the same as the HF scores in fp16, when compared to
    the HF scores in fp32.
    """
    from apex.transformer import parallel_state

    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )

    dtype = torch.float16
    if "/" in model_name:
        config = llama_config_to_gpt2_config(
            AutoConfig.from_pretrained(model_name, trust_remote_code=True)
        )
    else:
        config = config_from_checkpoint(checkpoint_path, model_name, checkpoint_format="meta")
        config = llama_config_to_gpt2_config(config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # no fused kernel here for LLaMa's gated (SiLU) MLP
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True
    # Pad the vocab so the embedding and LM head shard evenly across ranks.
    config.pad_vocab_size_multiple = 8 * world_size
    config.sequence_parallel = False  # keep activations replicated across ranks for decoding

    # NCCL async error handling can interfere with CUDA graph capture, so turn it off.
    os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0"
    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    torch.manual_seed(0)
    batch_size = 1
    seqlen = 100
    max_length = 150
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, seqlen), dtype=torch.long, device=device
    )

    # Pin each process to its own GPU; otherwise, during CUDA graph capture, the
    # rank-1 process can end up running work on both devices.
    torch.cuda.set_device(device)

    if "/" in model_name:
        pretrained_state_dict = remap_state_dict_hf_llama(
            state_dict_from_pretrained(model_name), config
        )
    else:
        pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
            checkpoint_path, model_name, config, checkpoint_format="meta"
        )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    print("Without CUDA graph")
    out = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        tensor_parallel=world_size,
        vocab_size=config.vocab_size,
        # No teacher outputs here: the HF reference is only run later, on rank 0.
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
    )

    # Capture the decoding CUDA graphs outside the timing loop.
    batch_size, seqlen_og = input_ids.shape
    model._decoding_cache = update_graph_cache(model, None, batch_size, seqlen_og, max_length)
    print("With CUDA graph")
    out_cg = model.generate(
        input_ids=input_ids,
        max_length=max_length,
        tensor_parallel=world_size,
        vocab_size=config.vocab_size,
        cg=True,
        return_dict_in_generate=True,
        output_scores=True,
        enable_timing=True,
    )
    del model
    parallel_state.destroy_model_parallel()

    if rank == 0:
        # HF fp16 baseline; device_map="auto" spreads the weights if one GPU is not enough.
        model_hf = LlamaForCausalLM.from_pretrained(
            model_name if "/" in model_name else Path(checkpoint_path) / f"{model_name}-hf",
            torch_dtype=dtype,
            device_map="auto",
        )
        model_hf.eval()
        print("HF fp16")
        torch.cuda.synchronize()
        start = time.time()
        with torch.inference_mode():
            out_hf = model_hf.generate(
                input_ids=input_ids,
                max_length=max_length,
                return_dict_in_generate=True,
                output_scores=True,
            )
        torch.cuda.synchronize()
        print(f"Prompt processing + decoding time: {(time.time() - start) * 1000:.0f}ms")
        del model_hf

        # fp32 reference logits on the HF-generated sequences.
        model_ref = LlamaForCausalLM.from_pretrained(
            model_name if "/" in model_name else Path(checkpoint_path) / f"{model_name}-hf",
            device_map="auto",
        )
        model_ref.eval()
        with torch.inference_mode():
            logits_ref = model_ref(out_hf.sequences).logits[:, (seqlen - 1) : -1]
        del model_ref
        logits_hf = torch.stack(out_hf.scores, dim=1)

        logits = torch.stack(out.scores, dim=1)
        logits_cg = torch.stack(out_cg.scores, dim=1)

        hf_error = (logits_hf - logits_ref).abs().max().item()
        print(f"HF fp16 logits max diff: {hf_error}")
        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * hf_error
        print(f"Logits CG max diff: {(logits_cg - logits_ref).abs().max().item()}")
        # CUDA-graph decoding must be bit-for-bit identical to eager decoding.
        assert torch.equal(logits_cg, logits)
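

# Regression test for tensor parallelism when the number of attention heads does not
# divide evenly across ranks (world_size + 1 heads on world_size ranks): build a tiny
# random LLaMa, save it in HF format, and compare the sharded forward pass against HF.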
@torch.no_grad()
@pytest.mark.parametrize("world_size", [2])
def test_llama_parallel_uneven_num_heads(world_size):
    from apex.transformer import parallel_state

    checkpoint_path = (
        Path(os.environ.get("CHECKPOINT_DIR", current_dir.parent.parent / "checkpoints")) / "llama"
    )
    num_attention_heads = world_size + 1
    model_name = f"teeny-{num_attention_heads}-heads"

    if not torch.distributed.is_initialized():
        torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = f"cuda:{torch.distributed.get_rank()}"
    assert world_size <= torch.distributed.get_world_size()
    parallel_state.initialize_model_parallel(tensor_model_parallel_size_=world_size)
    rank = parallel_state.get_tensor_model_parallel_rank()
    process_group = parallel_state.get_tensor_model_parallel_group()

    dtype = torch.float16
    llama_config = LlamaConfig(
        hidden_size=256 * num_attention_heads,
        intermediate_size=256 * num_attention_heads * 4,
        num_hidden_layers=4,
        num_attention_heads=num_attention_heads,
        initializer_range=0.5,  # much larger than the 0.02 default
    )
    config = llama_config_to_gpt2_config(llama_config)
    config.use_flash_attn = True
    config.fused_bias_fc = True
    config.fused_mlp = False  # no fused kernel here for LLaMa's gated (SiLU) MLP
    config.fused_dropout_add_ln = True
    config.residual_in_fp32 = True

    torch.manual_seed(0)
    batch_size = 2
    max_seqlen = 256
    seqlens = torch.randint(max_seqlen // 2, max_seqlen + 1, (batch_size,), device=device)
    input_ids = torch.randint(
        0, config.vocab_size, (batch_size, max_seqlen), dtype=torch.long, device=device
    )

    # Rank 0 materializes the tiny random model in HF format; all ranks wait at the
    # barrier, then load the shared checkpoint.
    if rank == 0:
        LlamaForCausalLM(config=llama_config).save_pretrained(checkpoint_path / f"{model_name}-hf")
    torch.distributed.barrier()

    pretrained_state_dict = _pretrained_state_dict_from_checkpoint(
        checkpoint_path, model_name, config, checkpoint_format="hf"
    )
    model = GPTLMHeadModel(config, process_group=process_group, device=device, dtype=dtype)
    model.load_state_dict(shard_state_dict_tp(pretrained_state_dict, config, world_size, rank))
    model.eval()

    # Same gather-and-reshape as test_llama_parallel; the whole test runs under
    # @torch.no_grad(), so no explicit context manager is needed here.
    out = model.transformer(input_ids)
    out, _ = all_gather_raw(out, process_group=process_group)
    out = rearrange(out, "(b s) d -> b s d", b=batch_size)
    logits = model(input_ids).logits
    logits = rearrange(logits, "(b s) d -> b s d", b=batch_size)
    logits, _ = all_gather_raw(logits, process_group)
    logits = rearrange(logits, "(n b) ... d -> b ... (n d)", b=batch_size)

    if rank == 0:
        model_ref = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", device_map={"": device}
        )
        model_ref = model_ref.to(device=device)
        model_ref.eval()
        out_ref = model_ref.model(input_ids).last_hidden_state
        logits_ref = model_ref(input_ids).logits
        del model_ref

        model_hf = LlamaForCausalLM.from_pretrained(
            Path(checkpoint_path) / f"{model_name}-hf", torch_dtype=dtype, device_map={"": device}
        )
        model_hf.eval()
        out_hf = model_hf.model(input_ids).last_hidden_state.to(device=device)
        logits_hf = model_hf(input_ids).logits.to(device=device)
        del model_hf

        print(f"Output max diff: {(out - out_ref).abs().max().item()}")
        print(f"Output mean diff: {(out - out_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(out_hf - out_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(out_hf - out_ref).abs().mean().item()}")
        assert (out - out_ref).abs().max().item() < 2 * (out_hf - out_ref).abs().max().item()

        print(f"Logits max diff: {(logits - logits_ref).abs().max().item()}")
        print(f"Logits mean diff: {(logits - logits_ref).abs().mean().item()}")
        print(f"HF fp16 max diff: {(logits_hf - logits_ref).abs().max().item()}")
        print(f"HF fp16 mean diff: {(logits_hf - logits_ref).abs().mean().item()}")
        assert (logits - logits_ref).abs().max().item() < 2 * (
            logits_hf - logits_ref
        ).abs().max().item()

        # Clean up the throwaway checkpoint created above.
        if os.path.exists(checkpoint_path / f"{model_name}-hf"):
            shutil.rmtree(checkpoint_path / f"{model_name}-hf")