{
    "module": "keras_nlp.src.models.gemma.gemma_causal_lm",
    "class_name": "GemmaCausalLM",
    "config": {
        "backbone": {
            "module": "keras_nlp.src.models.gemma.gemma_backbone",
            "class_name": "GemmaBackbone",
            "config": {
                "name": "gemma_backbone",
                "trainable": true,
                "vocabulary_size": 256000,
                "num_layers": 18,
                "num_query_heads": 8,
                "num_key_value_heads": 1,
                "hidden_dim": 2048,
                "intermediate_dim": 32768,
                "head_dim": 256,
                "layer_norm_epsilon": 1e-06,
                "dropout": 0
            },
            "registered_name": "keras_nlp>GemmaBackbone"
        },
        "preprocessor": {
            "module": "keras_nlp.src.models.gemma.gemma_causal_lm_preprocessor",
            "class_name": "GemmaCausalLMPreprocessor",
            "config": {
                "name": "gemma_causal_lm_preprocessor",
                "trainable": true,
                "dtype": "float32",
                "tokenizer": {
                    "module": "keras_nlp.src.models.gemma.gemma_tokenizer",
                    "class_name": "GemmaTokenizer",
                    "config": {
                        "name": "gemma_tokenizer",
                        "trainable": true,
                        "dtype": "int32",
                        "proto": null,
                        "sequence_length": null
                    },
                    "registered_name": "keras_nlp>GemmaTokenizer"
                },
                "sequence_length": 8192,
                "add_start_token": true,
                "add_end_token": true
            },
            "registered_name": "keras_nlp>GemmaCausalLMPreprocessor"
        },
        "name": "gemma_causal_lm"
    },
    "registered_name": "keras_nlp>GemmaCausalLM"
}
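
This config describes a Gemma causal LM in keras_nlp serialization form: an 18-layer backbone with multi-query attention (8 query heads sharing 1 key/value head), a 2048-dim hidden state, and a 256000-token SentencePiece tokenizer wrapped in a preprocessor that pads to sequence_length 8192. A minimal sketch of reloading such a preset with keras_nlp follows; the preset name "gemma_2b_en" is an assumption (any preset directory containing this config.json alongside weights and the tokenizer proto would work the same way):

import keras_nlp

# from_preset() rebuilds the GemmaBackbone, GemmaTokenizer, and
# GemmaCausalLMPreprocessor exactly as serialized in this config,
# then loads the trained weights. NOTE: "gemma_2b_en" is assumed here.
causal_lm = keras_nlp.models.GemmaCausalLM.from_preset("gemma_2b_en")

# Because the attached preprocessor handles tokenization, padding,
# and start/end tokens (add_start_token / add_end_token above),
# generate() accepts raw strings directly.
print(causal_lm.generate("The capital of France is", max_length=32))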