Commit ·
59f6b3d
1
Parent(s): 9373984
Fix glm-ocr.py max_tokens exceeding max_model_len
Browse files
GLM-OCR SDK sets max_tokens=16384 but our max_model_len is 8192.
vLLM silently caps generation, making the high default misleading.
Align default to 8192 and document the SDK discrepancy.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
- glm-ocr.py +4 -3
glm-ocr.py
CHANGED
|
@@ -214,7 +214,7 @@ def main(
|
|
| 214 |
image_column: str = "image",
|
| 215 |
batch_size: int = 16,
|
| 216 |
max_model_len: int = 8192,
|
| 217 |
-
max_tokens: int = 16384,
|
| 218 |
temperature: float = 0.01,
|
| 219 |
top_p: float = 0.00001,
|
| 220 |
repetition_penalty: float = 1.1,
|
|
@@ -278,6 +278,7 @@ def main(
|
|
| 278 |
# glmocr/config.py PageLoaderConfig: temperature=0.01, top_p=0.00001,
|
| 279 |
# top_k=1, repetition_penalty=1.1, max_tokens=16384
|
| 280 |
# generation_config.json on HF also sets do_sample=false (greedy)
|
|
|
|
| 281 |
sampling_params = SamplingParams(
|
| 282 |
temperature=temperature,
|
| 283 |
top_p=top_p,
|
|
@@ -454,8 +455,8 @@ Examples:
|
|
| 454 |
parser.add_argument(
|
| 455 |
"--max-tokens",
|
| 456 |
type=int,
|
| 457 |
-
default=16384,
|
| 458 |
-
help="Maximum tokens to generate (default: 16384)",
|
| 459 |
)
|
| 460 |
parser.add_argument(
|
| 461 |
"--temperature",
|
|
|
|
| 214 |
image_column: str = "image",
|
| 215 |
batch_size: int = 16,
|
| 216 |
max_model_len: int = 8192,
|
| 217 |
+
max_tokens: int = 8192,
|
| 218 |
temperature: float = 0.01,
|
| 219 |
top_p: float = 0.00001,
|
| 220 |
repetition_penalty: float = 1.1,
|
|
|
|
| 278 |
# glmocr/config.py PageLoaderConfig: temperature=0.01, top_p=0.00001,
|
| 279 |
# top_k=1, repetition_penalty=1.1, max_tokens=16384
|
| 280 |
# generation_config.json on HF also sets do_sample=false (greedy)
|
| 281 |
+
# Note: SDK uses max_tokens=16384 but vLLM caps at max_model_len (8192)
|
| 282 |
sampling_params = SamplingParams(
|
| 283 |
temperature=temperature,
|
| 284 |
top_p=top_p,
|
|
|
|
| 455 |
parser.add_argument(
|
| 456 |
"--max-tokens",
|
| 457 |
type=int,
|
| 458 |
+
default=8192,
|
| 459 |
+
help="Maximum tokens to generate (default: 8192, capped by max-model-len)",
|
| 460 |
)
|
| 461 |
parser.add_argument(
|
| 462 |
"--temperature",
|