Fix max-tokens bug: use MAX_COMBINED_TOKENS constant in inferenceConfig

This commit is contained in:
Author: jknapp — 2025-03-16 22:53:48 -07:00
parent 588b0a434d
commit 90b7aa64ce

View File

@ -174,7 +174,7 @@ class Pipeline:
payload = {"modelId": model_id,
"messages": processed_messages,
"system": [{'text': system_message['content'] if system_message else 'you are an intelligent ai assistant'}],
"inferenceConfig": {"temperature": 1, "maxTokens": max_tokens },
"inferenceConfig": {"temperature": 1, "maxTokens": MAX_COMBINED_TOKENS },
"additionalModelRequestFields": reasoning_config
}
if body.get("stream", False):