Fix max tokens: rename `max_tokens` to `maxTokens` in `inferenceConfig` (the Bedrock Converse API expects the camelCase key)

This commit is contained in:
jknapp 2025-03-16 22:07:44 -07:00
parent 6966d575d5
commit 6d99028d91

View File

@ -168,7 +168,7 @@ class Pipeline:
payload = {"modelId": model_id,
"messages": processed_messages,
"system": [{'text': system_message if system_message else 'you are an intelligent ai assistant'}],
"inferenceConfig": {"temperature": 1, "max_tokens": body.get("max_tokens", MAX_COMBINED_TOKENS)},
"inferenceConfig": {"temperature": 1, "maxTokens": body.get("max_tokens", MAX_COMBINED_TOKENS)},
"additionalModelRequestFields": reasoning_config
}
if body.get("stream", False):