Skip to content

Commit 9347da2

Browse files
committed
llm params
1 parent 9203a90 commit 9347da2

File tree

1 file changed: +2 additions, -5 deletions

src/inferencesh/models/llm.py

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -65,9 +65,8 @@ class BaseLLMInput(BaseAppInput):
             "Explain quantum computing in simple terms"
         ]
     )
-    temperature: float = Field(default=0.7)
-    top_p: float = Field(default=0.95)
-    max_tokens: int = Field(default=4096)
+    temperature: float = Field(default=0.7, ge=0.0, le=1.0)
+    top_p: float = Field(default=0.95, ge=0.0, le=1.0)
     context_size: int = Field(default=4096)
 
 class ImageCapabilityMixin(BaseModel):
@@ -588,7 +587,6 @@ def stream_generate(
     tool_choice: Optional[Dict[str, Any]] = None,
     temperature: float = 0.7,
     top_p: float = 0.95,
-    max_tokens: int = 4096,
     stop: Optional[List[str]] = None,
     verbose: bool = False,
     output_cls: type[BaseLLMOutput] = LLMOutput,
@@ -612,7 +610,6 @@ def _generate_worker():
         "stream": True,
         "temperature": temperature,
         "top_p": top_p,
-        "max_tokens": max_tokens,
         "stop": stop
     }
     if tools is not None:

0 commit comments

Comments
 (0)