Skip to content

Commit 0a45bc9

Browse files
authored
Merge pull request #6 from steamship-core/doug/timeout-update
Add configurable wait time to generate
2 parents f391162 + eeb5c11 commit 0a45bc9

File tree

1 file changed

+4
-1
lines changed

1 file changed

+4
-1
lines changed

src/steamship_langchain/llms/openai.py

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ class OpenAI(BaseLLM):
2222
max_words: int = 256
2323
n: int = 1
2424
best_of: int = 1
25+
batch_task_timeout_seconds: int = 10 * 60 # 10 minute limit on generation tasks
2526

2627
@property
2728
def _llm_type(self) -> str:
@@ -106,7 +107,9 @@ def _batch(self, prompts: List[str], stop: Optional[List[str]] = None) -> List[G
106107
try:
107108
prompt_file = File.create(client=self.client, blocks=blocks)
108109
task = llm_plugin.tag(doc=prompt_file)
109-
task.wait() # TODO(douglas-reid): put in timeout, based on configuration
110+
# the llm_plugin handles retries and backoff. this wait()
111+
# will allow for that to happen.
112+
task.wait(max_timeout_s=self.batch_task_timeout_seconds)
110113
generation_file = task.output.file
111114

112115
for text_block in generation_file.blocks:

0 commit comments

Comments (0)