Update to gpt4all version 1.0.1. Implement the streaming version of the completions endpoint and add an OpenAI Python client test for the new streaming functionality. (#1129)

Co-authored-by: Brandon <bbeiler@ridgelineintl.com>
Brandon Beiler
2023-07-05 23:17:30 -04:00
committed by GitHub
parent affd0af51f
commit fb576fbd7e
3 changed files with 94 additions and 36 deletions

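The diff below exercises the new streaming endpoint through the stock openai Python client, which has to be pointed at the local gpt4all API server rather than api.openai.com. A minimal setup sketch; the base URL, port, and key value are assumptions for illustration and are not part of this commit:

# Hypothetical client configuration for running the streaming test against a
# locally running gpt4all API server; the URL, port, and key are assumptions.
import openai

openai.api_base = "http://localhost:4891/v1"    # assumed local server address
openai.api_key = "not-needed"                   # placeholder; a local server typically does not validate it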

@@ -23,6 +23,25 @@ def test_completion():
    assert len(response['choices'][0]['text']) > len(prompt)
    print(response)


def test_streaming_completion():
    model = "gpt4all-j-v1.3-groovy"
    prompt = "Who is Michael Jordan?"
    tokens = []
    for resp in openai.Completion.create(
            model=model,
            prompt=prompt,
            max_tokens=50,
            temperature=0.28,
            top_p=0.95,
            n=1,
            echo=True,
            stream=True):
        tokens.append(resp.choices[0].text)

    assert (len(tokens) > 0)
    assert (len("".join(tokens)) > len(prompt))


# def test_chat_completions():
#     model = "gpt4all-j-v1.3-groovy"
#     prompt = "Who is Michael Jordan?"

@@ -30,6 +49,3 @@ def test_completion():
#         model=model,
#         messages=[]
#     )
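The server-side half of the change (not shown in this hunk) streams completion chunks back to the client as they are generated. A minimal sketch of the general approach, assuming a FastAPI app and a placeholder token generator; the route, request schema, and framing here are illustrative, not the actual gpt4all-api implementation:

# Hypothetical streaming completions route; all names below are assumptions.
import json
import time

from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

app = FastAPI()


class CompletionRequest(BaseModel):
    model: str
    prompt: str
    max_tokens: int = 16
    temperature: float = 1.0
    stream: bool = False


def generate_tokens(prompt: str, max_tokens: int):
    # Placeholder token source; a real server would call the loaded model here.
    for word in ["This ", "is ", "a ", "streamed ", "reply."][:max_tokens]:
        yield word


@app.post("/v1/completions")
def completions(request: CompletionRequest):
    if not request.stream:
        # Non-streaming path: return the whole completion in one response body.
        text = "".join(generate_tokens(request.prompt, request.max_tokens))
        return {
            "object": "text_completion",
            "model": request.model,
            "choices": [{"text": text, "index": 0, "finish_reason": "stop"}],
        }

    def event_stream():
        # Server-sent-events framing: one `data: {json}` chunk per token,
        # terminated by a [DONE] sentinel, which is what the openai client
        # iterates over when called with stream=True.
        for token in generate_tokens(request.prompt, request.max_tokens):
            chunk = {
                "object": "text_completion",
                "created": int(time.time()),
                "model": request.model,
                "choices": [{"text": token, "index": 0, "finish_reason": None}],
            }
            yield f"data: {json.dumps(chunk)}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(event_stream(), media_type="text/event-stream")

The client loop in the test above simply iterates over the chunks yielded by this kind of endpoint and concatenates the partial text fields.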