Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-10-26 02:52:26 +00:00
GPT4All API Scaffolding. Matches OpenAI OpenAPI spec for chats and completions (#839)
* GPT4All API scaffolding. Matches the OpenAI OpenAPI spec for engines, chats, and completions
* Edits for Docker building
* FastAPI app builds and pydantic models are accurate
* Added groovy download into the Dockerfile
* Improved Dockerfile
* Chat completions endpoint edits
* API unit test sketch
* Working example of groovy inference with the OpenAI API
* Added lines to test
* Set default to mpt
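The commit message mentions pydantic models and a FastAPI app that mirror the OpenAI spec. Purely as a rough, hypothetical sketch of what an OpenAI-compatible completions route can look like (field names are taken from the parameters exercised in the test file below, not from this commit's actual server code):

# Hypothetical sketch only: a pydantic request model and FastAPI route shaped
# like the OpenAI completions endpoint. Not the code added by this commit.
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class CompletionRequest(BaseModel):
    model: str
    prompt: str
    max_tokens: int = 16
    temperature: float = 1.0
    top_p: float = 1.0
    n: int = 1
    echo: bool = False
    stream: bool = False


@app.post("/v1/completions")
def completions(request: CompletionRequest) -> dict:
    # A real handler would run the requested gpt4all model here; this stub
    # just returns an OpenAI-style response body with a "choices" list.
    generated = " [model output would go here]"
    text = request.prompt + generated if request.echo else generated
    return {
        "object": "text_completion",
        "model": request.model,
        "choices": [{"text": text, "index": 0}],
    }

With a route shaped like this, an unmodified OpenAI Python client pointed at the local base URL (as the test below does) can call the server without code changes.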
gpt4all-api/gpt4all_api/app/tests/test_endpoints.py (new file, 35 additions)
@@ -0,0 +1,35 @@
"""
Use the OpenAI python API to test gpt4all models.
"""
import openai

openai.api_base = "http://localhost:4891/v1"

openai.api_key = "not needed for a local LLM"


def test_completion():
    model = "gpt4all-j-v1.3-groovy"
    prompt = "Who is Michael Jordan?"
    response = openai.Completion.create(
        model=model,
        prompt=prompt,
        max_tokens=50,
        temperature=0.28,
        top_p=0.95,
        n=1,
        echo=True,
        stream=False
    )
    assert len(response['choices'][0]['text']) > len(prompt)
    print(response)


# def test_chat_completions():
#     model = "gpt4all-j-v1.3-groovy"
#     prompt = "Who is Michael Jordan?"
#     response = openai.ChatCompletion.create(
#         model=model,
#         messages=[]
#     )
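The commented-out chat completions stub is left as a placeholder in the commit. As a sketch of where it might go once the chat endpoint is implemented, a finished version could look roughly like the following; the messages payload follows the standard OpenAI chat schema and is an assumption, not code from this commit.

# Hypothetical completion of the commented-out stub above; not part of this
# commit. Assumes the local server implements /v1/chat/completions.
def test_chat_completion():
    model = "gpt4all-j-v1.3-groovy"
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": "Who is Michael Jordan?"}],
    )
    # The OpenAI chat schema returns the reply under choices[0].message.content.
    assert len(response["choices"][0]["message"]["content"]) > 0

Either test would presumably be run with pytest against the dockerized API once the server is listening on port 4891.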