diff --git a/README.md b/README.md
index 489f28f..ef5f12f 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
 # LLM API for python

-This python library connects to (llm-api)[https://github.com/1b5d/llm-api] using python, it was build to mimic (OpenAI's python library)[https://github.com/openai/openai-python]
+This python library connects to [llm-api](https://github.com/1b5d/llm-api) using python, it was built to mimic [OpenAI's python library](https://github.com/openai/openai-python)

 # Usage

@@ -10,31 +10,31 @@ You can install this library using pip
 pip install llm-api-python
 ```

-After running (llm-api)[https://github.com/1b5d/llm-api], simply configure your client as if it's OpenAI's python binding
+After running [llm-api](https://github.com/1b5d/llm-api), simply configure your client as if it's OpenAI's python binding

 ```python
 import llm_api

 llm_api.api_key = ""

-completion = llm_api.Completion.create(messages=[
+completion = llm_api.ChatCompletion.create(messages=[
     {
         "role": "system",
         "content": "You are a helpful assistant, please answer the users' questions with honesty and accuracy."
     },
     {
         "role": "user",
         "content": "What is the capital of France?"
     }
-]) # returns a completion object
+]) # returns a chat completion object

-completion = llm_api.Completion.create(messages=[
+completion = llm_api.ChatCompletion.create(messages=[
     ...
 ], stream=True) # returns a generator

-completion = await llm_api.Completion.acreate(messages=[
+completion = await llm_api.ChatCompletion.acreate(messages=[
     ...
-]) # returns a completion object
+]) # returns a chat completion object

-completion = await llm_api.Completion.acreate(messages=[
+completion = await llm_api.ChatCompletion.acreate(messages=[
     ...
 ], stream=True) # returns a async generator
diff --git a/llm_api/__init__.py b/llm_api/__init__.py
index fe914d1..4c40d65 100644
--- a/llm_api/__init__.py
+++ b/llm_api/__init__.py
@@ -6,10 +6,10 @@
 import requests
 from aiohttp import ClientSession

-from llm_api.completion import Completion
+from llm_api.completion import ChatCompletion
 from llm_api.error import APIError, InvalidRequestError, LlmApiError  # noqa: F401

-__all__ = ["Completion"]
+__all__ = ["ChatCompletion"]

 api_key = os.environ.get("LLM_API_API_KEY")
 api_key_path: Optional[str] = os.environ.get("LLM_API_API_KEY_PATH")
diff --git a/llm_api/completion.py b/llm_api/completion.py
index a4c4d14..6a6b724 100644
--- a/llm_api/completion.py
+++ b/llm_api/completion.py
@@ -11,7 +11,7 @@
 DEFAULT_PROMPT = "You are a helpful assistant."


-class Completion(ApiObject):
+class ChatCompletion(ApiObject):
     """A wrapper for LLM API client completion."""

     @classmethod
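As context for the renamed `ChatCompletion` class, here is a minimal sketch of how the streaming variants shown in the README hunk might be consumed. It assumes only what the README comments state: `create(..., stream=True)` returns a generator and `await acreate(..., stream=True)` returns an async generator. The exact shape of each yielded chunk is not documented in this diff, so the sketch simply prints it.

```python
import asyncio

import llm_api

llm_api.api_key = ""

# Synchronous streaming: iterate over the generator returned by create(..., stream=True).
stream = llm_api.ChatCompletion.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    stream=True,
)
for chunk in stream:
    print(chunk)  # chunk shape is not shown in the README; printed as-is here


# Asynchronous streaming: awaiting acreate(..., stream=True) yields an async generator.
async def main() -> None:
    astream = await llm_api.ChatCompletion.acreate(
        messages=[{"role": "user", "content": "What is the capital of France?"}],
        stream=True,
    )
    async for chunk in astream:
        print(chunk)


asyncio.run(main())
```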