core[minor], langchain[patch], experimental[patch]: Added missing py.typed to langchain_core (#14143)

See PR title.

From what I can see, `poetry` will auto-include the `py.typed` marker in the built package. Please let me know if I am missing something here.
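
For context on the downstream edits: once `py.typed` ships, type checkers start reading `langchain_core`'s inline annotations, and `BaseMessage.content` is annotated as roughly `Union[str, List[Union[str, Dict]]]`, so call sites that treat it as a plain `str` need a narrowing `cast`. A minimal sketch of that pattern (the `token_counter` helper here is hypothetical, standing in for the str-only counters touched in the diffs below):

```python
from typing import cast

from langchain.schema import SystemMessage


def token_counter(text: str) -> int:
    """Hypothetical stand-in for the str-only token counters edited below."""
    return len(text.split())


msg = SystemMessage(content="The current time and date is Fri Dec  1 2023")

# Without py.typed, mypy ignored langchain_core's inline annotations, so this
# passed silently; with the marker shipped, msg.content is seen as
# Union[str, List[Union[str, Dict]]] and the call is flagged:
# token_counter(msg.content)  # error: argument has incompatible type

# The fix applied throughout this commit: narrow to the str branch of the union.
used_tokens = token_counter(cast(str, msg.content))
```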

---------

Co-authored-by: Bagatur <baskaryan@gmail.com>
James Braza
2023-12-01 22:15:23 -05:00
committed by GitHub
parent f7c257553d
commit 24385a00de
9 changed files with 40 additions and 30 deletions

View File

@@ -1,5 +1,5 @@
 import time
-from typing import Any, Callable, List
+from typing import Any, Callable, List, cast

 from langchain.prompts.chat import (
     BaseChatPromptTemplate,
@@ -68,9 +68,9 @@ class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel): # type: ignore[misc]
         time_prompt = SystemMessage(
             content=f"The current time and date is {time.strftime('%c')}"
         )
-        used_tokens = self.token_counter(base_prompt.content) + self.token_counter(
-            time_prompt.content
-        )
+        used_tokens = self.token_counter(
+            cast(str, base_prompt.content)
+        ) + self.token_counter(cast(str, time_prompt.content))
         memory: VectorStoreRetriever = kwargs["memory"]
         previous_messages = kwargs["messages"]
         relevant_docs = memory.get_relevant_documents(str(previous_messages[-10:]))
@@ -88,7 +88,7 @@ class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel): # type: ignore[misc]
f"from your past:\n{relevant_memory}\n\n"
)
memory_message = SystemMessage(content=content_format)
used_tokens += self.token_counter(memory_message.content)
used_tokens += self.token_counter(cast(str, memory_message.content))
historical_messages: List[BaseMessage] = []
for message in previous_messages[-10:][::-1]:
message_tokens = self.token_counter(message.content)

View File

@@ -1,7 +1,7 @@
"""Generic Wrapper for chat LLMs, with sample implementations
for Llama-2-chat, Llama-2-instruct and Vicuna models.
"""
from typing import Any, List, Optional
from typing import Any, List, Optional, cast
from langchain.callbacks.manager import (
AsyncCallbackManagerForLLMRun,
@@ -90,8 +90,12 @@ class ChatWrapper(BaseChatModel):
         if self.usr_0_end is None:
             self.usr_0_end = self.usr_n_end

-        prompt_parts.append(self.sys_beg + messages[0].content + self.sys_end)
-        prompt_parts.append(self.usr_0_beg + messages[1].content + self.usr_0_end)
+        prompt_parts.append(
+            self.sys_beg + cast(str, messages[0].content) + self.sys_end
+        )
+        prompt_parts.append(
+            self.usr_0_beg + cast(str, messages[1].content) + self.usr_0_end
+        )

         for ai_message, human_message in zip(messages[2::2], messages[3::2]):
             if not isinstance(ai_message, AIMessage) or not isinstance(
@@ -102,8 +106,12 @@ class ChatWrapper(BaseChatModel):
"optionally prepended by a system message"
)
prompt_parts.append(self.ai_n_beg + ai_message.content + self.ai_n_end)
prompt_parts.append(self.usr_n_beg + human_message.content + self.usr_n_end)
prompt_parts.append(
self.ai_n_beg + cast(str, ai_message.content) + self.ai_n_end
)
prompt_parts.append(
self.usr_n_beg + cast(str, human_message.content) + self.usr_n_end
)
return "".join(prompt_parts)

View File

@@ -1,5 +1,5 @@
 import uuid
-from typing import Any, Callable, Optional
+from typing import Any, Callable, Optional, cast

 from langchain.callbacks.manager import CallbackManagerForChainRun
 from langchain.schema import AIMessage, HumanMessage
@@ -54,10 +54,10 @@ class BaseModeration:
             message = prompt.messages[-1]
             self.chat_message_index = len(prompt.messages) - 1
             if isinstance(message, HumanMessage):
-                input_text = message.content
+                input_text = cast(str, message.content)
             if isinstance(message, AIMessage):
-                input_text = message.content
+                input_text = cast(str, message.content)
         else:
             raise ValueError(
                 f"Invalid input type {type(input_text)}. "

View File

@@ -1,7 +1,7 @@
 import json
 from collections import defaultdict
 from html.parser import HTMLParser
-from typing import Any, DefaultDict, Dict, List, Optional
+from typing import Any, DefaultDict, Dict, List, Optional, cast

 from langchain.callbacks.manager import (
     CallbackManagerForLLMRun,
@@ -176,7 +176,7 @@ class AnthropicFunctions(BaseChatModel):
         response = self.model.predict_messages(
             messages, stop=stop, callbacks=run_manager, **kwargs
         )
-        completion = response.content
+        completion = cast(str, response.content)
         if forced:
             tag_parser = TagParser()
@@ -210,7 +210,7 @@ class AnthropicFunctions(BaseChatModel):
             message = AIMessage(content=msg, additional_kwargs=kwargs)
             return ChatResult(generations=[ChatGeneration(message=message)])
         else:
-            response.content = response.content.strip()
+            response.content = cast(str, response.content).strip()
             return ChatResult(generations=[ChatGeneration(message=response)])

     @property