Compare commits

...

5 Commits

Author          SHA1        Message                              Date
Eugene Yurtsev  bdc7383917  x                                    2024-06-24 17:08:57 -04:00
Eugene Yurtsev  8c1f310f11  Merge branch 'master' into renderer  2024-06-24 17:05:54 -04:00
Eugene Yurtsev  8c0217188a  qxqx                                 2024-06-24 17:05:41 -04:00
Eugene Yurtsev  5a9d4b0088  Merge branch 'master' into renderer  2024-05-23 17:56:46 -04:00
Eugene Yurtsev  a098d61226  qx                                   2024-05-23 16:54:19 -04:00
5 changed files with 68 additions and 1 deletion

View File

@@ -1,7 +1,9 @@
 # flake8: noqa
 """Global values and configuration that apply to all of LangChain."""
+import abc
+import contextlib
 import warnings
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Optional, Any, Generator
 
 if TYPE_CHECKING:
     from langchain_core.caches import BaseCache
@@ -15,6 +17,22 @@ _verbose: bool = False
 _debug: bool = False
 _llm_cache: Optional["BaseCache"] = None
+
+
+class BaseRenderer(abc.ABC):
+    def render_html(self, obj: Any) -> str:
+        """Render an object as HTML."""
+        raise NotImplementedError
+
+
+class DefaultRenderer(BaseRenderer):
+    def render_html(self, obj: Any) -> str:
+        """Render an object as HTML."""
+        if hasattr(obj, "_repr_html_"):
+            return obj._repr_html_()
+        return str(obj)
+
+
+_renderer: Optional[BaseRenderer] = None
 
 
 def set_verbose(value: bool) -> None:
     """Set a new value for the `verbose` global setting."""
@@ -195,3 +213,30 @@ def get_llm_cache() -> "BaseCache":
 
     global _llm_cache
     return _llm_cache or old_llm_cache
+
+
+def set_renderer(renderer: BaseRenderer) -> None:
+    """Set a new renderer."""
+    global _renderer
+    _renderer = renderer
+
+
+def get_renderer() -> BaseRenderer:
+    """Get the current renderer."""
+    global _renderer
+    return _renderer or DefaultRenderer()
+
+
+@contextlib.contextmanager
+def with_renderer(renderer: BaseRenderer) -> Generator[None, None, None]:
+    """Context manager for temporarily setting a new renderer."""
+    global _renderer
+    # Save the current value
+    original_value = _renderer
+    try:
+        # Update the global value
+        _renderer = renderer
+        yield
+    finally:
+        # Restore the original value
+        _renderer = original_value

View File

@@ -2,6 +2,7 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union, cast
 
+from langchain_core.globals import get_renderer
 from langchain_core.load.serializable import Serializable
 from langchain_core.pydantic_v1 import Extra, Field
 from langchain_core.utils import get_bolded_text
@@ -87,6 +88,10 @@ class BaseMessage(Serializable):
     def pretty_print(self) -> None:
         print(self.pretty_repr(html=is_interactive_env()))  # noqa: T201
 
+    def _repr_html_(self) -> str:
+        """Return the HTML representation of the message."""
+        return get_renderer().render_html(self)
+
 
 def merge_content(
     first_content: Union[str, List[Union[str, Dict]]],
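
Worth noting: _repr_html_ is the hook IPython and Jupyter look for when displaying rich output, so showing a message in a notebook now routes through the global renderer. A rough sketch of that dispatch, with a toy renderer standing in for a real one (TagRenderer is illustrative only):

from typing import Any

from langchain_core.globals import BaseRenderer, set_renderer
from langchain_core.messages import HumanMessage


class TagRenderer(BaseRenderer):
    # Illustrative only: wrap the message's string form in a simple tag.
    def render_html(self, obj: Any) -> str:
        return f"<div class='lc-message'>{obj!s}</div>"


set_renderer(TagRenderer())
# Jupyter would call this automatically when the message is displayed.
html_repr = HumanMessage(content="hello")._repr_html_()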

View File

@@ -279,6 +279,12 @@ class BasePromptTemplate(
         else:
             raise ValueError(f"{save_path} must be json or yaml")
 
+    def _repr_html_(self) -> str:
+        """Return the HTML representation of the prompt template."""
+        from langchain_core.globals import get_renderer
+
+        return get_renderer().render_html(self)
+
 
 def _get_document_info(doc: Document, prompt: BasePromptTemplate[str]) -> Dict:
     base_info = {"page_content": doc.page_content, **doc.metadata}

View File

@@ -106,6 +106,12 @@ class BaseMessagePromptTemplate(Serializable, ABC):
         prompt = ChatPromptTemplate(messages=[self])  # type: ignore[call-arg]
         return prompt + other
 
+    def _repr_html_(self) -> str:
+        """Return the HTML representation of the message prompt template."""
+        from langchain_core.globals import get_renderer
+
+        return get_renderer().render_html(self)
+
 
 class MessagesPlaceholder(BaseMessagePromptTemplate):
     """Prompt template that assumes variable is already list of messages.
View File

@@ -464,3 +464,8 @@ class Graph:
             background_color=background_color,
             padding=padding,
         )
+
+    def _repr_html_(self) -> str:
+        from langchain_core.globals import get_renderer
+
+        return get_renderer().render_html(self)
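
Because every _repr_html_ added in this diff funnels into the same get_renderer().render_html(obj) call, a single renderer can specialize its markup per object type. A hedged sketch of that design (the type checks and markup are illustrative, not part of the change):

from typing import Any

from langchain_core.globals import BaseRenderer
from langchain_core.messages import BaseMessage
from langchain_core.prompts import BasePromptTemplate


class DispatchingRenderer(BaseRenderer):
    def render_html(self, obj: Any) -> str:
        # Pick markup based on the concrete object being displayed.
        if isinstance(obj, BaseMessage):
            return f"<p><b>{obj.type}</b>: {obj.content}</p>"
        if isinstance(obj, BasePromptTemplate):
            return f"<code>inputs: {', '.join(obj.input_variables)}</code>"
        # Fall back to the plain string form for anything else (e.g. Graph).
        return str(obj)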