Harrison/official pre release (#8106)

commit aa0e69bc98 (parent 95bcf68802)
Author: Harrison Chase
Date: 2023-07-21 18:44:32 -07:00
Committed via GitHub

65 changed files with 210 additions and 602 deletions

langchain_experimental/llms/__init__.py
@@ -0,0 +1,6 @@
"""Experimental LLM wrappers."""
from langchain_experimental.llms.jsonformer_decoder import JsonFormer
from langchain_experimental.llms.rellm_decoder import RELLM
__all__ = ["RELLM", "JsonFormer"]

langchain_experimental/llms/jsonformer_decoder.py
@@ -0,0 +1,60 @@
"""Experimental implementation of jsonformer wrapped LLM."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, List, Optional, cast
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from pydantic import Field, root_validator
if TYPE_CHECKING:
import jsonformer
def import_jsonformer() -> jsonformer:
"""Lazily import jsonformer."""
try:
import jsonformer
except ImportError:
raise ValueError(
"Could not import jsonformer python package. "
"Please install it with `pip install jsonformer`."
)
return jsonformer
class JsonFormer(HuggingFacePipeline):
json_schema: dict = Field(..., description="The JSON Schema to complete.")
max_new_tokens: int = Field(
default=200, description="Maximum number of new tokens to generate."
)
debug: bool = Field(default=False, description="Debug mode.")
@root_validator
def check_jsonformer_installation(cls, values: dict) -> dict:
import_jsonformer()
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
jsonformer = import_jsonformer()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
model = jsonformer.Jsonformer(
model=pipeline.model,
tokenizer=pipeline.tokenizer,
json_schema=self.json_schema,
prompt=prompt,
max_number_tokens=self.max_new_tokens,
debug=self.debug,
)
text = model()
return json.dumps(text)
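For context, a minimal usage sketch of the wrapper above; the model id and schema are illustrative assumptions, not part of this commit:

# Sketch: constrain a HuggingFace pipeline to emit schema-conforming JSON.
# Assumes `jsonformer` and `transformers` are installed; model id is arbitrary.
from transformers import pipeline
from langchain_experimental.llms import JsonFormer

hf_pipeline = pipeline(
    "text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
)
json_former = JsonFormer(
    json_schema={
        "type": "object",
        "properties": {"action": {"type": "string"}},
    },
    pipeline=hf_pipeline,
)
# Returns a JSON string conforming to the schema above.
print(json_former.predict("Human: Please turn on the lights.\nAI:"))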

langchain_experimental/llms/rellm_decoder.py
@@ -0,0 +1,67 @@
"""Experimental implementation of RELLM wrapped LLM."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, cast
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.utils import enforce_stop_tokens
from pydantic import Field, root_validator
if TYPE_CHECKING:
import rellm
from regex import Pattern as RegexPattern
else:
try:
from regex import Pattern as RegexPattern
except ImportError:
pass
def import_rellm() -> rellm:
"""Lazily import rellm."""
try:
import rellm
except ImportError:
raise ValueError(
"Could not import rellm python package. "
"Please install it with `pip install rellm`."
)
return rellm
class RELLM(HuggingFacePipeline):
regex: RegexPattern = Field(..., description="The structured format to complete.")
max_new_tokens: int = Field(
default=200, description="Maximum number of new tokens to generate."
)
@root_validator
def check_rellm_installation(cls, values: dict) -> dict:
import_rellm()
return values
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
rellm = import_rellm()
from transformers import Text2TextGenerationPipeline
pipeline = cast(Text2TextGenerationPipeline, self.pipeline)
text = rellm.complete_re(
prompt,
self.regex,
tokenizer=pipeline.tokenizer,
model=pipeline.model,
max_new_tokens=self.max_new_tokens,
)
if stop is not None:
# This is a bit hacky, but I can't figure out a better way to enforce
# stop tokens when making calls to huggingface_hub.
text = enforce_stop_tokens(text, stop)
return text
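Likewise, a minimal usage sketch of the RELLM wrapper; the model id and regex pattern are illustrative assumptions, not part of this commit:

# Sketch: constrain a HuggingFace pipeline's output to a regex pattern.
# Assumes `rellm`, `regex`, and `transformers` are installed; model id is arbitrary.
import regex
from transformers import pipeline
from langchain_experimental.llms import RELLM

hf_pipeline = pipeline(
    "text-generation", model="cerebras/Cerebras-GPT-590M", max_new_tokens=200
)
# Constrain generation to a 3-4 digit string, e.g. a year.
pattern = regex.compile(r"\d{3,4}")
rellm_llm = RELLM(regex=pattern, pipeline=hf_pipeline)
print(rellm_llm.predict("What year did the French Revolution begin? Answer:"))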