mirror of https://github.com/hwchase17/langchain.git
notebook fmt (#12498)
@@ -1,4 +1,3 @@
-
 from langchain.chat_models import ChatOpenAI
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.prompts import ChatPromptTemplate
@@ -26,10 +25,7 @@ embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)
 # Connect to pre-loaded vectorstore
 # run the ingest.py script to populate this
 vectorstore = Redis.from_existing_index(
-    embedding=embedder,
-    index_name=INDEX_NAME,
-    schema=INDEX_SCHEMA,
-    redis_url=REDIS_URL
+    embedding=embedder, index_name=INDEX_NAME, schema=INDEX_SCHEMA, redis_url=REDIS_URL
 )
 # TODO allow user to change parameters
 retriever = vectorstore.as_retriever(search_type="mmr")
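The collapsed call above is plain Black output; behavior is unchanged. For context, a minimal end-to-end sketch of what this hunk configures, with the template's config values stubbed in as assumptions (the real values come from its config module, and the index must already have been populated by the ingest script):

```python
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Redis  # import path assumed for this langchain line

# Stand-ins for the template's config values (assumed, not from the diff).
EMBED_MODEL = "sentence-transformers/all-MiniLM-L6-v2"
INDEX_NAME = "rag-redis"
INDEX_SCHEMA = "schema.yml"            # path to the index schema file
REDIS_URL = "redis://localhost:6379"

embedder = HuggingFaceEmbeddings(model_name=EMBED_MODEL)

# Connect to an index that an ingest script has already populated.
vectorstore = Redis.from_existing_index(
    embedding=embedder, index_name=INDEX_NAME, schema=INDEX_SCHEMA, redis_url=REDIS_URL
)
retriever = vectorstore.as_retriever(search_type="mmr")
docs = retriever.get_relevant_documents("sample query")
```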
@@ -60,8 +56,7 @@ prompt = ChatPromptTemplate.from_template(template)
 # RAG Chain
 model = ChatOpenAI(model_name="gpt-3.5-turbo-16k")
 chain = (
-    RunnableParallel({"context": retriever,
-                      "question": RunnablePassthrough()})
+    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
     | prompt
     | model
     | StrOutputParser()
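This hunk only reflows the RunnableParallel literal; the LCEL pipeline is unchanged. A sketch of the full chain and how it is invoked, continuing from the previous sketch (the prompt text is illustrative, and the import paths are the pre-0.1 langchain ones used elsewhere in this diff). The hunks that follow belong to a second file, a config module judging by its contents:

```python
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableParallel, RunnablePassthrough

# `retriever` as built in the previous sketch; the template text is illustrative.
prompt = ChatPromptTemplate.from_template(
    "Answer using only this context:\n{context}\n\nQuestion: {question}"
)
model = ChatOpenAI(model_name="gpt-3.5-turbo-16k")

chain = (
    # Fan the incoming question out: the retriever fills "context",
    # RunnablePassthrough forwards the raw question unchanged.
    RunnableParallel({"context": retriever, "question": RunnablePassthrough()})
    | prompt
    | model
    | StrOutputParser()
)

answer = chain.invoke("What did the ingest script load into Redis?")  # str in, str out
```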
@@ -12,11 +12,11 @@ def get_boolean_env_var(var_name, default_value=False):
     Returns:
         bool: The value of the environment variable, interpreted as a boolean.
     """
-    true_values = {'true', '1', 't', 'y', 'yes'}
-    false_values = {'false', '0', 'f', 'n', 'no'}
+    true_values = {"true", "1", "t", "y", "yes"}
+    false_values = {"false", "0", "f", "n", "no"}

     # Retrieve the environment variable's value
-    value = os.getenv(var_name, '').lower()
+    value = os.getenv(var_name, "").lower()

     # Decide the boolean value based on the content of the string
     if value in true_values:
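Since the hunk shows most of get_boolean_env_var, here is a reassembled, runnable sketch. The branches after the final `if` are cut off in the diff, so the fallback to default_value is an assumption:

```python
import os


def get_boolean_env_var(var_name, default_value=False):
    """Interpret an environment variable as a boolean (reassembled from the hunk)."""
    true_values = {"true", "1", "t", "y", "yes"}
    false_values = {"false", "0", "f", "n", "no"}

    # Retrieve the environment variable's value
    value = os.getenv(var_name, "").lower()

    # Decide the boolean value based on the content of the string
    if value in true_values:
        return True
    if value in false_values:
        return False
    return default_value  # assumed fallback; the diff elides this branch


os.environ["DEBUG"] = "yes"
assert get_boolean_env_var("DEBUG") is True
assert get_boolean_env_var("UNSET_VAR") is False  # default applies
```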
@@ -37,17 +37,18 @@ DEBUG = get_boolean_env_var("DEBUG", False)
 # Set DEBUG env var to "true" if you wish to enable LC debugging module
 if DEBUG:
     import langchain
-    langchain.debug=True
+
+    langchain.debug = True


 # Embedding model
-EMBED_MODEL = os.getenv("EMBED_MODEL",
-                        "sentence-transformers/all-MiniLM-L6-v2")
+EMBED_MODEL = os.getenv("EMBED_MODEL", "sentence-transformers/all-MiniLM-L6-v2")

 # Redis Connection Information
 REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
 REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))

+
 def format_redis_conn_from_env():
     redis_url = os.getenv("REDIS_URL", None)
     if redis_url:
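Every constant in this hunk resolves from the environment with a hard-coded fallback. A small sketch of that override behavior (the override values are illustrative, not from the diff):

```python
import os

# Illustrative overrides; set these before the config module is imported.
os.environ["EMBED_MODEL"] = "sentence-transformers/all-mpnet-base-v2"
os.environ["REDIS_PORT"] = "6380"

EMBED_MODEL = os.getenv("EMBED_MODEL", "sentence-transformers/all-MiniLM-L6-v2")
REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))  # int() handles the str or the int default

print(EMBED_MODEL)                    # sentence-transformers/all-mpnet-base-v2 (override)
print(f"{REDIS_HOST}:{REDIS_PORT}")   # localhost:6380 (one fallback, one override)
```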
@@ -64,6 +65,7 @@ def format_redis_conn_from_env():

     return start + f"{REDIS_HOST}:{REDIS_PORT}"

+
 REDIS_URL = format_redis_conn_from_env()

 # Vector Index Configuration
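Only the head and tail of format_redis_conn_from_env are visible: an explicit REDIS_URL short-circuits, otherwise a scheme prefix (`start`) is joined with host and port. A minimal sketch under that reading; the diff elides how the real helper derives `start` (e.g. TLS or password handling), so the scheme selection below is an assumption:

```python
import os

REDIS_HOST = os.getenv("REDIS_HOST", "localhost")
REDIS_PORT = int(os.getenv("REDIS_PORT", 6379))


def format_redis_conn_from_env():
    redis_url = os.getenv("REDIS_URL", None)
    if redis_url:
        return redis_url  # an explicit URL wins, per the visible branch

    # Assumed: choose a scheme; the diff does not show how `start` is built.
    use_ssl = os.getenv("REDIS_SSL", "false").lower() in {"true", "1", "yes"}
    start = "rediss://" if use_ssl else "redis://"
    return start + f"{REDIS_HOST}:{REDIS_PORT}"


REDIS_URL = format_redis_conn_from_env()
print(REDIS_URL)  # redis://localhost:6379 unless overridden
```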
@@ -72,5 +74,5 @@ INDEX_NAME = os.getenv("INDEX_NAME", "rag-redis")

 current_file_path = os.path.abspath(__file__)
 parent_dir = os.path.dirname(current_file_path)
-schema_path = os.path.join(parent_dir, 'schema.yml')
+schema_path = os.path.join(parent_dir, "schema.yml")
 INDEX_SCHEMA = schema_path