From 6f64cb5078bb71007d25fff847541fd8f7713c0c Mon Sep 17 00:00:00 2001
From: Toshish Jawale <986859+toshish@users.noreply.github.com>
Date: Wed, 29 Nov 2023 11:48:25 -0800
Subject: [PATCH] Remove deprecated param and flexibility for prompt (#13310)

- **Description:** Updated to remove the deprecated parameter
  penalty_alpha, and to accept the prompt as a plain string rather than a
  JSON object, for better flexibility.
- **Issue:** the issue # it fixes (if applicable),
- **Dependencies:** N/A
- **Tag maintainer:** @eyurtsev
- **Twitter handle:** @symbldotai

---------

Co-authored-by: toshishjawale
Co-authored-by: Harrison Chase
---
 .../langchain/llms/symblai_nebula.py          | 22 ++++------------------
 .../llms/test_symblai_nebula.py               |  3 +--
 2 files changed, 5 insertions(+), 20 deletions(-)

diff --git a/libs/langchain/langchain/llms/symblai_nebula.py b/libs/langchain/langchain/llms/symblai_nebula.py
index ddf238c7af9..e718a1221f5 100644
--- a/libs/langchain/langchain/llms/symblai_nebula.py
+++ b/libs/langchain/langchain/llms/symblai_nebula.py
@@ -57,8 +57,7 @@ class Nebula(LLM):
     temperature: Optional[float] = 0.6
     top_p: Optional[float] = 0.95
     repetition_penalty: Optional[float] = 1.0
-    top_k: Optional[int] = 0
-    penalty_alpha: Optional[float] = 0.0
+    top_k: Optional[int] = 1
     stop_sequences: Optional[List[str]] = None
     max_retries: Optional[int] = 10
 
@@ -106,7 +105,6 @@ class Nebula(LLM):
             "top_k": self.top_k,
             "top_p": self.top_p,
             "repetition_penalty": self.repetition_penalty,
-            "penalty_alpha": self.penalty_alpha,
         }
 
     @property
@@ -162,16 +160,10 @@ class Nebula(LLM):
         """
         params = self._invocation_params(stop, **kwargs)
         prompt = prompt.strip()
-        if "\n" in prompt:
-            instruction = prompt.split("\n")[0]
-            conversation = "\n".join(prompt.split("\n")[1:])
-        else:
-            raise ValueError("Prompt must contain instruction and conversation.")
 
         response = completion_with_retry(
             self,
-            instruction=instruction,
-            conversation=conversation,
+            prompt=prompt,
             params=params,
             url=f"{self.nebula_service_url}{self.nebula_service_path}",
         )
@@ -181,8 +173,7 @@
 
 def make_request(
     self: Nebula,
-    instruction: str,
-    conversation: str,
+    prompt: str,
     url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}",
     params: Optional[Dict] = None,
 ) -> Any:
@@ -196,12 +187,7 @@ def make_request(
         "ApiKey": f"{api_key}",
     }
 
-    body = {
-        "prompt": {
-            "instruction": instruction,
-            "conversation": {"text": f"{conversation}"},
-        }
-    }
+    body = {"prompt": prompt}
 
     # add params to body
     for key, value in params.items():
diff --git a/libs/langchain/tests/integration_tests/llms/test_symblai_nebula.py b/libs/langchain/tests/integration_tests/llms/test_symblai_nebula.py
index a068e08aca4..6f623bd6e28 100644
--- a/libs/langchain/tests/integration_tests/llms/test_symblai_nebula.py
+++ b/libs/langchain/tests/integration_tests/llms/test_symblai_nebula.py
@@ -41,8 +41,7 @@ Rhea: Thanks, bye!"""
 
     instruction = """Identify the main objectives mentioned in this conversation."""
 
-    prompt = PromptTemplate.from_template("{instruction}\n{conversation}")
-
+    prompt = PromptTemplate.from_template(template="{instruction}\n{conversation}")
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    output = llm_chain.run(instruction=instruction, conversation=conversation)
    assert isinstance(output, str)
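
Note for reviewers: a minimal usage sketch of the interface after this change, not part of the patch itself. The import path and the plain-string prompt behavior come from the patched module; the API key value and conversation text are placeholders, and the `nebula_api_key` constructor argument is assumed from the class's existing configuration fields.

# Sketch only: after this patch the whole prompt is sent as one string,
# and make_request() forwards it verbatim as body = {"prompt": prompt}.
from langchain.llms.symblai_nebula import Nebula

llm = Nebula(nebula_api_key="<your-api-key>")  # placeholder key

# The old code split the prompt into instruction/conversation on the first
# newline and raised ValueError for single-line prompts; any non-empty
# string now works, with or without newlines.
prompt = (
    "Identify the main objectives mentioned in this conversation.\n"
    "Alex: We need to finalize the Q3 report by Friday.\n"
    "Rhea: I'll have the revenue numbers ready by Wednesday."
)
output = llm(prompt)
print(output)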