Remove deprecated param and add flexibility for prompt (#13310)

- **Description:** Removes the deprecated `penalty_alpha` parameter and sends the prompt as a plain string rather than a JSON object, for better flexibility (see the sketch below).
- **Issue:** N/A
- **Dependencies:** N/A
- **Tag maintainer:** @eyurtsev
- **Twitter handle:** @symbldotai
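
For illustration, a minimal sketch of the payload change. The field names come from the diff below; the example values are made up:

```python
# Before: the prompt traveled as a nested JSON object
# (structure from the removed lines in the diff below; values are illustrative).
old_body = {
    "prompt": {
        "instruction": "Identify the main objectives mentioned in this conversation.",
        "conversation": {"text": "Alice: Hi, team!\nBob: Morning!"},
    }
}

# After: the whole prompt is a single string, so callers can shape it freely.
new_body = {
    "prompt": "Identify the main objectives mentioned in this conversation.\n"
    "Alice: Hi, team!\nBob: Morning!"
}
```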

---------

Co-authored-by: toshishjawale <toshish@symbl.ai>
Co-authored-by: Harrison Chase <hw.chase.17@gmail.com>
Toshish Jawale 2023-11-29 11:48:25 -08:00 committed by GitHub
parent 3eb391561b
commit 6f64cb5078
2 changed files with 5 additions and 20 deletions

Changes to the Nebula LLM implementation:

```diff
@@ -57,8 +57,7 @@ class Nebula(LLM):
     temperature: Optional[float] = 0.6
     top_p: Optional[float] = 0.95
     repetition_penalty: Optional[float] = 1.0
-    top_k: Optional[int] = 0
-    penalty_alpha: Optional[float] = 0.0
+    top_k: Optional[int] = 1
     stop_sequences: Optional[List[str]] = None
     max_retries: Optional[int] = 10
@@ -106,7 +105,6 @@ class Nebula(LLM):
             "top_k": self.top_k,
             "top_p": self.top_p,
             "repetition_penalty": self.repetition_penalty,
-            "penalty_alpha": self.penalty_alpha,
         }
 
     @property
@@ -162,16 +160,10 @@ class Nebula(LLM):
         """
         params = self._invocation_params(stop, **kwargs)
         prompt = prompt.strip()
-        if "\n" in prompt:
-            instruction = prompt.split("\n")[0]
-            conversation = "\n".join(prompt.split("\n")[1:])
-        else:
-            raise ValueError("Prompt must contain instruction and conversation.")
         response = completion_with_retry(
             self,
-            instruction=instruction,
-            conversation=conversation,
+            prompt=prompt,
             params=params,
             url=f"{self.nebula_service_url}{self.nebula_service_path}",
         )
```
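
The removed branch above also changes caller-visible behavior: previously a prompt without a newline raised `ValueError("Prompt must contain instruction and conversation.")`, while now any string is forwarded as-is. A hedged usage sketch, assuming a configured `Nebula` instance named `llm`:

```python
# Sketch only: `llm` is assumed to be a configured Nebula instance.
# Before this change, a single-line prompt raised
#   ValueError("Prompt must contain instruction and conversation.")
# After it, the string is stripped and passed through unchanged.
output = llm("Summarize the key action items from the standup.")
print(output)
```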
```diff
@@ -181,8 +173,7 @@
 def make_request(
     self: Nebula,
-    instruction: str,
-    conversation: str,
+    prompt: str,
     url: str = f"{DEFAULT_NEBULA_SERVICE_URL}{DEFAULT_NEBULA_SERVICE_PATH}",
     params: Optional[Dict] = None,
 ) -> Any:
@@ -196,12 +187,7 @@ def make_request(
         "ApiKey": f"{api_key}",
     }
-    body = {
-        "prompt": {
-            "instruction": instruction,
-            "conversation": {"text": f"{conversation}"},
-        }
-    }
+    body = {"prompt": prompt}
     # add params to body
     for key, value in params.items():
```
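
The `for key, value in params.items()` loop above then copies the invocation params into the body (the loop body is truncated in this view, so the exact merge is an inference from the `# add params to body` comment). A rough sketch of the resulting request body, using the class defaults from the first hunk; the exact key set is an assumption based on `_default_params`:

```python
# Approximate request body after the params loop (sketch only).
body = {
    "prompt": "Identify the main objectives.\nAlice: Hi, team!\nBob: Morning!",
    "temperature": 0.6,
    "top_p": 0.95,
    "repetition_penalty": 1.0,
    "top_k": 1,
    # "penalty_alpha" is no longer sent; the deprecated parameter was removed.
}
```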

Changes to the Nebula integration test:

```diff
@@ -41,8 +41,7 @@ Rhea: Thanks, bye!"""
     instruction = """Identify the main objectives mentioned in this
 conversation."""
-    prompt = PromptTemplate.from_template("{instruction}\n{conversation}")
+    prompt = PromptTemplate.from_template(template="{instruction}\n{conversation}")
     llm_chain = LLMChain(prompt=prompt, llm=llm)
     output = llm_chain.run(instruction=instruction, conversation=conversation)
     assert isinstance(output, str)
```
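
For context, a self-contained sketch of the pattern this test exercises. The template line mirrors the diff; the `Nebula` construction and its `nebula_api_key` argument are assumptions:

```python
from langchain.chains import LLMChain
from langchain.llms import Nebula
from langchain.prompts import PromptTemplate

# The template folds instruction and conversation into the single string
# prompt that Nebula now accepts (no more instruction/conversation split).
prompt = PromptTemplate.from_template(template="{instruction}\n{conversation}")
llm = Nebula(nebula_api_key="<api_key>")  # constructor argument is assumed
llm_chain = LLMChain(prompt=prompt, llm=llm)
output = llm_chain.run(
    instruction="Identify the main objectives mentioned in this conversation.",
    conversation="Alice: Hi, team!\nBob: Morning, all!",
)
assert isinstance(output, str)
```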