Backend prompt dedup (#822)

* Deduplicated prompt() function code
commit bbe195ee02
parent 945297d837
Author: AT
Committed by: GitHub
Date: 2023-06-04 08:59:24 -04:00
10 changed files with 286 additions and 457 deletions

llamamodel.h

@@ -20,17 +20,19 @@ public:
     size_t stateSize() const override;
     size_t saveState(uint8_t *dest) const override;
     size_t restoreState(const uint8_t *src) override;
-    void prompt(const std::string &prompt,
-        std::function<bool(int32_t)> promptCallback,
-        std::function<bool(int32_t, const std::string&)> responseCallback,
-        std::function<bool(bool)> recalculateCallback,
-        PromptContext &ctx) override;
-    bool evalTokens(PromptContext &ctx, const std::vector<int32_t> &tokens) override;
     void setThreadCount(int32_t n_threads) override;
     int32_t threadCount() const override;
 
 private:
     LLamaPrivate *d_ptr;
+
+protected:
+    std::vector<Token> tokenize(const std::string&) const override;
+    std::string_view tokenToString(Token) const override;
+    Token sampleToken(PromptContext& ctx) const override;
+    bool evalTokens(PromptContext& ctx, const std::vector<int32_t> &tokens) const override;
+    int32_t contextLength() const override;
+    const std::vector<Token>& endTokens() const override;
 };
 
 #endif // LLAMAMODEL_H
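
Structurally, this is the template-method pattern: the shared prompt() implementation moves up into the LLModel base class, and each backend now supplies only the protected primitives declared in the diff above (tokenize, tokenToString, sampleToken, evalTokens, contextLength, endTokens). The following is a minimal compilable sketch of that shape, not the actual gpt4all code; the PromptContext fields, the pared-down prompt() signature (the real one also takes promptCallback and recalculateCallback, per the removed declaration), and the EchoModel toy backend are all invented for illustration.

    #include <algorithm>
    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>
    #include <string_view>
    #include <vector>

    // Hypothetical, pared-down context; the real PromptContext carries more state.
    struct PromptContext {
        std::vector<int32_t> tokens;  // tokens evaluated so far
        int32_t n_predict = 8;        // generation limit (assumed field)
    };

    class LLModel {
    public:
        using Token = int32_t;
        virtual ~LLModel() = default;

        // The deduplicated loop: tokenize, evaluate, then sample until an end
        // token or until the caller's callback stops generation.
        void prompt(const std::string &promptText,
                    std::function<bool(Token, const std::string &)> responseCallback,
                    PromptContext &ctx) {
            evalTokens(ctx, tokenize(promptText));
            for (int32_t i = 0; i < ctx.n_predict; ++i) {
                Token tok = sampleToken(ctx);
                const auto &ends = endTokens();
                if (std::find(ends.begin(), ends.end(), tok) != ends.end())
                    break;                     // stop on an end token
                evalTokens(ctx, {tok});        // feed the sampled token back in
                if (!responseCallback(tok, std::string(tokenToString(tok))))
                    break;                     // caller asked to stop
            }
            // contextLength() would bound ctx.tokens in a real implementation.
        }

    protected:
        // Per-backend primitives, mirroring the protected overrides in the diff.
        virtual std::vector<Token> tokenize(const std::string &) const = 0;
        virtual std::string_view tokenToString(Token) const = 0;
        virtual Token sampleToken(PromptContext &ctx) const = 0;
        virtual bool evalTokens(PromptContext &ctx, const std::vector<Token> &tokens) const = 0;
        virtual int32_t contextLength() const = 0;
        virtual const std::vector<Token> &endTokens() const = 0;
    };

    // A toy backend, just to show the division of labor; it "generates" by
    // repeating the first prompt byte and stopping at token 0.
    class EchoModel : public LLModel {
    protected:
        std::vector<Token> tokenize(const std::string &s) const override {
            return std::vector<Token>(s.begin(), s.end());  // one token per byte
        }
        std::string_view tokenToString(Token t) const override {
            buf_ = std::string(1, static_cast<char>(t));
            return buf_;
        }
        Token sampleToken(PromptContext &ctx) const override {
            return ctx.tokens.empty() ? 0 : ctx.tokens.front();
        }
        bool evalTokens(PromptContext &ctx, const std::vector<Token> &toks) const override {
            ctx.tokens.insert(ctx.tokens.end(), toks.begin(), toks.end());
            return true;
        }
        int32_t contextLength() const override { return 2048; }
        const std::vector<Token> &endTokens() const override {
            static const std::vector<Token> ends{0};
            return ends;
        }
    private:
        mutable std::string buf_;
    };

    int main() {
        EchoModel model;
        PromptContext ctx;
        model.prompt("hi", [](LLModel::Token, const std::string &s) {
            std::cout << s;
            return true;
        }, ctx);
        std::cout << '\n';
    }

Keeping the primitives protected means callers still see only prompt(), while backends such as LLamaModel can no longer diverge in how the generation loop behaves; that single shared loop is what accounts for the net removal of code across the ten changed files.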