Mirror of https://github.com/nomic-ai/gpt4all.git, synced 2025-09-12 14:01:38 +00:00
chat: fix #includes with include-what-you-use (#2401)
Also use qGuiApp instead of qApp.

Signed-off-by: Jared Van Bortel <jared@nomic.ai>
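The "use qGuiApp instead of qApp" part of the message is a small but common Qt cleanup. As a rough sketch (the snippet below is illustrative only and not taken from the gpt4all sources): qApp resolves to a QApplication* when <QApplication> is in scope, which ties the code to the QtWidgets module, while qGuiApp from <QGuiApplication> names the same application instance as a QGuiApplication* and is sufficient wherever only QGuiApplication/QCoreApplication APIs are used.

// Illustrative only -- not part of this commit's diff.
#include <QGuiApplication>
#include <QObject>
#include <QDebug>

static void watchForQuit()
{
    // Before: QObject::connect(qApp, &QCoreApplication::aboutToQuit, ...);
    //         qApp becomes a QApplication* once <QApplication> (QtWidgets) is included.
    // After:  qGuiApp (from <QGuiApplication>) points at the same instance,
    //         so this code no longer needs a QtWidgets include.
    QObject::connect(qGuiApp, &QCoreApplication::aboutToQuit, [] {
        qDebug() << "shutting down";
    });
}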
@@ -1,16 +1,29 @@
 #ifndef CHATLLM_H
 #define CHATLLM_H
 
-#include <QObject>
-#include <QThread>
-#include <QFileInfo>
-
-#include <memory>
-
 #include "database.h"
 #include "modellist.h"
 
 #include "../gpt4all-backend/llmodel.h"
 
+#include <QByteArray>
+#include <QElapsedTimer>
+#include <QFileInfo>
+#include <QObject>
+#include <QPair>
+#include <QString>
+#include <QThread>
+#include <QVector>
+#include <QtGlobal>
+
+#include <atomic>
+#include <cstdint>
+#include <memory>
+#include <string>
+
+class QDataStream;
+struct ResultInfo;
+template <typename T> class QList;
+
 enum LLModelType {
     GPTJ_,
     LLAMA_,
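The new header also leans on forward declarations (class QDataStream;, struct ResultInfo;, template <typename T> class QList;) rather than full includes, which is the pattern include-what-you-use encourages when a type is only named by pointer or reference. A minimal sketch of that idea, using a made-up header that is not part of gpt4all:

// example.h -- hypothetical, for illustrating the forward-declaration pattern only
#ifndef EXAMPLE_H
#define EXAMPLE_H

#include <QString>                  // included: QString is stored by value below

class QDataStream;                  // forward declared: only named as a reference parameter
template <typename T> class QList;  // forward declared: only named as a reference parameter

class Example {
public:
    explicit Example(const QString &name);
    void write(QDataStream &stream) const;      // an incomplete QDataStream is fine here
    void setTags(const QList<QString> &tags);   // an incomplete QList<QString> is fine here
private:
    QString m_name;                              // a by-value member needs the full definition
};

#endif // EXAMPLE_H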