mirror of
https://github.com/nomic-ai/gpt4all.git
synced 2025-09-24 13:04:00 +00:00
C# bindings (#650)
* First working version of the C# bindings * Update README.md Signed-off-by: mvenditto <venditto.matteo@gmail.com> * Added more docs + fixed prompt callback signature * build scripts revision * Added .editorconfig + fixed style issues --------- Signed-off-by: mvenditto <venditto.matteo@gmail.com>
This commit is contained in:
@@ -0,0 +1,31 @@
|
||||
namespace Gpt4All;
|
||||
|
||||
/// <summary>
/// Interface for text prediction services
/// </summary>
public interface ITextPrediction
{
    /// <summary>
    /// Get prediction results for the prompt and provided options.
    /// </summary>
    /// <param name="text">The text to complete</param>
    /// <param name="opts">The prediction settings</param>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>The prediction result generated by the model</returns>
    Task<ITextPredictionResult> GetPredictionAsync(
        string text,
        PredictRequestOptions opts,
        // Renamed from "cancellation" so the parameter matches both the XML doc
        // above and the sibling GetStreamingPredictionAsync signature.
        CancellationToken cancellationToken = default);

    /// <summary>
    /// Get streaming prediction results for the prompt and provided options.
    /// </summary>
    /// <param name="text">The text to complete</param>
    /// <param name="opts">The prediction settings</param>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>The prediction result generated by the model</returns>
    Task<ITextPredictionStreamingResult> GetStreamingPredictionAsync(
        string text,
        PredictRequestOptions opts,
        CancellationToken cancellationToken = default);
}
|
@@ -0,0 +1,10 @@
|
||||
namespace Gpt4All;
|
||||
|
||||
/// <summary>
/// Represents the result of a text prediction.
/// </summary>
public interface ITextPredictionResult
{
    /// <summary>
    /// Whether the prediction completed without errors.
    /// </summary>
    bool Success { get; }

    /// <summary>
    /// The error message when the prediction failed; otherwise <see langword="null"/>.
    /// </summary>
    string? ErrorMessage { get; }

    /// <summary>
    /// Gets the complete predicted text.
    /// </summary>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>The full prediction text generated by the model</returns>
    Task<string> GetPredictionAsync(CancellationToken cancellationToken = default);
}
|
@@ -0,0 +1,6 @@
|
||||
namespace Gpt4All;
|
||||
|
||||
/// <summary>
/// Represents the result of a text prediction whose tokens can be consumed as a stream.
/// </summary>
public interface ITextPredictionStreamingResult : ITextPredictionResult
{
    /// <summary>
    /// Streams the predicted tokens as they become available.
    /// </summary>
    /// <param name="cancellationToken">The <see cref="CancellationToken"/> for cancellation requests. The default is <see cref="CancellationToken.None"/>.</param>
    /// <returns>An async sequence of predicted tokens</returns>
    IAsyncEnumerable<string> GetPredictionStreamingAsync(CancellationToken cancellationToken = default);
}
|
@@ -0,0 +1,30 @@
|
||||
namespace Gpt4All;
|
||||
|
||||
/// <summary>
/// Options controlling a text prediction request.
/// </summary>
public record PredictRequestOptions
{
    /// <summary>Size of the logits buffer; defaults to 0 (none).</summary>
    public nuint LogitsSize { get; init; }

    /// <summary>Size of the tokens buffer; defaults to 0 (none).</summary>
    public nuint TokensSize { get; init; }

    /// <summary>Number of tokens carried over from the past conversation.</summary>
    public int PastConversationTokensNum { get; init; }

    /// <summary>Context window size, in tokens.</summary>
    public int ContextSize { get; init; } = 1024;

    /// <summary>Maximum number of tokens to generate.</summary>
    public int TokensToPredict { get; init; } = 128;

    /// <summary>Top-K sampling cutoff.</summary>
    public int TopK { get; init; } = 40;

    /// <summary>Top-P (nucleus) sampling cutoff.</summary>
    public float TopP { get; init; } = 0.9f;

    /// <summary>Sampling temperature.</summary>
    public float Temperature { get; init; } = 0.1f;

    /// <summary>Number of prompt tokens processed per batch.</summary>
    public int Batches { get; init; } = 8;

    /// <summary>Penalty applied to repeated tokens.</summary>
    public float RepeatPenalty { get; init; } = 1.2f;

    /// <summary>How many recent tokens the repeat penalty looks back over.</summary>
    public int RepeatLastN { get; init; } = 10;

    /// <summary>Fraction of the context erased when the window is exceeded.</summary>
    public float ContextErase { get; init; } = 0.5f;

    /// <summary>A shared instance carrying the default option values.</summary>
    public static readonly PredictRequestOptions Defaults = new();
}
|
@@ -0,0 +1,27 @@
|
||||
using System.Text;
|
||||
|
||||
namespace Gpt4All;
|
||||
|
||||
/// <summary>
/// Buffered implementation of <see cref="ITextPredictionResult"/>:
/// tokens are appended as the model produces them and read back as one string.
/// </summary>
public record TextPredictionResult : ITextPredictionResult
{
    // Accumulates the generated tokens in order.
    private readonly StringBuilder _buffer = new();

    /// <inheritdoc/>
    public bool Success { get; internal set; } = true;

    /// <inheritdoc/>
    public string? ErrorMessage { get; internal set; }

    // Internal: instances are produced by the prediction pipeline, not by callers.
    internal TextPredictionResult()
    {
    }

    /// <summary>
    /// Appends a generated token to the buffered result.
    /// </summary>
    internal void Append(string token) => _buffer.Append(token);

    /// <inheritdoc/>
    public Task<string> GetPredictionAsync(CancellationToken cancellationToken = default)
        => Task.FromResult(_buffer.ToString());
}
|
@@ -0,0 +1,49 @@
|
||||
using System.Text;
|
||||
using System.Threading.Channels;
|
||||
|
||||
namespace Gpt4All;
|
||||
|
||||
/// <summary>
/// Streaming implementation of <see cref="ITextPredictionStreamingResult"/>:
/// the producer writes tokens into an unbounded channel and consumers read
/// them back either as a stream or as one concatenated string.
/// </summary>
public record TextPredictionStreamingResult : ITextPredictionStreamingResult
{
    // Unbounded so the producer's TryWrite never fails for capacity reasons.
    private readonly Channel<string> _tokens = Channel.CreateUnbounded<string>();

    /// <inheritdoc/>
    public bool Success { get; internal set; } = true;

    /// <inheritdoc/>
    public string? ErrorMessage { get; internal set; }

    /// <summary>
    /// A task that completes once the producer has signalled the end of the stream.
    /// </summary>
    public Task Completion => _tokens.Reader.Completion;

    // Internal: instances are produced by the prediction pipeline, not by callers.
    internal TextPredictionStreamingResult()
    {
    }

    /// <summary>
    /// Publishes a token to the stream; returns false if the channel is closed.
    /// </summary>
    internal bool Append(string token) => _tokens.Writer.TryWrite(token);

    /// <summary>
    /// Marks the stream as finished; no further tokens can be appended.
    /// </summary>
    internal void Complete() => _tokens.Writer.Complete();

    /// <inheritdoc/>
    public async Task<string> GetPredictionAsync(CancellationToken cancellationToken = default)
    {
        var builder = new StringBuilder();

        // Drain the stream and concatenate every token into a single string.
        await foreach (var token in GetPredictionStreamingAsync(cancellationToken).ConfigureAwait(false))
        {
            builder.Append(token);
        }

        return builder.ToString();
    }

    /// <inheritdoc/>
    public IAsyncEnumerable<string> GetPredictionStreamingAsync(CancellationToken cancellationToken = default)
        => _tokens.Reader.ReadAllAsync(cancellationToken);
}
|
Reference in New Issue
Block a user