Commit 495177f
fix: typos.
1 parent: de31a06
32 files changed (+43, -43)

LLama.Examples/Examples/BatchedExecutorSaveAndLoad.cs (1 addition, 1 deletion)

```diff
@@ -80,7 +80,7 @@ public static async Task Run()
     // Continue generating text
     await GenerateTokens(executor, conversation, sampler, decoder, n_len);

-    // Display final ouput
+    // Display final output
     AnsiConsole.MarkupLine($"[red]{prompt}{decoder.Read()}[/]");
 }
```

LLama.Examples/Examples/LlavaInteractiveModeExecute.cs (1 addition, 1 deletion)

```diff
@@ -95,7 +95,7 @@ public static async Task Run()
     Console.WriteLine();


-    // Initilize Images in executor
+    // Initialize Images in executor
     //
     foreach (var image in imagePaths)
     {
```

LLama.Examples/Examples/SpeechChat.cs (2 additions, 2 deletions)

```diff
@@ -124,7 +124,7 @@ The short audio comes from a user that is speaking to an AI Language Model in re
     int totalNonBlankClips; // ..but for example's sake they work on a
     int nonIdleTime;        // ..clip-based quant-length (1 = clipLength).
     // Default detection settings: A speech of 750ms, followed by pause of 500ms. (2x250ms)
-    public (int minBlanksPerSeperation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);
+    public (int minBlanksPerSeparation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);

     public HashSet<ISpeechListener> ServiceUsers = [];

@@ -156,7 +156,7 @@ void OnAudioDataAvailable(object? sender, WaveInEventArgs e)

     // Compare the volume with the threshold and act accordingly. Once an interesting and 'full' set of clips pops up, serve it.
     if (maxVolume >= voiceDetectionThreshold) { currentBlankClips = 0; totalNonBlankClips++; nonIdleTime++; }
-    else if (++currentBlankClips < detectionSettings.minBlanksPerSeperation) { nonIdleTime++; }
+    else if (++currentBlankClips < detectionSettings.minBlanksPerSeparation) { nonIdleTime++; }
     else
     {
         if (totalNonBlankClips >= detectionSettings.minNonBlanksForValidMessages) { SendTranscription(); }
```
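For context on the renamed field: `minBlanksPerSeparation` drives a simple clip-counting voice-activity heuristic. Below is a minimal standalone sketch of that logic, assuming 250ms clips as in the comment above; the real class also tracks `nonIdleTime` and calls `SendTranscription()` rather than returning a flag.

```cs
// Minimal sketch (not the example's actual class) of the clip-counting
// heuristic: with 250ms clips, the default (2, 3) means at least 3 loud
// clips (~750ms of speech) followed by 2 blank clips (~500ms of pause).
class VoiceActivitySketch
{
    public (int minBlanksPerSeparation, int minNonBlanksForValidMessages) detectionSettings = (2, 3);

    private int currentBlankClips;
    private int totalNonBlankClips;

    // Returns true when a "full" message (speech followed by a pause) is complete.
    public bool OnClip(float maxVolume, float voiceDetectionThreshold)
    {
        if (maxVolume >= voiceDetectionThreshold)
        {
            currentBlankClips = 0;
            totalNonBlankClips++;
            return false;
        }

        if (++currentBlankClips < detectionSettings.minBlanksPerSeparation)
            return false;

        var isValidMessage = totalNonBlankClips >= detectionSettings.minNonBlanksForValidMessages;
        currentBlankClips = 0;
        totalNonBlankClips = 0;
        return isValidMessage;
    }
}
```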

LLama.Web/Async/AsyncLock.cs (1 addition, 1 deletion)

```diff
@@ -1,7 +1,7 @@
 namespace LLama.Web.Async
 {
     /// <summary>
-    /// Create an Async locking using statment
+    /// Create an Async locking using statement
     /// </summary>
     public sealed class AsyncLock
     {
```
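As a usage note for the corrected summary: the class is meant to be held through a using statement so the lock releases on dispose. A hypothetical sketch follows; the `LockAsync()` name and disposable-releaser shape are assumptions for illustration, not the verified LLama.Web API.

```cs
// Hypothetical usage, assuming AsyncLock exposes a LockAsync() that
// returns an IDisposable releaser (an assumption, not this repo's API).
private readonly AsyncLock _sessionLock = new AsyncLock();

public async Task CloseSessionAsync(string connectionId)
{
    using (await _sessionLock.LockAsync())
    {
        // Only one caller runs this section at a time, and no thread
        // is blocked while waiting for the lock.
    }
}
```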

LLama.Web/Extensions.cs (3 additions, 3 deletions)

```diff
@@ -34,14 +34,14 @@ public static List<string> GetOutputFilters(this ISessionConfig sessionConfig)
     private static List<string> CombineCSV(List<string> list, string csv)
     {
         var results = list is null || list.Count == 0
-            ? CommaSeperatedToList(csv)
-            : CommaSeperatedToList(csv).Concat(list);
+            ? CommaSeparatedToList(csv)
+            : CommaSeparatedToList(csv).Concat(list);
         return results
             .Distinct()
             .ToList();
     }

-    private static List<string> CommaSeperatedToList(string value)
+    private static List<string> CommaSeparatedToList(string value)
     {
         if (string.IsNullOrEmpty(value))
             return new List<string>();
```
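For readers outside the full file, a self-contained sketch of what the renamed helper plausibly does (the exact split options are an assumption based on the name, not shown in this diff):

```cs
using System.Collections.Generic;
using System.Linq;

static List<string> CommaSeparatedToList(string value)
{
    if (string.IsNullOrEmpty(value))
        return new List<string>();

    // Assumed behavior: split on commas, trim whitespace, drop empty entries.
    return value
        .Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries)
        .ToList();
}

// With that behavior, CombineCSV(new List<string> { "b", "c" }, "a, b")
// would yield ["a", "b", "c"]: CSV entries first, then the list, de-duplicated.
```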

LLama.Web/Hubs/SessionConnectionHub.cs (1 addition, 1 deletion)

```diff
@@ -30,7 +30,7 @@ public override async Task OnDisconnectedAsync(Exception exception)
     {
         _logger.Log(LogLevel.Information, "[OnDisconnectedAsync], Id: {0}", Context.ConnectionId);

-        // Remove connections session on dissconnect
+        // Remove connections session on disconnect
        await _modelSessionService.CloseAsync(Context.ConnectionId);
        await base.OnDisconnectedAsync(exception);
     }
```

LLama.Web/README.md (3 additions, 3 deletions)

```diff
@@ -1,8 +1,8 @@
 ## LLama.Web - Basic ASP.NET Core examples of LLamaSharp in action
-LLama.Web has no heavy dependencies and no extra frameworks ove bootstrap and jquery to keep the examples clean and easy to copy over to your own project
+LLama.Web has no heavy dependencies and no extra frameworks over bootstrap and jquery to keep the examples clean and easy to copy over to your own project

 ## Websockets
-Using signalr websockets simplifys the streaming of responses and model per connection management
+Using signalr websockets simplifies the streaming of responses and model per connection management



@@ -23,7 +23,7 @@ Example:
 {
     "Name": "Alpaca",
     "Path": "D:\\Repositories\\AI\\Prompts\\alpaca.txt",
-    "Prompt": "Alternativly to can set a prompt text directly and omit the Path"
+    "Prompt": "Alternatively to can set a prompt text directly and omit the Path"
     "AntiPrompt": [
         "User:"
     ],
```

LLama.Web/Services/ModelService.cs (1 addition, 1 deletion)

```diff
@@ -8,7 +8,7 @@ namespace LLama.Web.Services
 {

     /// <summary>
-    /// Sercive for handling Models,Weights & Contexts
+    /// Service for handling Models,Weights & Contexts
     /// </summary>
     public class ModelService : IModelService
     {
```

LLama/Abstractions/IInferenceParams.cs (1 addition, 1 deletion)

```diff
@@ -6,7 +6,7 @@
 namespace LLama.Abstractions
 {
     /// <summary>
-    /// The paramters used for inference.
+    /// The parameters used for inference.
     /// </summary>
     public interface IInferenceParams
     {
```

LLama/Abstractions/ILLamaExecutor.cs (1 addition, 1 deletion)

```diff
@@ -20,7 +20,7 @@ public interface ILLamaExecutor
     /// </summary>
     public bool IsMultiModal { get; }
     /// <summary>
-    /// Muti-Modal Projections / Clip Model weights
+    /// Multi-Modal Projections / Clip Model weights
     /// </summary>
     public LLavaWeights? ClipModel { get; }
```

LLama/Abstractions/IModelParams.cs (1 addition, 1 deletion)

```diff
@@ -232,7 +232,7 @@ public override void Write(Utf8JsonWriter writer, TensorSplitsCollection value,
 public sealed record MetadataOverride
 {
     /// <summary>
-    /// Get the key being overriden by this override
+    /// Get the key being overridden by this override
     /// </summary>
     public string Key { get; }
```

LLama/ChatSession.cs (1 addition, 1 deletion)

```diff
@@ -545,7 +545,7 @@ public async IAsyncEnumerable<string> RegenerateAssistantMessageAsync(
     InferenceParams? inferenceParams = null,
     [EnumeratorCancellation] CancellationToken cancellationToken = default)
 {
-    // Make sure the last message is an assistant message (reponse from the LLM).
+    // Make sure the last message is an assistant message (response from the LLM).
     ChatHistory.Message? lastAssistantMessage = History.Messages.LastOrDefault();

     if (lastAssistantMessage is null
```

LLama/Common/InferenceParams.cs (1 addition, 1 deletion)

```diff
@@ -7,7 +7,7 @@
 namespace LLama.Common
 {
     /// <summary>
-    /// The paramters used for inference.
+    /// The parameters used for inference.
     /// </summary>
     public record InferenceParams
         : IInferenceParams
```

LLama/Extensions/IContextParamsExtensions.cs (1 addition, 1 deletion)

```diff
@@ -6,7 +6,7 @@
 namespace LLama.Extensions
 {
     /// <summary>
-    /// Extention methods to the IContextParams interface
+    /// Extension methods to the IContextParams interface
     /// </summary>
     public static class IContextParamsExtensions
     {
```

LLama/Extensions/IModelParamsExtensions.cs (1 addition, 1 deletion)

```diff
@@ -7,7 +7,7 @@
 namespace LLama.Extensions;

 /// <summary>
-/// Extention methods to the IModelParams interface
+/// Extension methods to the IModelParams interface
 /// </summary>
 public static class IModelParamsExtensions
 {
```

LLama/LLamaContext.cs (1 addition, 1 deletion)

```diff
@@ -628,7 +628,7 @@ protected override bool ReleaseHandle()
 }

 /// <summary>
-/// Copy bytes to a desintation pointer.
+/// Copy bytes to a destination pointer.
 /// </summary>
 /// <param name="dst">Destination to write to</param>
 /// <param name="length">Length of the destination buffer</param>
```

LLama/LLamaExecutorBase.cs (1 addition, 1 deletion)

```diff
@@ -209,7 +209,7 @@ protected virtual void HandleRunOutOfContext(int tokensToKeep)
 /// <summary>
 /// Try to reuse the matching prefix from the session file.
 /// </summary>
-protected virtual void TryReuseMathingPrefix()
+protected virtual void TryReuseMatchingPrefix()
 {
     if (_n_session_consumed < _session_tokens.Count)
     {
```
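The renamed method's purpose, per its summary, is to skip re-decoding tokens that a saved session file already covers. A small illustrative sketch of that idea follows; it is not the executor's actual implementation, which works on `_session_tokens` and `_n_session_consumed`.

```cs
using System.Collections.Generic;

// Illustrative only: count the leading tokens shared by the saved session
// and the new input, so decoding can resume after the common prefix.
static int MatchingPrefixLength(IReadOnlyList<int> sessionTokens, IReadOnlyList<int> inputTokens)
{
    var n = 0;
    while (n < sessionTokens.Count && n < inputTokens.Count && sessionTokens[n] == inputTokens[n])
        n++;
    return n;
}
```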

LLama/LLamaInstructExecutor.cs (2 additions, 2 deletions)

```diff
@@ -189,7 +189,7 @@ protected override Task InferInternal(IInferenceParams inferenceParams, InferSta
         HandleRunOutOfContext(inferenceParams.TokensKeep);
     }

-    TryReuseMathingPrefix();
+    TryReuseMatchingPrefix();

     var (result, _) = Context.NativeHandle.Decode(_embeds, LLamaSeqId.Zero, batch, ref _pastTokensCount);
     if (result != DecodeResult.Ok)
@@ -259,7 +259,7 @@ protected override Task InferInternal(IInferenceParams inferenceParams, InferSta
     return Task.CompletedTask;
 }
 /// <summary>
-/// The desciptor of the state of the instruct executor.
+/// The descriptor of the state of the instruct executor.
 /// </summary>
 public class InstructExecutorState : ExecutorBaseState
 {
```

LLama/LLamaInteractExecutor.cs (1 addition, 1 deletion)

```diff
@@ -234,7 +234,7 @@ protected override Task InferInternal(IInferenceParams inferenceParams, InferSta
         HandleRunOutOfContext(inferenceParams.TokensKeep);
     }

-    TryReuseMathingPrefix();
+    TryReuseMatchingPrefix();

     // Changes to support Multi-Modal LLMs.
     //
```

LLama/LLamaStatelessExecutor.cs (1 addition, 1 deletion)

```diff
@@ -63,7 +63,7 @@ public StatelessExecutor(LLamaWeights weights, IContextParams @params, ILogger?
 /// <inheritdoc />
 public async IAsyncEnumerable<string> InferAsync(string prompt, IInferenceParams? inferenceParams = null, [EnumeratorCancellation] CancellationToken cancellationToken = default)
 {
-    // Ensure the context from last time is disposed (it always hould be)
+    // Ensure the context from last time is disposed (it always should be)
     if (!Context.NativeHandle.IsClosed)
         Context.Dispose();
```

LLama/Native/NativeApi.LLava.cs (2 additions, 2 deletions)

```diff
@@ -21,7 +21,7 @@ public static unsafe partial class NativeApi
 /// <param name="ctx_clip">SafeHandle to the Clip Model</param>
 /// <param name="n_threads">Number of threads</param>
 /// <param name="image_bytes">Binary image in jpeg format</param>
-/// <param name="image_bytes_length">Bytes lenght of the image</param>
+/// <param name="image_bytes_length">Bytes length of the image</param>
 /// <returns>SafeHandle to the Embeddings</returns>
 [DllImport(llavaLibraryName, EntryPoint = "llava_image_embed_make_with_bytes",
     CallingConvention = CallingConvention.Cdecl)]
@@ -35,7 +35,7 @@ SafeLlavaImageEmbedHandle llava_image_embed_make_with_bytes(SafeLlavaModelHandle
 /// <param name="ctx_clip">SafeHandle to the Clip Model</param>
 /// <param name="n_threads">Number of threads</param>
 /// <param name="image_path">Image filename (jpeg) to generate embeddings</param>
-/// <returns>SafeHandel to the embeddings</returns>
+/// <returns>SafeHandle to the embeddings</returns>
 [DllImport(llavaLibraryName, EntryPoint = "llava_image_embed_make_with_filename", CallingConvention = CallingConvention.Cdecl)]
 public static extern
 SafeLlavaImageEmbedHandle llava_image_embed_make_with_filename(SafeLlavaModelHandle ctx_clip, int n_threads,
```

LLama/Native/NativeApi.Load.cs (1 addition, 1 deletion)

```diff
@@ -34,7 +34,7 @@ static NativeApi()
     "3. One of the dependency of the native library is missed. Please use `ldd` on linux, `dumpbin` on windows and `otool`" +
     "to check if all the dependency of the native library is satisfied. Generally you could find the libraries under your output folder.\n" +
     "4. Try to compile llama.cpp yourself to generate a libllama library, then use `LLama.Native.NativeLibraryConfig.WithLibrary` " +
-    "to specify it at the very beginning of your code. For more informations about compilation, please refer to LLamaSharp repo on github.\n");
+    "to specify it at the very beginning of your code. For more information about compilation, please refer to LLamaSharp repo on github.\n");
 }

 // Now that the "loaded" flag is set configure logging in llama.cpp
```

LLama/Native/NativeLibraryConfig.cs (2 additions, 2 deletions)

```diff
@@ -101,7 +101,7 @@ public NativeLibraryConfig SkipCheck(bool enable = true)
 }

 /// <summary>
-/// Add self-defined search directories. Note that the file stucture of the added
+/// Add self-defined search directories. Note that the file structure of the added
 /// directories must be the same as the default directory. Besides, the directory
 /// won't be used recursively.
 /// </summary>
@@ -116,7 +116,7 @@ public NativeLibraryConfig WithSearchDirectories(IEnumerable<string> directories
 }

 /// <summary>
-/// Add self-defined search directories. Note that the file stucture of the added
+/// Add self-defined search directories. Note that the file structure of the added
 /// directories must be the same as the default directory. Besides, the directory
 /// won't be used recursively.
 /// </summary>
```
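Putting the corrected summaries in context, a usage sketch based only on the signature shown above; the directory path is a placeholder, and the call belongs at the very beginning of the program, before any native library is loaded:

```cs
// Each added directory must mirror the default backend folder structure,
// and subdirectories are not searched. "./runtimes/custom" is a placeholder.
NativeLibraryConfig.Instance
    .WithSearchDirectories(new[] { "./runtimes/custom" });
```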

README.md (1 addition, 1 deletion)

```diff
@@ -175,7 +175,7 @@ For more examples, please refer to [LLamaSharp.Examples](./LLama.Examples).
 #### Why GPU is not used when I have installed CUDA

 1. If you are using backend packages, please make sure you have installed the cuda backend package which matches the cuda version of your device. Please note that before LLamaSharp v0.10.0, only one backend package should be installed.
-2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA libraty is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
+2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA library is loaded, please check if `GpuLayerCount > 0` when loading the model weight.

 #### Why the inference is slow
```
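A compact sketch of the two checks in that FAQ answer; the model path and layer count below are placeholders, not values from this commit:

```cs
// 1. Log which native library gets loaded (call before touching any model).
NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info);

// 2. Make sure layers are actually offloaded when the weights are loaded.
var parameters = new ModelParams("path/to/model.gguf")
{
    GpuLayerCount = 32 // must be > 0 for the CUDA backend to do any work
};
```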

docs/Examples/LLavaInteractiveModeExecute.md (1 addition, 1 deletion)

```diff
@@ -98,7 +98,7 @@ namespace LLama.Examples.Examples
     Console.WriteLine();


-    // Initilize Images in executor
+    // Initialize Images in executor
     //
     foreach (var image in imagePaths)
     {
```

docs/FAQ.md (2 additions, 2 deletions)

```diff
@@ -1,11 +1,11 @@
-# Frequently asked qustions
+# Frequently asked questions

 Sometimes, your application with LLM and LLamaSharp may have unexpected behaviours. Here are some frequently asked questions, which may help you to deal with your problem.

 ## Why GPU is not used when I have installed CUDA

 1. If you are using backend packages, please make sure you have installed the cuda backend package which matches the cuda version of your device. Please note that before LLamaSharp v0.10.0, only one backend package should be installed.
-2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA libraty is loaded, please check if `GpuLayerCount > 0` when loading the model weight.
+2. Add `NativeLibraryConfig.Instance.WithLogs(LLamaLogLevel.Info)` to the very beginning of your code. The log will show which native library file is loaded. If the CPU library is loaded, please try to compile the native library yourself and open an issue for that. If the CUDA library is loaded, please check if `GpuLayerCount > 0` when loading the model weight.

 ## Why the inference is slow
```

docs/QuickStart.md (1 addition, 1 deletion)

```diff
@@ -169,7 +169,7 @@ do
     Console.WriteLine();


-    // Initilize Images in executor
+    // Initialize Images in executor
     //
     ex.ImagePaths = imagePaths.ToList();
 }
```

docs/Tutorials/Executors.md (3 additions, 3 deletions)

````diff
@@ -23,7 +23,7 @@ public interface ILLamaExecutor
     /// </summary>
     public bool IsMultiModal { get; }
     /// <summary>
-    /// Muti-Modal Projections / Clip Model weights
+    /// Multi-Modal Projections / Clip Model weights
     /// </summary>
     public LLavaWeights? ClipModel { get; }

@@ -110,7 +110,7 @@ At this time, by repeating the same mode of `Q: xxx? A: xxx.`, LLM outputs the a

 ## BatchedExecutor

-Different from other executors, `BatchedExecutor` could accept multiple inputs from different sessions and geneate outputs for them at the same time. Here is an example to use it.
+Different from other executors, `BatchedExecutor` could accept multiple inputs from different sessions and generate outputs for them at the same time. Here is an example to use it.

 ```cs
 using LLama.Batched;
@@ -249,7 +249,7 @@ Here is the parameters for LLamaSharp executors.

 ```cs
 /// <summary>
-/// The paramters used for inference.
+/// The parameters used for inference.
 /// </summary>
 public record InferenceParams
     : IInferenceParams
````

docs/Tutorials/NativeLibraryConfig.md (1 addition, 1 deletion)

```diff
@@ -8,7 +8,7 @@ As indicated in [Architecture](../Architecture.md), LLamaSharp uses the native l
 Before introducing the way to customize native library loading, please follow the tips below to see if you need to compile the native library yourself, rather than use the published backend packages, which contain native library files for multiple targets.

 1. Your device/environment has not been supported by any published backend packages. For example, vulkan has not been supported yet. In this case, it will mean a lot to open an issue to tell us you are using it. Since our support for new backend will have a delay, you could compile yourself before that.
-2. You want to gain the best performance of LLamaSharp. Because LLamaSharp offloads the model to both GPU and CPU, the performance is significantly related with CPU if your GPU memory size is small. AVX ([Advanced Vector Extensions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions)) and BLAS ([Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms)) are the most important ways to accelerate the CPU computation. By default, LLamaSharp disables the support for BLAS and use AVX2 for CUDA backend yet. If you would like to enable BLAS or use AVX 512 along with CUDA, please compile the native library youself, following the [instructions here](../ContributingGuide.md).
+2. You want to gain the best performance of LLamaSharp. Because LLamaSharp offloads the model to both GPU and CPU, the performance is significantly related with CPU if your GPU memory size is small. AVX ([Advanced Vector Extensions](https://en.wikipedia.org/wiki/Advanced_Vector_Extensions)) and BLAS ([Basic Linear Algebra Subprograms](https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms)) are the most important ways to accelerate the CPU computation. By default, LLamaSharp disables the support for BLAS and use AVX2 for CUDA backend yet. If you would like to enable BLAS or use AVX 512 along with CUDA, please compile the native library yourself, following the [instructions here](../ContributingGuide.md).
 3. You want to debug the c++ code.
```
