diff --git a/README.md b/README.md
index 25b63c151a2..9192462bfe8 100644
--- a/README.md
+++ b/README.md
@@ -61,7 +61,7 @@ Or you can even run it straight in the browser: [talk.wasm](examples/talk.wasm)
 - Various other examples are available in the [examples](examples) folder
 
 The tensor operators are optimized heavily for Apple silicon CPUs. Depending on the computation size, Arm Neon SIMD
-instrisics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since
+intrinsics or CBLAS Accelerate framework routines are used. The latter are especially effective for bigger sizes since
 the Accelerate framework utilizes the special-purpose AMX coprocessor available in modern Apple products.
 
 ## Quick start
diff --git a/whisper.cpp b/whisper.cpp
index eb8dff195d7..e1cc6b77b1a 100644
--- a/whisper.cpp
+++ b/whisper.cpp
@@ -82,7 +82,7 @@ static void byteswap_tensor(ggml_tensor * tensor) {
     } while (0)
 #define BYTESWAP_TENSOR(t) \
     do { \
-        byteswap_tensor(tensor); \
+        byteswap_tensor(t); \
     } while (0)
 #else
 #define BYTESWAP_VALUE(d) do {} while (0)
@@ -589,7 +589,7 @@ struct whisper_model {
 struct whisper_sequence {
     std::vector<whisper_token> tokens;
 
-    // the accumulated transcription in the current interation (used to truncate the tokens array)
+    // the accumulated transcription in the current iteration (used to truncate the tokens array)
     int result_len;
 
     double sum_logprobs_all; // the sum of the log probabilities of the tokens
diff --git a/whisper.h b/whisper.h
index e7c1a1259f5..588c287691a 100644
--- a/whisper.h
+++ b/whisper.h
@@ -346,7 +346,7 @@ extern "C" {
         void * user_data);
 
     // Parameters for the whisper_full() function
-    // If you chnage the order or add new parameters, make sure to update the default values in whisper.cpp:
+    // If you change the order or add new parameters, make sure to update the default values in whisper.cpp:
     // whisper_full_default_params()
     struct whisper_full_params {
         enum whisper_sampling_strategy strategy;