diff --git a/examples/parallel/CMakeLists.txt b/examples/parallel/CMakeLists.txt
new file mode 100644
index 0000000..153c422
--- /dev/null
+++ b/examples/parallel/CMakeLists.txt
@@ -0,0 +1,3 @@
+set(TARGET parallel)
+add_executable(${TARGET} parallel.cpp)
+target_link_libraries(${TARGET} PRIVATE whisper ${CMAKE_THREAD_LIBS_INIT})
diff --git a/examples/parallel/README.md b/examples/parallel/README.md
new file mode 100644
index 0000000..919537b
--- /dev/null
+++ b/examples/parallel/README.md
@@ -0,0 +1,3 @@
+# parallel
+
+TODO
diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp
new file mode 100644
index 0000000..91e4a37
--- /dev/null
+++ b/examples/parallel/parallel.cpp
@@ -0,0 +1,422 @@
+#include "whisper.h"
+
+// third-party utilities
+// use your favorite implementations
+#define DR_WAV_IMPLEMENTATION
+#include "dr_wav.h"
+
+#include <cmath>
+#include <fstream>
+#include <cstdio>
+#include <string>
+#include <thread>
+#include <vector>
+
+// Terminal color map. 10 colors grouped in ranges [0.0, 0.1, ..., 0.9]
+// Lowest is red, middle is yellow, highest is green.
+const std::vector<std::string> k_colors = {
+    "\033[38;5;196m", "\033[38;5;202m", "\033[38;5;208m", "\033[38;5;214m", "\033[38;5;220m",
+    "\033[38;5;226m", "\033[38;5;190m", "\033[38;5;154m", "\033[38;5;118m", "\033[38;5;82m",
+};
+
+//  500 -> 00:05.000
+// 6000 -> 01:00.000
+std::string to_timestamp(int64_t t, bool comma = false) {
+    int64_t msec = t * 10;
+    int64_t hr  = msec / (1000 * 60 * 60);
+    msec = msec - hr * (1000 * 60 * 60);
+    int64_t min = msec / (1000 * 60);
+    msec = msec - min * (1000 * 60);
+    int64_t sec = msec / 1000;
+    msec = msec - sec * 1000;
+
+    char buf[32];
+    snprintf(buf, sizeof(buf), "%02d:%02d:%02d%s%03d", (int) hr, (int) min, (int) sec, comma ? "," : ".", (int) msec);
+
+    return std::string(buf);
+}
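Aside: `t` is measured in 10 ms units (Whisper's timestamp granularity), so the examples in the comment read 500 -> 5,000 ms -> 00:00:05.000 and 6000 -> 60,000 ms -> 00:01:00.000. A standalone sanity check of the same arithmetic (illustrative only, not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
        const int64_t t    = 6000;   // 10 ms units, as passed to to_timestamp()
        const int64_t msec = t * 10; // 60000 ms
        assert(msec / (1000 * 60 * 60) == 0); // 0 hours
        assert(msec / (1000 * 60)      == 1); // 1 minute
        assert(msec % (1000 * 60)      == 0); // 0 seconds, 0 ms -> "00:01:00.000"
        return 0;
    }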
"-ovtt" || arg == "--output-vtt") { + params.output_vtt = true; + } else if (arg == "-osrt" || arg == "--output-srt") { + params.output_srt = true; + } else if (arg == "-ps" || arg == "--print_special") { + params.print_special_tokens = true; + } else if (arg == "-pc" || arg == "--print_colors") { + params.print_colors = true; + } else if (arg == "-nt" || arg == "--no_timestamps") { + params.no_timestamps = true; + } else if (arg == "-m" || arg == "--model") { + params.model = argv[++i]; + } else if (arg == "-f" || arg == "--file") { + params.fname_inp.push_back(argv[++i]); + } else if (arg == "-h" || arg == "--help") { + whisper_print_usage(argc, argv, params); + exit(0); + } else { + fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); + whisper_print_usage(argc, argv, params); + exit(0); + } + } + + return true; +} + +void whisper_print_usage(int argc, char ** argv, const whisper_params & params) { + fprintf(stderr, "\n"); + fprintf(stderr, "usage: %s [options] file0.wav file1.wav ...\n", argv[0]); + fprintf(stderr, "\n"); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -s SEED, --seed SEED RNG seed (default: -1)\n"); + fprintf(stderr, " -t N, --threads N number of threads to use during computation (default: %d)\n", params.n_threads); + fprintf(stderr, " -ot N, --offset-t N time offset in milliseconds (default: %d)\n", params.offset_t_ms); + fprintf(stderr, " -on N, --offset-n N segment index offset (default: %d)\n", params.offset_n); + fprintf(stderr, " -v, --verbose verbose output\n"); + fprintf(stderr, " --translate translate from source language to english\n"); + fprintf(stderr, " -otxt, --output-txt output result in a text file\n"); + fprintf(stderr, " -ovtt, --output-vtt output result in a vtt file\n"); + fprintf(stderr, " -osrt, --output-srt output result in a srt file\n"); + fprintf(stderr, " -ps, --print_special print special tokens\n"); + fprintf(stderr, " -pc, --print_colors print colors\n"); + fprintf(stderr, " -nt, --no_timestamps do not print timestamps\n"); + fprintf(stderr, " -l LANG, --language LANG spoken language (default: %s)\n", params.language.c_str()); + fprintf(stderr, " -m FNAME, --model FNAME model path (default: %s)\n", params.model.c_str()); + fprintf(stderr, " -f FNAME, --file FNAME input WAV file path\n"); + fprintf(stderr, "\n"); +} + +void whisper_print_segment_callback(struct whisper_context * ctx, void * user_data) { + const whisper_params & params = *(whisper_params *) user_data; + + const int n_segments = whisper_full_n_segments(ctx); + + // print the last segment + const int i = n_segments - 1; + if (i == 0) { + printf("\n"); + } + + if (params.no_timestamps) { + if (params.print_colors) { + for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) { + if (params.print_special_tokens == false) { + const whisper_token id = whisper_full_get_token_id(ctx, i, j); + if (id >= whisper_token_eot(ctx)) { + continue; + } + } + + const char * text = whisper_full_get_token_text(ctx, i, j); + const float p = whisper_full_get_token_p (ctx, i, j); + + const int col = std::max(0, std::min((int) k_colors.size(), (int) (std::pow(p, 3)*float(k_colors.size())))); + + printf("%s%s%s", k_colors[col].c_str(), text, "\033[0m"); + } + } else { + const char * text = whisper_full_get_segment_text(ctx, i); + printf("%s", text); + } + fflush(stdout); + } else { + const int64_t t0 = whisper_full_get_segment_t0(ctx, i); + const int64_t t1 = whisper_full_get_segment_t1(ctx, i); + + if 
+
+void whisper_print_segment_callback(struct whisper_context * ctx, void * user_data) {
+    const whisper_params & params = *(whisper_params *) user_data;
+
+    const int n_segments = whisper_full_n_segments(ctx);
+
+    // print the last segment
+    const int i = n_segments - 1;
+    if (i == 0) {
+        printf("\n");
+    }
+
+    if (params.no_timestamps) {
+        if (params.print_colors) {
+            for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) {
+                if (params.print_special_tokens == false) {
+                    const whisper_token id = whisper_full_get_token_id(ctx, i, j);
+                    if (id >= whisper_token_eot(ctx)) {
+                        continue;
+                    }
+                }
+
+                const char * text = whisper_full_get_token_text(ctx, i, j);
+                const float  p    = whisper_full_get_token_p   (ctx, i, j);
+
+                const int col = std::max(0, std::min((int) k_colors.size() - 1, (int) (std::pow(p, 3)*float(k_colors.size()))));
+
+                printf("%s%s%s", k_colors[col].c_str(), text, "\033[0m");
+            }
+        } else {
+            const char * text = whisper_full_get_segment_text(ctx, i);
+            printf("%s", text);
+        }
+        fflush(stdout);
+    } else {
+        const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
+        const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
+
+        if (params.print_colors) {
+            printf("[%s --> %s] ", to_timestamp(t0).c_str(), to_timestamp(t1).c_str());
+            for (int j = 0; j < whisper_full_n_tokens(ctx, i); ++j) {
+                if (params.print_special_tokens == false) {
+                    const whisper_token id = whisper_full_get_token_id(ctx, i, j);
+                    if (id >= whisper_token_eot(ctx)) {
+                        continue;
+                    }
+                }
+
+                const char * text = whisper_full_get_token_text(ctx, i, j);
+                const float  p    = whisper_full_get_token_p   (ctx, i, j);
+
+                const int col = std::max(0, std::min((int) k_colors.size() - 1, (int) (std::pow(p, 3)*float(k_colors.size()))));
+
+                printf("%s%s%s", k_colors[col].c_str(), text, "\033[0m");
+            }
+            printf("\n");
+        } else {
+            const char * text = whisper_full_get_segment_text(ctx, i);
+
+            printf("[%s --> %s]  %s\n", to_timestamp(t0).c_str(), to_timestamp(t1).c_str(), text);
+        }
+    }
+}
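The coloring above buckets each token probability p into one of the 10 `k_colors` entries via pow(p, 3)*10, clamped to a valid index (the cube pushes mid-range probabilities toward the red end; the `- 1` clamp keeps p = 1.0 from indexing past the end). The bucket math in isolation, with illustrative values:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    int main() {
        const int n_colors = 10; // k_colors.size() in the example above
        for (const float p : {0.5f, 0.9f, 1.0f}) {
            const int col = std::max(0, std::min(n_colors - 1, (int) (std::pow(p, 3)*float(n_colors))));
            printf("p = %.2f -> bucket %d\n", p, col); // 0.50 -> 1, 0.90 -> 7, 1.00 -> 9
        }
        return 0;
    }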
+
+bool output_txt(struct whisper_context * ctx, const char * fname) {
+    std::ofstream fout(fname);
+    if (!fout.is_open()) {
+        fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
+        return false;
+    }
+
+    fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
+
+    const int n_segments = whisper_full_n_segments(ctx);
+    for (int i = 0; i < n_segments; ++i) {
+        const char * text = whisper_full_get_segment_text(ctx, i);
+        fout << text;
+    }
+
+    return true;
+}
+
+bool output_vtt(struct whisper_context * ctx, const char * fname) {
+    std::ofstream fout(fname);
+    if (!fout.is_open()) {
+        fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
+        return false;
+    }
+
+    fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
+
+    fout << "WEBVTT\n\n";
+
+    const int n_segments = whisper_full_n_segments(ctx);
+    for (int i = 0; i < n_segments; ++i) {
+        const char * text = whisper_full_get_segment_text(ctx, i);
+        const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
+        const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
+
+        fout << to_timestamp(t0) << " --> " << to_timestamp(t1) << "\n";
+        fout << text << "\n\n";
+    }
+
+    return true;
+}
+
+bool output_srt(struct whisper_context * ctx, const char * fname, const whisper_params & params) {
+    std::ofstream fout(fname);
+    if (!fout.is_open()) {
+        fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__, fname);
+        return false;
+    }
+
+    fprintf(stderr, "%s: saving output to '%s'\n", __func__, fname);
+
+    const int n_segments = whisper_full_n_segments(ctx);
+    for (int i = 0; i < n_segments; ++i) {
+        const char * text = whisper_full_get_segment_text(ctx, i);
+        const int64_t t0 = whisper_full_get_segment_t0(ctx, i);
+        const int64_t t1 = whisper_full_get_segment_t1(ctx, i);
+
+        fout << i + 1 + params.offset_n << "\n";
+        fout << to_timestamp(t0, true) << " --> " << to_timestamp(t1, true) << "\n";
+        fout << text << "\n\n";
+    }
+
+    return true;
+}
+
+int main(int argc, char ** argv) {
+    whisper_params params;
+
+    if (whisper_params_parse(argc, argv, params) == false) {
+        return 1;
+    }
+
+    if (params.seed < 0) {
+        params.seed = time(NULL);
+    }
+
+    if (params.fname_inp.empty()) {
+        fprintf(stderr, "error: no input files specified\n");
+        whisper_print_usage(argc, argv, params);
+        return 2;
+    }
+
+    // whisper init
+
+    struct whisper_context * ctx = whisper_init(params.model.c_str());
+
+    if (ctx == nullptr) {
+        fprintf(stderr, "error: failed to initialize whisper context\n");
+        return 3;
+    }
+
+    for (int f = 0; f < (int) params.fname_inp.size(); ++f) {
+        const auto fname_inp = params.fname_inp[f];
+
+        // WAV input
+        std::vector<float> pcmf32;
+        {
+            drwav wav;
+            if (!drwav_init_file(&wav, fname_inp.c_str(), NULL)) {
+                fprintf(stderr, "%s: failed to open WAV file '%s' - check your input\n", argv[0], fname_inp.c_str());
+                whisper_print_usage(argc, argv, {});
+                return 4;
+            }
+
+            if (wav.channels != 1 && wav.channels != 2) {
+                fprintf(stderr, "%s: WAV file '%s' must be mono or stereo\n", argv[0], fname_inp.c_str());
+                return 5;
+            }
+
+            if (wav.sampleRate != WHISPER_SAMPLE_RATE) {
+                fprintf(stderr, "%s: WAV file '%s' must be 16 kHz\n", argv[0], fname_inp.c_str());
+                return 6;
+            }
+
+            if (wav.bitsPerSample != 16) {
+                fprintf(stderr, "%s: WAV file '%s' must be 16-bit\n", argv[0], fname_inp.c_str());
+                return 7;
+            }
+
+            int n = wav.totalPCMFrameCount;
+
+            std::vector<int16_t> pcm16;
+            pcm16.resize(n*wav.channels);
+            drwav_read_pcm_frames_s16(&wav, n, pcm16.data());
+            drwav_uninit(&wav);
+
+            // convert to mono, float
+            pcmf32.resize(n);
+            if (wav.channels == 1) {
+                for (int i = 0; i < n; i++) {
+                    pcmf32[i] = float(pcm16[i])/32768.0f;
+                }
+            } else {
+                for (int i = 0; i < n; i++) {
+                    pcmf32[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
+                }
+            }
+        }
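For stereo input, dividing the sum of the two 16-bit channels by 65536 averages them and rescales to [-1, 1) in one step ((L + R)/2 / 32768 == (L + R)/65536; the int16_t operands are promoted to int, so the sum cannot overflow). The same conversion as a free-standing helper (hypothetical name, same math):

    #include <cstdint>
    #include <vector>

    // interleaved stereo s16 -> mono float in [-1, 1)
    std::vector<float> stereo_to_mono(const std::vector<int16_t> & pcm16, int n_frames) {
        std::vector<float> mono(n_frames);
        for (int i = 0; i < n_frames; i++) {
            mono[i] = float(pcm16[2*i] + pcm16[2*i + 1])/65536.0f;
        }
        return mono;
    }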
+
+        // print system information
+        {
+            fprintf(stderr, "\n");
+            fprintf(stderr, "system_info: n_threads = %d / %d | %s\n", params.n_threads, std::thread::hardware_concurrency(), whisper_print_system_info());
+        }
+
+        // print some info about the processing
+        {
+            fprintf(stderr, "\n");
+            if (!whisper_is_multilingual(ctx)) {
+                if (params.language != "en" || params.translate) {
+                    params.language = "en";
+                    params.translate = false;
+                    fprintf(stderr, "%s: WARNING: model is not multilingual, ignoring language and translation options\n", __func__);
+                }
+            }
+            fprintf(stderr, "%s: processing '%s' (%d samples, %.1f sec), %d threads, lang = %s, task = %s, timestamps = %d ...\n",
+                    __func__, fname_inp.c_str(), int(pcmf32.size()), float(pcmf32.size())/WHISPER_SAMPLE_RATE, params.n_threads,
+                    params.language.c_str(),
+                    params.translate ? "translate" : "transcribe",
+                    params.no_timestamps ? 0 : 1);
+
+            fprintf(stderr, "\n");
+        }
+
+        // run the inference
+        {
+            whisper_full_params wparams = whisper_full_default_params(WHISPER_SAMPLING_GREEDY);
+
+            wparams.print_realtime       = false;
+            wparams.print_progress       = false;
+            wparams.print_timestamps     = !params.no_timestamps;
+            wparams.print_special_tokens = params.print_special_tokens;
+            wparams.translate            = params.translate;
+            wparams.language             = params.language.c_str();
+            wparams.n_threads            = params.n_threads;
+            wparams.offset_ms            = params.offset_t_ms;
+
+            // this callback is called on each new segment
+            if (!wparams.print_realtime) {
+                wparams.new_segment_callback           = whisper_print_segment_callback;
+                wparams.new_segment_callback_user_data = &params;
+            }
+
+            if (whisper_full(ctx, wparams, pcmf32.data(), pcmf32.size()) != 0) {
+                fprintf(stderr, "%s: failed to process audio\n", argv[0]);
+                return 8;
+            }
+
+            printf("\n");
+
+            // output to text file
+            if (params.output_txt) {
+                const auto fname_txt = fname_inp + ".txt";
+                output_txt(ctx, fname_txt.c_str());
+            }
+
+            // output to VTT file
+            if (params.output_vtt) {
+                const auto fname_vtt = fname_inp + ".vtt";
+                output_vtt(ctx, fname_vtt.c_str());
+            }
+
+            // output to SRT file
+            if (params.output_srt) {
+                const auto fname_srt = fname_inp + ".srt";
+                output_srt(ctx, fname_srt.c_str(), params);
+            }
+        }
+    }
+
+    whisper_print_timings(ctx);
+    whisper_free(ctx);
+
+    return 0;
+}
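That completes the new example; note it still calls whisper_init(), so it exercises the single-processor path. The whisper.cpp changes below add the plumbing for multiple decoders: a whisper_init_parallel() entry point that over-allocates the key/value memory by a factor of n_processors. A minimal caller of the new API, assuming the declaration added to whisper.h at the end of this diff, might look like:

    #include "whisper.h"
    #include <cstdio>

    int main() {
        // reserve KV memory for 2 independent processors
        struct whisper_context * ctx = whisper_init_parallel("models/ggml-base.en.bin", 2);
        if (ctx == nullptr) {
            fprintf(stderr, "failed to init whisper context\n");
            return 1;
        }
        // ... run one whisper_full() per processor on separate audio chunks ...
        whisper_free(ctx);
        return 0;
    }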
diff --git a/whisper.cpp b/whisper.cpp
index f42976d..ee8d994 100644
--- a/whisper.cpp
+++ b/whisper.cpp
@@ -413,7 +413,6 @@ struct whisper_context {
     std::vector<float> probs;
     std::vector<float> logits;
 
-    std::vector<whisper_token> tokens_cur;
     std::vector<whisper_segment> result_all;
 
     std::vector<whisper_token> prompt_past;
@@ -430,7 +429,7 @@
 //
 // see the convert-pt-to-ggml.py script for details
 //
-bool whisper_model_load(const std::string & fname, whisper_context & wctx) {
+bool whisper_model_load(const std::string & fname, const int n_processors, whisper_context & wctx) {
     fprintf(stderr, "%s: loading model from '%s'\n", __func__, fname.c_str());
 
     auto & model = wctx.model;
@@ -700,11 +699,11 @@
         ctx_size += n_text_layer*(n_text_state*ggml_type_size(GGML_TYPE_F32)); // cross_attn_ln_1_b
     }
 
-    ctx_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_k
-    ctx_size += n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_v
+    ctx_size += n_processors*n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_k
+    ctx_size += n_processors*n_text_layer*n_text_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_v
 
-    ctx_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_k
-    ctx_size += n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_v
+    ctx_size += n_processors*n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_k
+    ctx_size += n_processors*n_text_layer*n_audio_ctx*n_text_state*ggml_type_size(GGML_TYPE_F16); // memory_cross_v
 
     ctx_size += (15 + 15*n_audio_layer + 24*n_text_layer)*256; // object overhead
@@ -934,7 +933,7 @@
         // key/value memory for the self-attention layer
         {
            const int n_mem      = n_text_layer*n_text_ctx;
-            const int n_elements = n_text_state*n_mem;
+            const int n_elements = n_text_state*n_mem*n_processors;
 
            model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
            model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
@@ -945,7 +944,7 @@
            const int n_audio_ctx = hparams.n_audio_ctx;
 
            const int n_mem      = n_text_layer*n_audio_ctx;
-            const int n_elements = n_text_state*n_mem;
+            const int n_elements = n_text_state*n_mem*n_processors;
 
            model.memory_cross_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
            model.memory_cross_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
@@ -955,7 +954,7 @@
            ggml_nbytes(model.memory_k)       + ggml_nbytes(model.memory_v) +
            ggml_nbytes(model.memory_cross_k) + ggml_nbytes(model.memory_cross_v);
 
-        fprintf(stderr, "%s: memory size = %8.2f MB \n", __func__, memory_size/1024.0/1024.0);
+        fprintf(stderr, "%s: memory size = %8.2f MB (%d processors)\n", __func__, memory_size/1024.0/1024.0, n_processors);
     }
 
     // load weights
@@ -1046,7 +1045,8 @@
 bool whisper_encode(
         whisper_context & wctx,
         const int n_threads,
-        const int mel_offset) {
+        const int mel_offset,
+        const int processor_id) {
     const auto & model   = wctx.model;
     const auto & mel_inp = wctx.mel;
     const auto & hparams = model.hparams;
@@ -1400,8 +1400,11 @@
                         Vcross),
                     Vcross);
 
-            struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, (ggml_element_size(model.memory_cross_k)*n_state)*(il*n_ctx));
-            struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, (ggml_element_size(model.memory_cross_v)*n_state)*(il*n_ctx));
+            const size_t offset_k = processor_id*(ggml_element_size(model.memory_cross_k)*n_state)*(model.hparams.n_text_layer*n_ctx);
+            const size_t offset_v = processor_id*(ggml_element_size(model.memory_cross_v)*n_state)*(model.hparams.n_text_layer*n_ctx);
+
+            struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_cross_k, n_state*n_ctx, offset_k + (ggml_element_size(model.memory_cross_k)*n_state)*(il*n_ctx));
+            struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_cross_v, n_state*n_ctx, offset_v + (ggml_element_size(model.memory_cross_v)*n_state)*(il*n_ctx));
 
             ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcross, k));
             ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcross, v));
@@ -1434,7 +1437,8 @@ bool whisper_decode(
         const int n_threads,
         const whisper_token * tokens,
         const int n_tokens,
-        const int n_past) {
+        const int n_past,
+        const int processor_id) {
     const auto & model   = wctx.model;
     const auto & hparams = model.hparams;
@@ -1529,10 +1533,13 @@
                         Vcur),
                     Vcur);
 
+            const size_t offset_k = processor_id*(ggml_element_size(model.memory_k)*n_state)*(n_layer*n_ctx);
+            const size_t offset_v = processor_id*(ggml_element_size(model.memory_v)*n_state)*(n_layer*n_ctx);
+
             // store key and value to memory
             {
-                struct ggml_tensor * k = ggml_view_1d(ctxL, model.memory_k, N*n_state, (ggml_element_size(model.memory_k)*n_state)*(il*n_ctx + n_past));
-                struct ggml_tensor * v = ggml_view_1d(ctxL, model.memory_v, N*n_state, (ggml_element_size(model.memory_v)*n_state)*(il*n_ctx + n_past));
+                struct ggml_tensor * k = ggml_view_1d(ctxL, model.memory_k, N*n_state, offset_k + (ggml_element_size(model.memory_k)*n_state)*(il*n_ctx + n_past));
+                struct ggml_tensor * v = ggml_view_1d(ctxL, model.memory_v, N*n_state, offset_v + (ggml_element_size(model.memory_v)*n_state)*(il*n_ctx + n_past));
 
                 ggml_build_forward_expand(&gf, ggml_cpy(ctxL, Kcur, k));
                 ggml_build_forward_expand(&gf, ggml_cpy(ctxL, Vcur, v));
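The offset_k/offset_v terms partition memory_k/memory_v into n_processors equal slices: one slice is n_layer*n_ctx rows of n_state F16 elements, and processor_id strides past the slices of the processors before it (the existing il*n_ctx + n_past term then indexes within the slice, exactly as before). Worked with illustrative base.en-like dimensions (assumed here, not taken from the diff):

    #include <cstdio>

    int main() {
        const int    n_layer   = 6;    // text layers (assumed)
        const int    n_ctx     = 448;  // text context (assumed)
        const int    n_state   = 512;  // hidden size (assumed)
        const size_t elem_size = 2;    // bytes per F16 element

        const size_t slice = elem_size*n_state*(size_t)(n_layer*n_ctx); // bytes per processor
        for (int processor_id = 0; processor_id < 3; processor_id++) {
            printf("processor %d -> base offset %zu bytes\n", processor_id, processor_id*slice);
        }
        return 0;
    }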
@@ -1550,7 +1557,7 @@
                 struct ggml_tensor * K =
                     ggml_permute(ctxL,
                             ggml_reshape_3d(ctxL,
-                                ggml_view_1d(ctxL, model.memory_k, (n_past + N)*n_state, il*n_ctx*ggml_element_size(model.memory_k)*n_state),
+                                ggml_view_1d(ctxL, model.memory_k, (n_past + N)*n_state, offset_k + il*n_ctx*ggml_element_size(model.memory_k)*n_state),
                                 n_state/n_head, n_head, n_past + N),
                             0, 2, 1, 3);
@@ -1570,7 +1577,7 @@
                 struct ggml_tensor * V_trans =
                     ggml_permute(ctxL,
                             ggml_reshape_3d(ctxL,
-                                ggml_view_1d(ctxL, model.memory_v, (n_past + N)*n_state, il*n_ctx*ggml_element_size(model.memory_v)*n_state),
+                                ggml_view_1d(ctxL, model.memory_v, (n_past + N)*n_state, offset_v + il*n_ctx*ggml_element_size(model.memory_v)*n_state),
                                 n_state/n_head, n_head, n_past + N),
                             1, 2, 0, 3);
@@ -1622,15 +1629,18 @@
             Qcur = ggml_scale(ctxL, Qcur, ggml_new_f32(ctxL, pow(float(n_state)/n_head, -0.25)));
 
+            const size_t offset_k = processor_id*(ggml_element_size(model.memory_cross_k)*n_state)*(n_layer*M);
+            const size_t offset_v = processor_id*(ggml_element_size(model.memory_cross_v)*n_state)*(n_layer*M);
+
             // Kcross is already scaled
             struct ggml_tensor * Kcross =
                 ggml_reshape_3d(ctxL,
-                        ggml_view_1d(ctxL, model.memory_cross_k, M*n_state, il*M*ggml_element_size(model.memory_cross_k)*n_state),
+                        ggml_view_1d(ctxL, model.memory_cross_k, M*n_state, offset_k + il*M*ggml_element_size(model.memory_cross_k)*n_state),
                         n_state/n_head, n_head, M);
 
             struct ggml_tensor * Vcross =
                 ggml_reshape_3d(ctxL,
-                        ggml_view_1d(ctxL, model.memory_cross_v, M*n_state, il*M*ggml_element_size(model.memory_cross_v)*n_state),
+                        ggml_view_1d(ctxL, model.memory_cross_v, M*n_state, offset_v + il*M*ggml_element_size(model.memory_cross_v)*n_state),
                         n_state/n_head, n_head, M);
 
             // ------
@@ -2116,7 +2126,26 @@ struct whisper_context * whisper_init(const char * path_model) {
 
     ctx->t_start_us = t_start_us;
 
-    if (!whisper_model_load(path_model, *ctx)) {
+    if (!whisper_model_load(path_model, 1, *ctx)) {
+        fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, path_model);
+        return NULL;
+    }
+
+    ctx->t_load_us = ggml_time_us() - t_start_us;
+
+    return ctx;
+}
+
+struct whisper_context * whisper_init_parallel(const char * path_model, int n_processors) {
+    ggml_time_init();
+
+    whisper_context * ctx = new whisper_context;
+
+    const int64_t t_start_us = ggml_time_us();
+
+    ctx->t_start_us = t_start_us;
+
+    if (!whisper_model_load(path_model, n_processors, *ctx)) {
         fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, path_model);
         return NULL;
     }
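whisper_init_parallel() is whisper_init() with n_processors forwarded to whisper_model_load(), which scales the four KV tensors accordingly (the ctx_size and n_elements changes earlier in this diff). Rough footprint arithmetic under the same assumed base.en-like dimensions as above, with n_audio_ctx = 1500:

    #include <cstdio>

    int main() {
        const long n_text_layer = 6, n_text_ctx = 448, n_audio_ctx = 1500, n_text_state = 512;
        const long f16 = 2; // bytes per element

        for (int n_processors = 1; n_processors <= 2; n_processors++) {
            const long self  = 2*n_processors*n_text_layer*n_text_ctx *n_text_state*f16; // memory_k + memory_v
            const long cross = 2*n_processors*n_text_layer*n_audio_ctx*n_text_state*f16; // memory_cross_k/v
            printf("n_processors = %d -> ~%.2f MB of KV memory\n", n_processors, (self + cross)/1024.0/1024.0);
        }
        return 0;
    }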
@@ -2167,7 +2196,7 @@
 int whisper_encode(struct whisper_context * ctx, int offset, int n_threads) {
     const int64_t t_start_us = ggml_time_us();
 
-    if (!whisper_encode(*ctx, n_threads, offset)) {
+    if (!whisper_encode(*ctx, n_threads, offset, 0)) {
         fprintf(stderr, "%s: failed to eval\n", __func__);
         return -1;
     }
@@ -2180,7 +2209,7 @@
 int whisper_decode(struct whisper_context * ctx, const whisper_token * tokens, int n_tokens, int n_past, int n_threads) {
     const int64_t t_start_us = ggml_time_us();
 
-    if (!whisper_decode(*ctx, n_threads, tokens, n_tokens, n_past)) {
+    if (!whisper_decode(*ctx, n_threads, tokens, n_tokens, n_past, 0)) {
         fprintf(stderr, "%s: failed to eval\n", __func__);
         return 1;
     }
@@ -2302,6 +2331,7 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
             /*.n_threads    =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
             /*.offset_ms    =*/ 0,
+            /*.n_processors =*/ 1,
 
             /*.translate    =*/ false,
             /*.no_context   =*/ false,
@@ -2333,6 +2363,7 @@ struct whisper_full_params whisper_full_default_params(enum whisper_sampling_str
             /*.n_threads    =*/ std::min(4, (int32_t) std::thread::hardware_concurrency()),
             /*.offset_ms    =*/ 0,
+            /*.n_processors =*/ 1,
 
             /*.translate    =*/ false,
             /*.no_context   =*/ false,
@@ -2369,7 +2400,6 @@ int whisper_full(
         int n_samples) {
     // clear old results
     auto & result_all = ctx->result_all;
-    auto & tokens_cur = ctx->tokens_cur;
 
     result_all.clear();
@@ -2379,10 +2409,12 @@
         return -1;
     }
 
+    const int seek_start = params.offset_ms/10;
+
     // if length of spectrogram is less than 1s (100 samples), then return
     // basically don't process anything that is less than 1s
     // see issue #39: https://github.com/ggerganov/whisper.cpp/issues/39
-    if (whisper_n_len(ctx) < 100) {
+    if (whisper_n_len(ctx) < 100 + seek_start) {
         return 0;
     }
@@ -2406,8 +2438,14 @@
     int progress_prev = 0;
     int progress_step = 5;
 
+    std::vector<whisper_token> tokens_cur;
+    tokens_cur.reserve(whisper_n_text_ctx(ctx));
+
+    std::vector<whisper_token> prompt;
+    prompt.reserve(whisper_n_text_ctx(ctx));
+
     // main loop
-    int seek = params.offset_ms/10;
+    int seek = seek_start;
     while (true) {
         int progress_cur = (100*seek)/whisper_n_len(ctx);
         while (progress_cur >= progress_prev + progress_step) {
@@ -2427,9 +2465,8 @@
             return 7;
         }
 
-        std::vector<whisper_token> prompt;
-
         int n_past = 0;
+        prompt.clear();
 
         // if we have already generated some text, use it as a prompt to condition the next generation
         if (prompt_past.size() > 0) {
diff --git a/whisper.h b/whisper.h
index 5414248..c918e98 100644
--- a/whisper.h
+++ b/whisper.h
@@ -72,6 +72,8 @@ extern "C" {
     // Returns NULL on failure.
     WHISPER_API struct whisper_context * whisper_init(const char * path_model);
 
+    WHISPER_API struct whisper_context * whisper_init_parallel(const char * path_model, int n_processors);
+
     // Frees all memory allocated by the model.
     WHISPER_API void whisper_free(struct whisper_context * ctx);
@@ -170,6 +172,7 @@ extern "C" {
         int n_threads;
         int offset_ms;
+        int n_processors;
 
         bool translate;
         bool no_context;