talk-llama : update to latest llama.cpp (improved performance)
parent 69b8503935
commit ea36831459
File diff suppressed because it is too large
examples/talk-llama/llama.h
@@ -6,7 +6,7 @@
 #include <stdbool.h>
 
 #ifdef LLAMA_SHARED
-#    ifdef _WIN32
+#    if defined(_WIN32) && !defined(__MINGW32__)
 #        ifdef LLAMA_BUILD
 #            define LLAMA_API __declspec(dllexport)
 #        else
@@ -20,7 +20,7 @@
 #endif
 
 #define LLAMA_FILE_VERSION 1
-#define LLAMA_FILE_MAGIC 0x67676d66 // 'ggmf' in hex
+#define LLAMA_FILE_MAGIC 0x67676a74 // 'ggjt' in hex
 #define LLAMA_FILE_MAGIC_UNVERSIONED 0x67676d6c // pre-versioned files
 
 #ifdef __cplusplus
@@ -45,7 +45,7 @@ extern "C" {
 
     } llama_token_data;
 
-    typedef void (*llama_progress_callback)(double progress, void *ctx);
+    typedef void (*llama_progress_callback)(float progress, void *ctx);
 
     struct llama_context_params {
         int n_ctx;   // text context
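
Note: the progress callback now reports a float. A minimal caller-side sketch (not part of this diff; it assumes llama_context_params also carries progress_callback / progress_callback_user_data members, as in upstream llama.cpp):

    #include <cstdio>
    #include "llama.h"

    static void print_progress(float progress, void * user_data) {
        (void) user_data;
        std::fprintf(stderr, "\rloading model: %3.0f%%", progress * 100.0f);
    }

    static struct llama_context_params make_params() {
        struct llama_context_params params = llama_context_default_params();
        params.progress_callback           = print_progress; // now takes float, not double
        params.progress_callback_user_data = nullptr;        // assumed member, see note above
        return params;
    }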
@@ -55,6 +55,7 @@
         bool f16_kv;     // use fp16 for KV cache
         bool logits_all; // the llama_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
+        bool use_mmap;   // use mmap if possible
         bool use_mlock;  // force system to keep model in RAM
         bool embedding;  // embedding mode only
 
@@ -66,6 +67,9 @@ extern "C" {
 
     LLAMA_API struct llama_context_params llama_context_default_params();
 
+    LLAMA_API bool llama_mmap_supported();
+    LLAMA_API bool llama_mlock_supported();
+
     // Various functions for loading a ggml llama model.
     // Allocate (almost) all memory needed for the model.
     // Return NULL on failure
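
Note: callers can now query platform support before opting in. A usage sketch (assumption, not from this commit; llama_init_from_file is taken from the rest of the header):

    #include <cstdio>
    #include "llama.h"

    static struct llama_context * load_model(const char * path_model) {
        struct llama_context_params params = llama_context_default_params();
        params.use_mmap  = llama_mmap_supported();   // map the weights file instead of copying it
        params.use_mlock = llama_mlock_supported();  // optionally pin the mapping in RAM

        struct llama_context * ctx = llama_init_from_file(path_model, params);
        if (ctx == NULL) {
            std::fprintf(stderr, "failed to load %s\n", path_model);
        }
        return ctx;
    }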
@@ -81,8 +85,24 @@
     LLAMA_API int llama_model_quantize(
             const char * fname_inp,
             const char * fname_out,
-                   int   itype,
-                   int   qk);
+                   int   itype);
+
+    // Returns the KV cache that will contain the context for the
+    // ongoing prediction with the model.
+    LLAMA_API const uint8_t * llama_get_kv_cache(struct llama_context * ctx);
+
+    // Returns the size of the KV cache
+    LLAMA_API size_t llama_get_kv_cache_size(struct llama_context * ctx);
+
+    // Returns the number of tokens in the KV cache
+    LLAMA_API int llama_get_kv_cache_token_count(struct llama_context * ctx);
+
+    // Sets the KV cache containing the current context for the model
+    LLAMA_API void llama_set_kv_cache(
+            struct llama_context * ctx,
+                    const uint8_t * kv_cache,
+                           size_t   n_size,
+                              int   n_token_count);
 
     // Run the llama inference to obtain the logits and probabilities for the next token.
     // tokens + n_tokens is the provided batch of new tokens to process
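
Note: the new accessors make the prediction state snapshotable. A sketch of saving and restoring it (assumption, not from this commit; error handling omitted):

    #include <vector>
    #include "llama.h"

    // copy the current KV cache out of the context
    static std::vector<uint8_t> save_kv(struct llama_context * ctx, int & n_tokens_out) {
        const uint8_t * kv     = llama_get_kv_cache(ctx);
        const size_t    n_size = llama_get_kv_cache_size(ctx);
        n_tokens_out = llama_get_kv_cache_token_count(ctx);
        return std::vector<uint8_t>(kv, kv + n_size);
    }

    // put a previously saved snapshot back into the context
    static void restore_kv(struct llama_context * ctx, const std::vector<uint8_t> & kv, int n_tokens) {
        llama_set_kv_cache(ctx, kv.data(), kv.size(), n_tokens);
    }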
@@ -135,9 +155,9 @@
              const llama_token * last_n_tokens_data,
                            int   last_n_tokens_size,
                            int   top_k,
-                        double   top_p,
-                        double   temp,
-                        double   repeat_penalty);
+                         float   top_p,
+                         float   temp,
+                         float   repeat_penalty);
 
     // Performance information
     LLAMA_API void llama_print_timings(struct llama_context * ctx);
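
Note: sampling parameters are now single precision, so call sites pass float values. A sketch (assumption: the enclosing declaration is llama_sample_top_p_top_k, as in llama.cpp of this vintage):

    #include <vector>
    #include "llama.h"

    static llama_token sample_next(struct llama_context * ctx, const std::vector<llama_token> & last_n) {
        // example values: top_k = 40, top_p = 0.95, temp = 0.8, repeat_penalty = 1.1
        return llama_sample_top_p_top_k(ctx, last_n.data(), (int) last_n.size(),
                                        40, 0.95f, 0.80f, 1.10f);
    }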
@@ -150,4 +170,4 @@
 }
 #endif
 
-#endif
+#endif // LLAMA_H
examples/talk-llama/llama_internal.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+// Internal header to be included by llama.cpp and tests/benchmarks only.
+
+#ifndef LLAMA_INTERNAL_H
+#define LLAMA_INTERNAL_H
+
+#include <vector>
+#include <string>
+struct ggml_tensor;
+
+std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
+
+#endif // LLAMA_INTERNAL_H
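
Note: this hook exposes the model's tensor map for tests and benchmarks. A debugging sketch (illustration only; the consuming code is assumed, not part of this commit):

    #include <cstdio>
    #include "llama_internal.h"

    static void dump_tensor_names(struct llama_context * ctx) {
        for (const auto & it : llama_internal_get_tensor_map(ctx)) {
            std::printf("%s\n", it.first.c_str()); // tensor name; inspecting shapes would need ggml.h
        }
    }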
examples/talk-llama/llama_util.h (new executable file, 383 lines)
@@ -0,0 +1,383 @@
+// Internal header to be included only by llama.cpp.
+// Contains wrappers around OS interfaces.
+
+#ifndef LLAMA_UTIL_H
+#define LLAMA_UTIL_H
+
+#include <cstdio>
+#include <cstdint>
+#include <cerrno>
+#include <cstring>
+#include <cstdarg>
+#include <cstdlib>
+#include <climits>
+
+#include <string>
+#include <vector>
+
+#ifdef __has_include
+#if __has_include(<unistd.h>)
+#include <unistd.h>
+#if defined(_POSIX_MAPPED_FILES)
+#include <sys/mman.h>
+#endif
+#endif
+#endif
+
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#define NOMINMAX
+#include <windows.h>
+#include <io.h>
+#include <stdio.h> // for _fseeki64
+#endif
+
+#define LLAMA_ASSERT(x) \
+    do { \
+        if (!(x)) { \
+            fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
+            abort(); \
+        } \
+    } while (0)
+
+#ifdef __GNUC__
+__attribute__((format(printf, 1, 2)))
+#endif
+static std::string format(const char * fmt, ...) {
+    va_list ap, ap2;
+    va_start(ap, fmt);
+    va_copy(ap2, ap);
+    int size = vsnprintf(NULL, 0, fmt, ap);
+    LLAMA_ASSERT(size >= 0 && size < INT_MAX);
+    std::vector<char> buf(size + 1);
+    int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
+    LLAMA_ASSERT(size2 == size);
+    va_end(ap2);
+    va_end(ap);
+    return std::string(buf.data(), size);
+};
+
+struct llama_file {
+    // use FILE * so we don't have to re-open the file to mmap
+    FILE * fp;
+    size_t size;
+
+    llama_file(const char * fname, const char * mode) {
+        fp = std::fopen(fname, mode);
+        if (fp == NULL) {
+            throw format("failed to open %s: %s", fname, std::strerror(errno));
+        }
+        seek(0, SEEK_END);
+        size = tell();
+        seek(0, SEEK_SET);
+    }
+
+    size_t tell() const {
+#ifdef _WIN32
+        __int64 ret = _ftelli64(fp);
+#else
+        long ret = std::ftell(fp);
+#endif
+        LLAMA_ASSERT(ret != -1); // this really shouldn't fail
+        return (size_t) ret;
+    }
+
+    void seek(size_t offset, int whence) {
+#ifdef _WIN32
+        int ret = _fseeki64(fp, (__int64) offset, whence);
+#else
+        int ret = std::fseek(fp, (long) offset, whence);
+#endif
+        LLAMA_ASSERT(ret == 0); // same
+    }
+
+    void read_raw(void * ptr, size_t size) {
+        if (size == 0) {
+            return;
+        }
+        errno = 0;
+        std::size_t ret = std::fread(ptr, size, 1, fp);
+        if (ferror(fp)) {
+            throw format("read error: %s", strerror(errno));
+        }
+        if (ret != 1) {
+            throw std::string("unexpectedly reached end of file");
+        }
+    }
+
+    std::uint32_t read_u32() {
+        std::uint32_t ret;
+        read_raw(&ret, sizeof(ret));
+        return ret;
+    }
+
+    std::string read_string(std::uint32_t len) {
+        std::vector<char> chars(len);
+        read_raw(chars.data(), len);
+        return std::string(chars.data(), len);
+    }
+
+    void write_raw(const void * ptr, size_t size) {
+        if (size == 0) {
+            return;
+        }
+        errno = 0;
+        size_t ret = std::fwrite(ptr, size, 1, fp);
+        if (ret != 1) {
+            throw format("write error: %s", strerror(errno));
+        }
+    }
+
+    void write_u32(std::uint32_t val) {
+        write_raw(&val, sizeof(val));
+    }
+
+    ~llama_file() {
+        if (fp) {
+            std::fclose(fp);
+        }
+    }
+};
+
+#if defined(_WIN32)
+static std::string llama_format_win_err(DWORD err) {
+    LPSTR buf;
+    size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+                                 NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
+    if (!size) {
+        return "FormatMessageA failed";
+    }
+    std::string ret(buf, size);
+    LocalFree(buf);
+    return ret;
+}
+#endif
+
+struct llama_mmap {
+    void * addr;
+    size_t size;
+
+    llama_mmap(const llama_mmap &) = delete;
+
+#ifdef _POSIX_MAPPED_FILES
+    static constexpr bool SUPPORTED = true;
+
+    llama_mmap(struct llama_file * file) {
+        size = file->size;
+        int fd = fileno(file->fp);
+        int flags = MAP_SHARED;
+#ifdef __linux__
+        flags |= MAP_POPULATE;
+#endif
+        addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
+        close(fd);
+        if (addr == MAP_FAILED) {
+            throw format("mmap failed: %s", strerror(errno));
+        }
+
+        // Advise the kernel to preload the mapped memory
+        if (madvise(addr, file->size, MADV_WILLNEED)) {
+            fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
+                    strerror(errno));
+        }
+    }
+
+    ~llama_mmap() {
+        munmap(addr, size);
+    }
+#elif defined(_WIN32)
+    static constexpr bool SUPPORTED = true;
+
+    llama_mmap(struct llama_file * file) {
+        size = file->size;
+
+        HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
+
+        HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
+        DWORD error = GetLastError();
+        CloseHandle(hFile);
+
+        if (hMapping == NULL) {
+            throw format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str());
+        }
+
+        addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
+        error = GetLastError();
+        CloseHandle(hMapping);
+
+        if (addr == NULL) {
+            throw format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str());
+        }
+
+        // Advise the kernel to preload the mapped memory
+        WIN32_MEMORY_RANGE_ENTRY range;
+        range.VirtualAddress = addr;
+        range.NumberOfBytes = (SIZE_T)size;
+        if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
+            fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
+                    llama_format_win_err(GetLastError()).c_str());
+        }
+    }
+
+    ~llama_mmap() {
+        if (!UnmapViewOfFile(addr)) {
+            fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
+                    llama_format_win_err(GetLastError()).c_str());
+        }
+    }
+#else
+    static constexpr bool SUPPORTED = false;
+
+    llama_mmap(struct llama_file *) {
+        throw std::string("mmap not supported");
+    }
+#endif
+};
+
+// Represents some region of memory being locked using mlock or VirtualLock;
+// will automatically unlock on destruction.
+struct llama_mlock {
+    void * addr = NULL;
+    size_t size = 0;
+    bool failed_already = false;
+
+    llama_mlock() {}
+    llama_mlock(const llama_mlock &) = delete;
+
+    ~llama_mlock() {
+        if (size) {
+            raw_unlock(addr, size);
+        }
+    }
+
+    void init(void * addr) {
+        LLAMA_ASSERT(this->addr == NULL && this->size == 0);
+        this->addr = addr;
+    }
+
+    void grow_to(size_t target_size) {
+        LLAMA_ASSERT(addr);
+        if (failed_already) {
+            return;
+        }
+        size_t granularity = lock_granularity();
+        target_size = (target_size + granularity - 1) & ~(granularity - 1);
+        if (target_size > size) {
+            if (raw_lock((uint8_t *) addr + size, target_size - size)) {
+                size = target_size;
+            } else {
+                failed_already = true;
+            }
+        }
+    }
+
+#ifdef _POSIX_MEMLOCK_RANGE
+    static constexpr bool SUPPORTED = true;
+
+    size_t lock_granularity() {
+        return (size_t) sysconf(_SC_PAGESIZE);
+    }
+
+    #ifdef __APPLE__
+        #define MLOCK_SUGGESTION \
+            "Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
+            "decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
+    #else
+        #define MLOCK_SUGGESTION \
+            "Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
+    #endif
+
+    bool raw_lock(const void * addr, size_t size) {
+        if (!mlock(addr, size)) {
+            return true;
+        } else {
+            fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n" MLOCK_SUGGESTION,
+                    size, this->size, std::strerror(errno));
+            return false;
+        }
+    }
+
+    #undef MLOCK_SUGGESTION
+
+    void raw_unlock(void * addr, size_t size) {
+        if (munlock(addr, size)) {
+            fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
+        }
+    }
+#elif defined(_WIN32)
+    static constexpr bool SUPPORTED = true;
+
+    size_t lock_granularity() {
+        SYSTEM_INFO si;
+        GetSystemInfo(&si);
+        return (size_t) si.dwPageSize;
+    }
+
+    bool raw_lock(void * addr, size_t size) {
+        for (int tries = 1; ; tries++) {
+            if (VirtualLock(addr, size)) {
+                return true;
+            }
+            if (tries == 2) {
+                fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
+                        size, this->size, llama_format_win_err(GetLastError()).c_str());
+                return false;
+            }
+
+            // It failed but this was only the first try; increase the working
+            // set size and try again.
+            SIZE_T min_ws_size, max_ws_size;
+            if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
+                fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
+                        llama_format_win_err(GetLastError()).c_str());
+                return false;
+            }
+            // Per MSDN: "The maximum number of pages that a process can lock
+            // is equal to the number of pages in its minimum working set minus
+            // a small overhead."
+            // Hopefully a megabyte is enough overhead:
+            size_t increment = size + 1048576;
+            // The minimum must be <= the maximum, so we need to increase both:
+            min_ws_size += size;
+            max_ws_size += size;
+            if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
+                fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
+                        llama_format_win_err(GetLastError()).c_str());
+                return false;
+            }
+        }
+    }
+
+    void raw_unlock(void * addr, size_t size) {
+        if (!VirtualUnlock(addr, size)) {
+            fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
+                    llama_format_win_err(GetLastError()).c_str());
+        }
+    }
+#else
+    static constexpr bool SUPPORTED = false;
+
+    void raw_lock(const void * addr, size_t size) {
+        fprintf(stderr, "warning: mlock not supported on this system\n");
+    }
+
+    void raw_unlock(const void * addr, size_t size) {}
+#endif
+};
+
+// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
+struct llama_buffer {
+    uint8_t * addr = NULL;
+    size_t size = 0;
+
+    void resize(size_t size) {
+        delete[] addr;
+        addr = new uint8_t[size];
+        this->size = size;
+    }
+
+    ~llama_buffer() {
+        delete[] addr;
+    }
+};
+#endif
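
Note: these RAII wrappers are what the model loader builds on. A sketch of how they compose (assumption, not from this commit):

    #include "llama_util.h"

    static void map_model_file(const char * fname, bool want_mlock) {
        llama_file file(fname, "rb");          // throws a std::string on open failure
        if (!llama_mmap::SUPPORTED) {
            return;                            // no mmap on this platform
        }
        llama_mmap mapping(&file);             // maps the whole file read-only

        llama_mlock lock;
        if (want_mlock && llama_mlock::SUPPORTED) {
            lock.init(mapping.addr);
            lock.grow_to(mapping.size);        // best effort; logs a warning and stops on failure
        }
        // mapping and lock release themselves when they go out of scope
    }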