
Commit 2d47404

llamafile : improve moe prompt eval speed on cpu
This change introduces a llamafile_mixmul() API that allows tinyBLAS to speed up "Mixture of Experts" models. On my Threadripper, Mixtral's 8x7b F16 weights now process prompts 2x faster. I'm also seeing a 60 percent improvement with Mixtral 8x22b Q4_0. The same applies to Q8_0, which is also supported by tinyBLAS. MoE models spend the majority of their time inside MUL_MAT_ID rather than MUL_MAT, which is why llamafile_sgemm was not able to help them before. llamafile_mixmul works by decomposing the mixmul operation into sgemm calls.
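The last sentence is the key mechanism, so here is a minimal sketch of the decomposition idea. It is illustrative only: the function names, signatures, and one-expert-per-token routing below are simplifications invented for this example, not the actual tinyBLAS/llamafile API (real MoE layers such as Mixtral route each token to its top-k experts, and the real code handles quantized types and threading). The idea is that MUL_MAT_ID routes each activation row to an expert, so the rows that share an expert can be gathered into a contiguous block and handed to a single dense sgemm call instead of being processed as many per-row dot products; the gather buffer this needs is the analogue of the extra work-buffer space that ggml_graph_plan reserves below via llamafile_mixmul_needs().

```c
/*
 * Illustrative sketch only -- not the tinyBLAS implementation.
 * mixmul_sketch() and ref_sgemm() are hypothetical names.
 */
#include <stddef.h>
#include <string.h>

/* reference gemm: C[m x n] = A[m x k] * B^T, with B stored row-major as [n x k] */
static void ref_sgemm(int m, int n, int k,
                      const float *A, const float *B, float *C) {
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++) {
            float acc = 0.0f;
            for (int p = 0; p < k; p++)
                acc += A[(size_t)i * k + p] * B[(size_t)j * k + p];
            C[(size_t)i * n + j] = acc;
        }
}

/*
 * experts: n_expert weight matrices, each [n x k], stored back to back
 * x:       n_tok activation rows of length k
 * route:   route[t] = expert chosen for token t (simplified: one expert per token)
 * y:       n_tok output rows of length n
 * scratch: at least n_tok * (k + n) floats
 */
static void mixmul_sketch(int n_expert, int n_tok, int n, int k,
                          const float *experts, const float *x,
                          const int *route, float *y, float *scratch) {
    for (int e = 0; e < n_expert; e++) {
        /* gather the rows routed to expert e into one contiguous block */
        int m = 0;
        for (int t = 0; t < n_tok; t++)
            if (route[t] == e)
                memcpy(scratch + (size_t)m++ * k, x + (size_t)t * k, k * sizeof(float));
        if (m == 0)
            continue;

        /* one dense sgemm per expert instead of m separate row products */
        float *out = scratch + (size_t)n_tok * k;
        ref_sgemm(m, n, k, scratch, experts + (size_t)e * n * k, out);

        /* scatter the results back to their original token positions */
        m = 0;
        for (int t = 0; t < n_tok; t++)
            if (route[t] == e)
                memcpy(y + (size_t)t * n, out + (size_t)m++ * n, n * sizeof(float));
    }
}
```

Once the rows are regrouped this way, each per-expert block is an ordinary dense matmul, which is exactly the shape of work tinyBLAS's sgemm kernels are built to accelerate.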
1 parent: 201cc11

6 files changed: +734 −219 lines


common/common.cpp

+3 −3

```diff
@@ -74,7 +74,7 @@
 using json = nlohmann::ordered_json;
 
 int32_t get_num_physical_cores() {
-#ifdef __linux__
+#if defined(__linux__) || defined(__COSMOPOLITAN__)
     // enumerate the set of thread siblings, num entries is num cores
     std::unordered_set<std::string> siblings;
     for (uint32_t cpu=0; cpu < UINT32_MAX; ++cpu) {
@@ -109,7 +109,7 @@ int32_t get_num_physical_cores() {
     return n_threads > 0 ? (n_threads <= 4 ? n_threads : n_threads / 2) : 4;
 }
 
-#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
+#if defined(__x86_64__) && (defined(__linux__) || defined(__COSMOPOLITAN__)) && !defined(__ANDROID__)
 #include <pthread.h>
 
 static void cpuid(unsigned leaf, unsigned subleaf,
@@ -163,7 +163,7 @@ static int count_math_cpus(int cpu_count) {
  * Returns number of CPUs on system that are useful for math.
  */
 int get_math_cpu_count() {
-#if defined(__x86_64__) && defined(__linux__) && !defined(__ANDROID__)
+#if defined(__x86_64__) && (defined(__linux__) || defined(__COSMOPOLITAN__)) && !defined(__ANDROID__)
     int cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
     if (cpu_count < 1) {
         return get_num_physical_cores();
```

ggml-impl.h

+3 −0

```diff
@@ -17,6 +17,9 @@
 #define MIN(a, b) ((a) < (b) ? (a) : (b))
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
+// some compilers don't provide _mm256_set_m128i, e.g. gcc 7
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+
 #if defined(_WIN32)
 
 #define m512bh(p) p
```

ggml-quants.c

+0 −3

```diff
@@ -28,9 +28,6 @@
 
 #define UNUSED GGML_UNUSED
 
-// some compilers don't provide _mm256_set_m128i, e.g. gcc 7
-#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
-
 #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
 // multiply int8_t, add results pairwise twice
 static inline __m128i mul_sum_i8_pairs(const __m128i x, const __m128i y) {
```

ggml.c

+13 −1

```diff
@@ -12687,11 +12687,16 @@ static void ggml_compute_forward_mul_mat_id(
     const struct ggml_tensor * src1 = dst->src[1];
     const struct ggml_tensor * ids = dst->src[2];
 
-    GGML_TENSOR_BINARY_OP_LOCALS
+#if GGML_USE_LLAMAFILE
+    if (llamafile_mixmul(params, src0, src1, ids, dst))
+        return;
+#endif
 
     const int ith = params->ith;
     const int nth = params->nth;
 
+    GGML_TENSOR_BINARY_OP_LOCALS
+
     const enum ggml_type type = src0->type;
 
     const bool src1_cont = ggml_is_contiguous(src1);
@@ -20167,6 +20172,9 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
                 cur = 0;
                 const struct ggml_tensor * src0 = node->src[0];
                 const struct ggml_tensor * src1 = node->src[1];
+#if GGML_USE_LLAMAFILE
+                const struct ggml_tensor * src2 = node->src[2];
+#endif
                 const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
                 if (src1->type != vec_dot_type) {
                     cur += ggml_row_size(vec_dot_type, ggml_nelements(src1));
@@ -20175,6 +20183,10 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
                     cur += GGML_PAD(cur, sizeof(int64_t)); // align
                     cur += n_as * sizeof(int64_t); // matrix_row_counts
                     cur += n_as * src1->ne[2] * sizeof(int64_t); // matrix_rows
+#if GGML_USE_LLAMAFILE
+                    size_t cur2 = llamafile_mixmul_needs(src0, src1, src2);
+                    cur = cur > cur2 ? cur : cur2;
+#endif
                 } break;
             case GGML_OP_OUT_PROD:
                 {
```
