LOG("warming up the model with an empty run\n");
const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
- llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
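+ // do not exceed the configured batch size when evaluating the warmup tokens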
+ llama_eval(lctx, tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, params.n_threads);
llama_reset_timings(lctx);
}
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
- //// broadcast
- //if (tpitg == 0) {
- // sum[0] /= ne00;
- //}
- //threadgroup_barrier(mem_flags::mem_threadgroup);
+ // broadcast
+ if (tpitg == 0) {
+ sum[0] /= ne00;
+ }
+ threadgroup_barrier(mem_flags::mem_threadgroup);
const float mean = sum[0];
- // recenter and VARIANCE
+ // recenter
device float * y = dst + tgpig*ne00;
- sum[tpitg] = 0.0f;
for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
y[i00] = x[i00] - mean;
+ }
+
+ // VARIANCE
+ // parallel sum
+ //
+ // WARNING: combining this loop with the one above will give you wrong results for nth == 256
+ // I have no idea why, so for now I am keeping them separate. But this behavior is very concerning.
+ // Tested with:
+ // ./perplexity -m ./falcon-7b/ggml-model-q4_0.gguf -f wiki.test.raw -ngl 1 -t 4
+ //
+ sum[tpitg] = 0.0f;
+ for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
sum[tpitg] += y[i00] * y[i00];
}
- //// VARIANCE
- //// parallel sum
- //sum[tpitg] = 0.0f;
- //for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
- // sum[tpitg] += y[i00] * y[i00];
- //}
// reduce
threadgroup_barrier(mem_flags::mem_threadgroup);
for (uint i = ntg/2; i > 0; i /= 2) {
}
threadgroup_barrier(mem_flags::mem_threadgroup);
}
- //// broadcast
- //if (tpitg == 0) {
- // sum[0] /= ne00;
- //}
- //threadgroup_barrier(mem_flags::mem_threadgroup);
+ // broadcast
+ if (tpitg == 0) {
+ sum[0] /= ne00;
+ }
+ threadgroup_barrier(mem_flags::mem_threadgroup);
const float variance = sum[0];
const float scale = 1.0f/sqrt(variance + eps);
}
}
-
kernel void kernel_rms_norm(
device const void * src0,
device float * dst,
}
}
}
-
}
kernel void kernel_alibi_f32(