static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
#endif
+struct ggml_tensor_extra_gpu {
+ void * data_device[GGML_CUDA_MAX_DEVICES]; // one pointer per device, used for split tensors
+ cudaEvent_t events[GGML_CUDA_MAX_DEVICES]; // events for synchronizing multiple GPUs
+};
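+
+// A minimal sketch of the synchronization pattern these events enable, assuming
+// hypothetical names `main_id`, `stream_main` and `stream_other`; illustrative only,
+// the real flow is in the multi-device op loop below:
+//
+//   CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[main_id], cudaEventDisableTiming));
+//   // ... enqueue the work that produces the input data on stream_main ...
+//   CUDA_CHECK(cudaEventRecord(extra->events[main_id], stream_main));      // mark the point where the input is ready
+//   CUDA_CHECK(cudaStreamWaitEvent(stream_other, extra->events[main_id])); // stream_other waits without blocking the host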
+
static __global__ void add_f32(const float * x, const float * y, float * dst, const int k) {
const int i = blockDim.x*blockIdx.x + threadIdx.x;
} else {
GGML_ASSERT(false);
}
- CUDA_CHECK(cudaGetLastError());
(void) src1;
(void) dst;
// compute
mul_f32_cuda(src0_ddf_i01, src1_ddf_i01, dst_ddf_i01, ne00, ne10, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
}
(void) dst;
// compute
silu_f32_cuda(src0_ddf_i, dst_ddf_i, ne00*i01_diff, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
(void) src1;
(void) dst;
// compute
rms_norm_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
(void) src1;
(void) dst;
GGML_ASSERT(false);
break;
}
- CUDA_CHECK(cudaGetLastError());
#ifdef GGML_CUDA_DMMV_F16
if (src1_convert_f16) {
// compute
rope_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, p, theta_scale, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
(void) dst;
(void) src0_ddq_i;
// compute
diag_mask_inf_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, ne01, n_past, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
(void) dst;
(void) src0_ddq_i;
// compute
soft_max_f32_cuda(src0_ddf_i, dst_ddf_i, ne00, i01_diff, cudaStream_main);
- CUDA_CHECK(cudaGetLastError());
(void) src1;
(void) dst;
size_t src1_asf[GGML_CUDA_MAX_DEVICES] = {0};
size_t dst_asf[GGML_CUDA_MAX_DEVICES] = {0};
- // if multiple GPUs are used they need to wait for the main GPU to finish
+ // if multiple devices are used they need to wait for the main device;
+ // an event recorded here marks the point at which the main device has finished calculating the input data
if (split && g_device_count > 1) {
CUDA_CHECK(cudaSetDevice(g_main_device));
- CUDA_CHECK(cudaDeviceSynchronize());
+ CUDA_CHECK(cudaEventRecord(src0_extra->events[g_main_device], g_cudaStreams_main[g_main_device]));
}
for (int id = 0; id < g_device_count; ++id) {
int64_t row_diff = row_high - row_low;
cudaSetDevice(id);
+ cudaStream_t cudaStream_main = g_cudaStreams_main[id];
+
+ // wait for main GPU data if necessary
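+ // cudaStreamWaitEvent does not block the host; it only makes work submitted to this stream afterwards wait for the recorded event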
+ if (split && id != g_main_device) {
+ CUDA_CHECK(cudaStreamWaitEvent(cudaStream_main, src0_extra->events[g_main_device]));
+ }
if (src0_on_device && src0_is_contiguous) {
if (src0_is_f32) {
}
const int64_t i11 = i13*ne12 + i12;
- cudaStream_t cudaStream_main = g_cudaStreams_main[id];
-
// for split tensors the data begins at i0 == i0_offset_low
char * src0_ddq_i = src0_ddq[id] + (i0 - i0_offset_low)*src0_stride*src0_ts/src0_bs;
float * src0_ddf_i = src0_ddf[id] + (i0 - i0_offset_low)*src0_stride;
// do the computation
op(src0, src1, dst, src0_ddq_i, src0_ddf_i, src1_ddf_i, dst_ddf_i, i02, i01_low, i01_high, i11, cudaStream_main);
+ CUDA_CHECK(cudaGetLastError());
// copy dst to host or other device if necessary
if (!dst_on_device) {
CUDA_CHECK(cudaMemcpyAsync(dhf_dst_i, dst_ddf_i, dst_stride*sizeof(float), kind, cudaStream_main));
}
}
+
+ // record an event to signal to the main device that this device has finished its part of the work
+ if (split && g_device_count > 1 && id != g_main_device) {
+ CUDA_CHECK(cudaEventRecord(src0_extra->events[id], cudaStream_main));
+ }
}
}
}
}
CUDA_CHECK(cudaSetDevice(id));
- CUDA_CHECK(cudaDeviceSynchronize());
if (src0_asq[id] > 0) {
ggml_cuda_pool_free(src0_ddq[id], src0_asq[id]);
ggml_cuda_pool_free(dst_ddf[id], dst_asf[id]);
}
}
+
+ // the main device's stream waits for all other devices to finish
+ if (split && g_device_count > 1) {
+ CUDA_CHECK(cudaSetDevice(g_main_device));
+ for (int id = 0; id < g_device_count; ++id) {
+ if (id != g_main_device) {
+ CUDA_CHECK(cudaStreamWaitEvent(g_cudaStreams_main[g_main_device], src0_extra->events[id]));
+ }
+ }
+ }
+
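+ // if the final result is needed on the host, make sure all queued work and asynchronous copies have completed before returning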
+ if (dst->backend == GGML_BACKEND_CPU) {
+ CUDA_CHECK(cudaSetDevice(g_main_device));
+ CUDA_CHECK(cudaDeviceSynchronize());
+ }
}
void ggml_cuda_add(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
cudaMemcpy(buf, buf_host, size, cudaMemcpyHostToDevice);
extra->data_device[id] = buf;
+
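+ // only split tensors are used across multiple devices and need events for synchronization;
+ // timing is disabled because the events are only used for ordering work, which reduces overhead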
+ if (backend == GGML_BACKEND_GPU_SPLIT) {
+ CUDA_CHECK(cudaEventCreateWithFlags(&extra->events[id], cudaEventDisableTiming));
+ }
}
tensor->extra = extra;
ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) tensor->extra;
for (int id = 0; id < g_device_count; ++id) {
- if (extra->data_device[id] == nullptr) {
- continue;
+ if (extra->data_device[id] != nullptr) {
+ CUDA_CHECK(cudaSetDevice(id));
+ CUDA_CHECK(cudaFree(extra->data_device[id]));
}
- CUDA_CHECK(cudaSetDevice(id));
- CUDA_CHECK(cudaFree(extra->data_device[id]));
+ if (extra->events[id] != nullptr) {
+ CUDA_CHECK(cudaSetDevice(id));
+ CUDA_CHECK(cudaEventDestroy(extra->events[id]));
+ }
}
delete extra;