#define GGML_CUDA_CC_VOLTA 700
#define GGML_CUDA_CC_TURING 750
#define GGML_CUDA_CC_AMPERE 800
-#define GGML_CUDA_CC_OFFSET_AMD 1000000
+#define GGML_CUDA_CC_OFFSET_AMD 0x1000000
// GCN/CDNA, wave size is 64
-#define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 803) // Tonga, Fiji, Polaris, minimum for fast fp16
-#define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 900) // Vega56/64, minimum for fp16 dual issue
-#define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 906) // MI50/Radeon VII, minimum for dp4a
-#define GGML_CUDA_CC_CDNA (GGML_CUDA_CC_OFFSET_AMD + 908) // MI100, minimum for MFMA, acc registers
-#define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 910) // MI210, minimum acc register renameing
-#define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 942) // MI300
+#define GGML_CUDA_CC_GCN4 (GGML_CUDA_CC_OFFSET_AMD + 0x803) // Tonga, Fiji, Polaris, minimum for fast fp16
+#define GGML_CUDA_CC_VEGA (GGML_CUDA_CC_OFFSET_AMD + 0x900) // Vega56/64, minimum for fp16 dual issue
+#define GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 0x906) // MI50/Radeon VII, minimum for dp4a
+#define GGML_CUDA_CC_CDNA (GGML_CUDA_CC_OFFSET_AMD + 0x908) // MI100, minimum for MFMA, acc registers
+#define GGML_CUDA_CC_CDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x910) // MI210, minimum acc register renaming
+#define GGML_CUDA_CC_CDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x942) // MI300
// RDNA removes MFMA, dp4a, xnack, acc registers, wave size is 32
-#define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 1010) // RX 5000
-#define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 1030) // RX 6000, minimum for dp4a
-#define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 1100) // RX 7000, minimum for WMMA
+#define GGML_CUDA_CC_RDNA1 (GGML_CUDA_CC_OFFSET_AMD + 0x1010) // RX 5000
+#define GGML_CUDA_CC_RDNA2 (GGML_CUDA_CC_OFFSET_AMD + 0x1030) // RX 6000, minimum for dp4a
+#define GGML_CUDA_CC_RDNA3 (GGML_CUDA_CC_OFFSET_AMD + 0x1100) // RX 7000, minimum for WMMA
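+// AMD compute capabilities are reported as GGML_CUDA_CC_OFFSET_AMD plus the hex arch
+// number (major*0x100 + minor*0x10 + stepping), so e.g. a gfx906 device is reported
+// as GGML_CUDA_CC_VEGA20 (GGML_CUDA_CC_OFFSET_AMD + 0x906).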
#define GGML_CUDA_CC_QY1 210
#define GGML_CUDA_CC_QY2 220
#endif
}
+#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+static int ggml_cuda_parse_id(char devName[]) {
+ // A list of possible Target IDs can be found under the rocclr/clr repo in device.cpp
+ // these values are not stable, so this is susceptible to breakage
+ // https://github.com/ROCm/clr/blob/amd-staging/rocclr/device/device.cpp
+ int archMajor = 0x0;
+ int archMinor = 0x0;
+ int archNum = GGML_CUDA_CC_OFFSET_AMD;
+ int archLen = strlen(devName);
+ char archName[archLen + 1];
+
+ // strip leading 'gfx' while copying into our buffer
+ if (archLen > 3) {
+ strcpy(archName, &devName[3]);
+ archLen -= 3;
+ }
+
+ // trim trailing :xnack- or :sramecc- statuses
+ archLen = strcspn(archName, ":");
+ archName[archLen] = '\0';
+
+ // tease out the version information
+ if (archLen > 8) {
+ // versions labeled generic use '-' as delimiter
+ // strip the trailing "-generic" then iterate through what remains
+ if ((strstr(archName, "-generic"))) {
+ archName[archLen - 8] = '\0';
+ char * pch;
+ if ((pch = strtok(archName, "-"))) {
+ archMajor = (int)strtoul(pch, 0, 16);
+ if ((pch = strtok(NULL, "-"))) {
+ archMinor = 0x10 * (int)strtoul(pch, 0, 16);
+ }
+ }
+ }
+ } else if (archLen >= 3) {
+ // last two digits should be the minor * 0x10 + stepping
+ archMinor = (int)strtoul(&archName[archLen - 2], 0, 16);
+ archName[archLen - 2] = '\0';
+
+ // only the major version remains
+ archMajor = (int)strtoul(archName, 0, 16);
+ }
+ archNum += archMajor * 0x100;
+ archNum += archMinor;
+ return archNum;
+}
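+
+// Usage sketch (not part of the original patch; the helper name is hypothetical):
+// exercises ggml_cuda_parse_id() on two representative Target IDs to show how the
+// ":xnack-"/":sramecc+" suffixes and the "-generic" form are handled. It assumes the
+// GGML_CUDA_CC_* defines and ggml's GGML_ASSERT are visible in this translation unit.
+static void ggml_cuda_parse_id_selftest(void) {
+ char id_vega20[] = "gfx906:sramecc+:xnack-"; // feature suffixes are trimmed at the first ':'
+ char id_rdna2[] = "gfx10-3-generic"; // "-generic" is stripped, '-' separates major/minor
+
+ GGML_ASSERT(ggml_cuda_parse_id(id_vega20) == GGML_CUDA_CC_VEGA20); // OFFSET + 0x906
+ GGML_ASSERT(ggml_cuda_parse_id(id_rdna2) == GGML_CUDA_CC_RDNA2); // OFFSET + 0x1030
+}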
+#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
+
static ggml_cuda_device_info ggml_cuda_init() {
#ifdef __HIP_PLATFORM_AMD__
// Workaround for a rocBLAS bug when using multiple graphics cards:
cudaDeviceProp prop;
CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
- GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n", id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no");
info.default_tensor_split[id] = total_vram;
total_vram += prop.totalGlobalMem;
info.devices[id].smpb = prop.sharedMemPerBlock;
#if defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
info.devices[id].smpbo = prop.sharedMemPerBlock;
- info.devices[id].cc = 100*prop.major + 10*prop.minor + GGML_CUDA_CC_OFFSET_AMD;
+
+ info.devices[id].cc = ggml_cuda_parse_id(prop.gcnArchName);
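+ // a zero major byte means the Target ID could not be parsed; warn and fall back to prop.major/prop.minor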
+ if ((info.devices[id].cc & 0xff00) == 0x0) {
+ GGML_LOG_WARN("invalid architecture ID received for device %d %s: %s cc %d.%d\n",
+ id, prop.name, prop.gcnArchName, prop.major, prop.minor);
+
+ // Fallback to prop.major and prop.minor
+ if (prop.major > 0) {
+ info.devices[id].cc = GGML_CUDA_CC_OFFSET_AMD + prop.major * 0x100;
+ info.devices[id].cc += prop.minor * 0x10;
+ }
+ }
+ GGML_LOG_INFO(" Device %d: %s, %s (0x%x), VMM: %s\n",
+ id, prop.name, prop.gcnArchName, info.devices[id].cc & 0xffff, device_vmm ? "yes" : "no");
#else
info.devices[id].smpbo = prop.sharedMemPerBlockOptin;
info.devices[id].cc = 100*prop.major + 10*prop.minor;
+ GGML_LOG_INFO(" Device %d: %s, compute capability %d.%d, VMM: %s\n",
+ id, prop.name, prop.major, prop.minor, device_vmm ? "yes" : "no");
#endif // defined(GGML_USE_HIP) && defined(__HIP_PLATFORM_AMD__)
}