for (size_t il = 0; il < v_input.size(); ++il) {
// prepare output vector
struct ggml_tensor * ctrl_out = v_output[il];
- ggml_format_name(ctrl_out, "direction.%ld", il+1);
+ ggml_format_name(ctrl_out, "direction.%zu", il+1);
// calculate mean vector
struct ggml_tensor * t_layer = v_input[il];

// prepare output vector
struct ggml_tensor * ctrl_out = v_output[il];
- ggml_format_name(ctrl_out, "direction.%ld", il+1);
+ ggml_format_name(ctrl_out, "direction.%zu", il+1);
// run power_iteration
params.i_layer = il;

fout.write((const char *)data.data(), data.size());
}
- printf("%s : merged %ld tensors with lora adapters\n", __func__, n_merged);
- printf("%s : wrote %ld tensors to output file\n", __func__, trans.size());
+ printf("%s : merged %zu tensors with lora adapters\n", __func__, n_merged);
+ printf("%s : wrote %zu tensors to output file\n", __func__, trans.size());
}
void copy_tensor(struct ggml_tensor * base) {

const float scale = alpha ? adapters[i]->scale * alpha / rank : adapters[i]->scale;
delta = ggml_scale(ctx0, delta, scale);
cur = ggml_add(ctx0, delta, cur);
- printf("%s : + merging from adapter[%ld] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
+ printf("%s : + merging from adapter[%zu] type=%s\n", __func__, i, ggml_type_name(inp_a[i]->type));
printf("%s : input_scale=%f calculated_scale=%f rank=%d\n", __func__, adapters[i]->scale, scale, (int) inp_b[i]->ne[0]);
}
cur = ggml_cast(ctx0, cur, out->type);
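
Context for the changes above: the printed values are of type size_t (the loop indices il and i, and the .size() results), while %ld expects a long. On LLP64 platforms such as 64-bit Windows the two types have different widths, and GCC/Clang flag the mismatch with -Wformat; %zu is the C99/C++11 conversion specifier for size_t and is correct everywhere. A minimal standalone sketch of the same idea (illustrative only, not part of the patch):

    // Illustrative sketch: printing size_t values portably with %zu.
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<int> layers(3);
        for (size_t il = 0; il < layers.size(); ++il) {
            // printf("direction.%ld\n", il + 1);  // -Wformat warning: size_t is not
            //                                     // guaranteed to be long (LLP64)
            printf("direction.%zu\n", il + 1);     // %zu always matches size_t
        }
        printf("wrote %zu tensors\n", layers.size());  // .size() is also size_t here
        return 0;
    }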