 #define cudaDeviceCanAccessPeer hipDeviceCanAccessPeer
 #define cudaDeviceDisablePeerAccess hipDeviceDisablePeerAccess
 #define cudaDeviceEnablePeerAccess hipDeviceEnablePeerAccess
+#define cudaDeviceGetMemPool hipDeviceGetMemPool
 #define cudaDeviceProp hipDeviceProp_t
 #define cudaDeviceSynchronize hipDeviceSynchronize
 #define cudaError_t hipError_t

 #define cudaEvent_t hipEvent_t
 #define cudaEventDestroy hipEventDestroy
 #define cudaFree hipFree
+#define cudaFreeAsync hipFreeAsync
 #define cudaFreeHost hipHostFree
 #define cudaGetDevice hipGetDevice
 #define cudaGetDeviceCount hipGetDeviceCount
 #define cudaGetDeviceProperties hipGetDeviceProperties
 #define cudaGetErrorString hipGetErrorString
 #define cudaGetLastError hipGetLastError
 #define cudaMalloc hipMalloc
+#define cudaMallocFromPoolAsync hipMallocFromPoolAsync
 #define cudaMallocHost(ptr, size) hipHostMalloc(ptr, size, hipHostMallocDefault)
 #define cudaMemcpy hipMemcpy
 #define cudaMemcpy2DAsync hipMemcpy2DAsync

 #define cudaMemcpyDeviceToHost hipMemcpyDeviceToHost
 #define cudaMemcpyHostToDevice hipMemcpyHostToDevice
 #define cudaMemcpyKind hipMemcpyKind
+#define cudaMemPool_t hipMemPool_t
+#define cudaMemPoolAttrReleaseThreshold hipMemPoolAttrReleaseThreshold
+#define cudaMemPoolSetAttribute hipMemPoolSetAttribute
 #define cudaMemset hipMemset
 #define cudaMemsetAsync hipMemsetAsync
 #define cudaOccupancyMaxPotentialBlockSize hipOccupancyMaxPotentialBlockSize
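The new cudaMemPool* mappings, together with cudaFreeAsync and cudaMallocFromPoolAsync, cover CUDA's stream-ordered allocator (CUDA 11.2+), which the pool allocation calls later in this diff rely on. A minimal sketch of the call pattern these defines translate to HIP; `device`, `size`, and `stream` are illustrative placeholders, not names from this commit:

    // Fetch the device's default memory pool and let it keep freed blocks
    // cached instead of returning them to the OS at each synchronization.
    cudaMemPool_t pool;
    cudaDeviceGetMemPool(&pool, device);
    uint64_t threshold = UINT64_MAX;
    cudaMemPoolSetAttribute(pool, cudaMemPoolAttrReleaseThreshold, &threshold);

    // Allocation and free are ordered with the stream: no device-wide sync.
    void * ptr = nullptr;
    cudaMallocFromPoolAsync(&ptr, size, pool, stream);
    // ... enqueue kernels that use ptr on `stream` ...
    cudaFreeAsync(ptr, stream);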
@@ -4470,6 +4476,13 @@ static __device__ void cpy_1_f32_f16(const char * cxi, char * cdsti) {
     *dsti = __float2half(*xi);
 }

+static __device__ void cpy_1_f16_f16(const char * cxi, char * cdsti) {
+    const half * xi = (const half *) cxi;
+    half * dsti = (half *) cdsti;
+
+    *dsti = *xi;
+}
+
 template <cpy_kernel_t cpy_1>
 static __global__ void cpy_f32_f16(const char * cx, char * cdst, const int ne,
     const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
@@ -4723,6 +4736,25 @@ static __global__ void clamp_f32(const float * x, float * dst, const float min,
     dst[i] = x[i] < min ? min : (x[i] > max ? max : x[i]);
 }

+static __global__ void im2col_f32_f16(
+        const float * x, half * dst,
+        int ofs0, int ofs1, int IW, int IH, int CHW,
+        int s0, int s1, int p0, int p1, int d0, int d1) {
+    const int iiw = blockIdx.z * s0 + threadIdx.z * d0 - p0;
+    const int iih = blockIdx.y * s1 + threadIdx.y * d1 - p1;
+
+    const int offset_dst =
+        (threadIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z) * CHW +
+        (blockIdx.x * (blockDim.y * blockDim.z) + threadIdx.y * blockDim.z + threadIdx.z);
+
+    if (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW) {
+        dst[offset_dst] = __float2half(0.0f);
+    } else {
+        const int offset_src = threadIdx.x * ofs0 + blockIdx.x * ofs1;
+        dst[offset_dst] = __float2half(x[offset_src + iih * IW + iiw]);
+    }
+}
+
 template<int qk, int qr, dequantize_kernel_t dq>
 static void get_rows_cuda(const void * x, const int32_t * y, float * dst, const int nrows, const int ncols, cudaStream_t stream) {
     const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1);
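The im2col_f32_f16 kernel only makes sense together with the launch geometry chosen further down in im2col_f32_f16_cuda: gridDim = (IC, OH, OW) and blockDim = (N, KH, KW), so each thread owns one (ic, oh, ow, n, kh, kw) tuple. Under that assumption its index arithmetic is equivalent to this CPU reference loop (a sketch for clarity, not code from the commit):

    // dst layout: [N, OH, OW, IC*KH*KW]; out-of-bounds taps are zero-padded.
    for (int ic = 0; ic < IC; ic++)              // blockIdx.x
    for (int oh = 0; oh < OH; oh++)              // blockIdx.y
    for (int ow = 0; ow < OW; ow++)              // blockIdx.z
    for (int n  = 0; n  < N;  n++)               // threadIdx.x
    for (int kh = 0; kh < KH; kh++)              // threadIdx.y
    for (int kw = 0; kw < KW; kw++) {            // threadIdx.z
        const int iiw = ow * s0 + kw * d0 - p0;  // input col: stride, dilation, padding
        const int iih = oh * s1 + kh * d1 - p1;  // input row
        const int idst = (n * OH * OW + oh * OW + ow) * (IC * KH * KW)
                       + ic * KH * KW + kh * KW + kw;
        const bool oob = iih < 0 || iih >= IH || iiw < 0 || iiw >= IW;
        // ofs0/ofs1 are the batch and channel strides of x, in elements;
        // the kernel stores the result converted to half.
        dst[idst] = oob ? 0.0f : x[n * ofs0 + ic * ofs1 + iih * IW + iiw];
    }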
@@ -5612,6 +5644,16 @@ static void ggml_cpy_f32_f16_cuda(
     (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
 }

+static void ggml_cpy_f16_f16_cuda(
+    const char * cx, char * cdst, const int ne,
+    const int ne00, const int ne01, const int nb00, const int nb01, const int nb02,
+    const int ne10, const int ne11, const int nb10, const int nb11, const int nb12, cudaStream_t stream) {
+
+    const int num_blocks = (ne + CUDA_CPY_BLOCK_SIZE - 1) / CUDA_CPY_BLOCK_SIZE;
+    cpy_f32_f16<cpy_1_f16_f16><<<num_blocks, CUDA_CPY_BLOCK_SIZE, 0, stream>>>
+        (cx, cdst, ne, ne00, ne01, nb00, nb01, nb02, ne10, ne11, nb10, nb11, nb12);
+}
+
 static void scale_f32_cuda(const float * x, float * dst, const float scale, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_SCALE_BLOCK_SIZE - 1) / CUDA_SCALE_BLOCK_SIZE;
     scale_f32<<<num_blocks, CUDA_SCALE_BLOCK_SIZE, 0, stream>>>(x, dst, scale, k);
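Despite its f32_f16 name, cpy_f32_f16 is a template over a per-element cpy_kernel_t functor, which is why the F16-to-F16 launcher above only needs the new cpy_1_f16_f16 device function. The pattern, reduced to a standalone sketch (simplified names and a fixed element size, not the actual ggml code):

    typedef void (*cpy_kernel_t)(const char * src, char * dst);

    // Each instantiation inlines a different per-element converter.
    template <cpy_kernel_t cpy_1>
    static __global__ void cpy_generic(const char * src, char * dst,
                                       const int n, const int elem_src, const int elem_dst) {
        const int i = blockDim.x * blockIdx.x + threadIdx.x;
        if (i >= n) {
            return;
        }
        cpy_1(src + i * elem_src, dst + i * elem_dst);
    }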
@@ -5695,6 +5737,15 @@ static void soft_max_f32_cuda(const float * x, float * dst, const int ncols_x, c
     soft_max_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols_x);
 }

+static void im2col_f32_f16_cuda(const float * x, half * dst,
+    int OH, int IW, int IH, int OW, int IC,
+    int KH, int KW, int N, int ofs0, int ofs1,
+    int s0, int s1, int p0, int p1, int d0, int d1, cudaStream_t stream) {
+    dim3 block_nums(IC, OH, OW);
+    dim3 block_dims(N, KH, KW);
+    im2col_f32_f16<<<block_nums, block_dims, 0, stream>>>(x, dst, ofs0, ofs1, IW, IH, (IC * KH * KW), s0, s1, p0, p1, d0, d1);
+}
+
 // buffer pool for cuda
 #define MAX_CUDA_BUFFERS 256

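Note the launch shape: one block per (ic, oh, ow) output position and one thread per (n, kh, kw) element, so the batch-times-kernel-window volume must fit in a single block. On current CUDA hardware that means N * KH * KW must not exceed 1024 threads, and the z block dimension (KW here) is additionally capped at 64. A defensive caller could assert this before launching (illustrative, not part of the commit):

    assert((int64_t) N * KH * KW <= 1024 && KW <= 64
           && "im2col_f32_f16_cuda: block dimensions exceed device limits");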
@@ -6477,7 +6528,7 @@ inline void ggml_cuda_op_mul_mat_cublas(
             src1_as_f16 = (half *) ggml_cuda_pool_malloc_async(ne * sizeof(half), &src1_as, id, stream);
             to_fp16_cuda(src1_ddf_i, src1_as_f16, ne, stream);
         }
-        const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddq_i : src1_as_f16;
+        const half * src1_ptr = src1->type == GGML_TYPE_F16 ? (const half *) src1_ddf_i : src1_as_f16;
         size_t dst_f16_as = 0;
         half * dst_f16 = (half *) ggml_cuda_pool_malloc_async(row_diff*src1_ncols * sizeof(half), &dst_f16_as, id, stream);

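The one-line change above fixes the F16 fast path of the cuBLAS matmul: when src1 is already F16, src1_ptr now reads from src1_ddf_i, the buffer that carries the F16 input in this path, rather than casting the quantized-data buffer src1_ddq_i to half.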
@@ -6653,6 +6704,45 @@ inline void ggml_cuda_op_alibi(
     (void) src1_dd;
 }

+inline void ggml_cuda_op_im2col(
+    const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
+    const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F16);
+    GGML_ASSERT(src1->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F16);
+
+    const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+    const int32_t s1 = ((const int32_t*)(dst->op_params))[1];
+    const int32_t p0 = ((const int32_t*)(dst->op_params))[2];
+    const int32_t p1 = ((const int32_t*)(dst->op_params))[3];
+    const int32_t d0 = ((const int32_t*)(dst->op_params))[4];
+    const int32_t d1 = ((const int32_t*)(dst->op_params))[5];
+
+    const bool is_2D = ((const int32_t*)(dst->op_params))[6] == 1;
+
+    const int64_t N  = src1->ne[is_2D ? 3 : 2];
+    const int64_t IC = src1->ne[is_2D ? 2 : 1];
+    const int64_t IH = is_2D ? src1->ne[1] : 1;
+    const int64_t IW = src1->ne[0];
+
+    const int64_t KH = is_2D ? src0->ne[1] : 1;
+    const int64_t KW = src0->ne[0];
+
+    const int64_t OH = is_2D ? dst->ne[2] : 1;
+    const int64_t OW = dst->ne[1];
+
+    const size_t ofs0 = src1->nb[is_2D ? 3 : 2] / 4; // nb is byte offset, src is type float32
+    const size_t ofs1 = src1->nb[is_2D ? 2 : 1] / 4; // nb is byte offset, src is type float32
+
+    im2col_f32_f16_cuda(src1_dd, (half*) dst_dd,
+        OH, IW, IH, OW, IC, KH, KW, N,
+        ofs0, ofs1, s0, s1, p0, p1, d0, d1, main_stream);
+
+    (void) src0;
+    (void) src0_dd;
+}
+
 inline void ggml_cuda_op_diag_mask_inf(
     const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst,
     const float * src0_dd, const float * src1_dd, float * dst_dd, const cudaStream_t & main_stream) {
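ggml_cuda_op_im2col recovers its convolution parameters from dst->op_params, which the graph-building side is expected to have packed as seven int32 values in the order {s0, s1, p0, p1, d0, d1, is_2D}. A hedged sketch of the producing side, assuming a ggml_im2col() graph constructor with that parameter order (the constructor is not part of this diff, and `ctx`, `kernel`, `input` are illustrative names):

    // kernel is F16, input is F32, matching the asserts above.
    struct ggml_tensor * col = ggml_im2col(ctx, kernel, input,
            /*s0=*/1, /*s1=*/1,   // stride
            /*p0=*/1, /*p1=*/1,   // padding
            /*d0=*/1, /*d1=*/1,   // dilation
            /*is_2D=*/true);
    // For the 2D case, col is F16 with ne = [IC*KH*KW, OW, OH, N].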
@@ -7543,6 +7633,9 @@ static void ggml_cuda_cpy(const ggml_tensor * src0, const ggml_tensor * src1, gg
     } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
         ggml_cpy_f32_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
                               ne10, ne11, nb10, nb11, nb12, main_stream);
+    } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
+        ggml_cpy_f16_f16_cuda(src0_ddc, src1_ddc, ne, ne00, ne01, nb00, nb01, nb02,
+                              ne10, ne11, nb10, nb11, nb12, main_stream);
     } else {
         fprintf(stderr, "%s: unsupported type combination (%s to %s)\n", __func__,
                 ggml_type_name(src0->type), ggml_type_name(src1->type));
@@ -7574,6 +7667,10 @@ static void ggml_cuda_alibi(const ggml_tensor * src0, const ggml_tensor * src1,
     ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_alibi);
 }

+void ggml_cuda_im2col(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+    ggml_cuda_op_flatten(src0, src1, dst, ggml_cuda_op_im2col);
+}
+
 static void ggml_cuda_nop(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
     (void) src0;
     (void) src1;
@@ -7937,6 +8034,9 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_
         case GGML_OP_ALIBI:
             func = ggml_cuda_alibi;
             break;
+        case GGML_OP_IM2COL:
+            func = ggml_cuda_im2col;
+            break;
         default:
             return false;
     }