CANN/sip复数矩阵批量乘法
HCgemmBatched【免费下载链接】sip：本项目是CANN提供的一款高效、可靠的高性能信号处理算子加速库，基于华为Ascend AI处理器，专门为信号处理领域而设计。项目地址: https://gitcode.com/cann/sip

产品支持情况：
| 产品 | 是否支持 |
| --- | --- |
| Atlas 200I/500 A2 推理产品 | × |
| Atlas 推理系列产品 | × |
| Atlas 训练系列产品 | × |
| Atlas A3 训练系列产品/Atlas A3 推理系列产品 | √ |
| Atlas A2 训练系列产品/Atlas A2 推理系列产品 | √ |
| Ascend 950PR/Ascend 950DT | × |

功能说明：
| 接口 | 功能 |
| --- | --- |
| asdBlasMakeHCgemmBatchedPlan | 初始化该句柄对应的算子配置。 |
| asdBlasHCgemmBatched | 用于计算复数矩阵的乘积。 |

计算公式：

$$ C[i] = \alpha \cdot op(A[i]) \cdot op(B[i]) + \beta \cdot C[i] $$

示例：
输入“inTensorA[i]”为 [ [ 1i, 12i ], [ 13i, 14i ] ]，输入“inTensorB[i]”为 [ [ 2i, 22i ], [ 23i, 24i ] ]，输入“inTensorC[i]”为 [ [ 3i, 32i ], [ 33i, 34i ] ]。输入“transa”为 ASDBLAS_OP_N，输入“transb”为 ASDBLAS_OP_N。输入“m”为2，“n”为2，“k”为2，“alpha”为1i，“beta”为22i。输入“lda”为2，“ldb”为2，“ldc”为2。输入“batchCount”为1。调用“asdBlasHCgemmBatched”算子后，输出“C”为 [ [ -1519i, -2719i ], [ -3721i, -5713i ] ]。（注：本示例中的复数值在网页提取时可能丢失“+”号，例如“12i”疑为“1+2i”，请以官方文档为准。）

函数原型：

AspbStatus asdBlasMakeHCgemmBatchedPlan(asdBlasHandle handle)

AspbStatus asdBlasHCgemmBatched(asdBlasHandle handle, asdBlasOperation_t transa, asdBlasOperation_t transb, const int64_t m, const int64_t n, const int64_t k, const std::complex&lt;op::fp16_t&gt; alpha, aclTensor *A, const int64_t lda, aclTensor *B, const int64_t ldb, const std::complex&lt;op::fp16_t&gt; beta, aclTensor *C, const int64_t ldc, const int64_t batchCount)

asdBlasMakeHCgemmBatchedPlan参数说明：
| 参数名 | 输入/输出 | 描述 |
| --- | --- | --- |
| handle | asdBlasHandle，输入 | 算子的句柄 |

返回值：返回状态码，具体参见SiP返回码。

asdBlasHCgemmBatched参数说明：
| 参数名 | 输入/输出 | 描述 |
| --- | --- | --- |
| handle | asdBlasHandle，输入 | 算子的句柄 |
| transa | asdBlasOperation_t，输入 | 指定矩阵A是否需要转置，取值必须为ASDBLAS_OP_N。 |
| transb | asdBlasOperation_t，输入 | 指定矩阵B是否需要转置，取值必须为ASDBLAS_OP_N。 |
| m | int64_t，输入 | 矩阵C的行数，取值范围为{1-32}。 |
| n | int64_t，输入 | 矩阵C的列数，取值范围为{1-32}。 |
| k | int64_t，输入 | 矩阵A和B的公共维度，取值范围为{1-32}。 |
| lda | int64_t，输入 | A左右相邻元素间的内存地址偏移量，取值和k相等。 |
| ldb | int64_t，输入 | B左右相邻元素间的内存地址偏移量，取值和n相等。 |
| ldc | int64_t，输入 | C左右相邻元素间的内存地址偏移量，取值和n相等。 |
| A | aclTensor *，输入 | 输入的矩阵，对应公式中的A。数据类型支持COMPLEX32，数据格式支持ND，shape为[batchCount, m, k]。 |
| B | aclTensor *，输入 | 输入的矩阵，对应公式中的B。数据类型支持COMPLEX32，数据格式支持ND，shape为[batchCount, k, n]。 |
| C | aclTensor *，输出 | 对应公式中的C。数据类型支持COMPLEX32，数据格式支持ND，shape为[batchCount, m, n]。 |
| alpha | std::complex&lt;op::fp16_t&gt;，输入 | 对应公式中的alpha，复数标量，用于乘以矩阵乘法的结果，取值必须为1+0j。 |
| beta | std::complex&lt;op::fp16_t&gt;，输入 | 对应公式中的beta，复数标量，用于乘以矩阵C。取值必须为
0+0j。 |
| batchCount | int64_t，输入 | 批次数量。取值范围为{12 - 26208}。（注：该取值范围的分隔符在网页提取时可能丢失，请以官方文档为准。） |

返回值：返回状态码，具体参见SiP返回码。

约束说明：
- 算子实际计算时只支持3维ND运算。
- 算子输入数据为行主序，输入shape为[batchCount, m, k]、[batchCount, k, n]、[batchCount, m, n]，输出shape为[batchCount, m, n]。

调用示例：示例代码如下。该样例旨在提供快速上手、开发和调试算子的最小化实现，其核心目标是使用最精简的代码展示算子的核心功能，而非提供生产级的安全保障。不推荐用户直接将示例代码作为业务代码；若用户将示例代码应用在自身的真实业务场景中且发生了安全问题，则需用户自行承担。

（注：以下代码中被网页提取丢失的“<”“>”“=”“+”“&”及字符串引号等符号，已按上下文恢复，请以官方仓库中的源码为准。）

```cpp
#include <iostream>
#include <vector>
#include "asdsip.h"
#include "acl/acl.h"
#include "acl_meta.h"

using namespace AsdSip;

#define ASD_STATUS_CHECK(err)                                  \
    do {                                                       \
        AsdSip::AspbStatus err_ = (err);                       \
        if (err_ != AsdSip::ErrorType::ACL_SUCCESS) {          \
            std::cout << "Execute failed." << std::endl;       \
            exit(-1);                                          \
        } else {                                               \
            std::cout << "Execute successfully." << std::endl; \
        }                                                      \
    } while (0)

#define CHECK_RET(cond, return_expr) \
    do {                             \
        if (!(cond)) {               \
            return_expr;             \
        }                            \
    } while (0)

#define LOG_PRINT(message, ...)         \
    do {                                \
        printf(message, ##__VA_ARGS__); \
    } while (0)

int64_t GetShapeSize(const std::vector<int64_t> &shape)
{
    int64_t shapeSize = 1;
    for (auto i : shape) {
        shapeSize *= i;
    }
    return shapeSize;
}

int Init(int32_t deviceId, aclrtStream *stream)
{
    // 固定写法，acl初始化
    auto ret = aclInit(nullptr);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("aclInit failed. ERROR: %d\n", ret); return ret);
    ret = aclrtSetDevice(deviceId);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("aclrtSetDevice failed. ERROR: %d\n", ret); return ret);
    ret = aclrtCreateStream(stream);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("aclrtCreateStream failed. ERROR: %d\n", ret); return ret);
    return 0;
}

template <typename T>
int CreateAclTensor(const std::vector<T> &hostData, const std::vector<int64_t> &shape, void **deviceAddr,
                    aclDataType dataType, aclTensor **tensor)
{
    auto size = GetShapeSize(shape) * sizeof(T);
    // 调用aclrtMalloc申请device侧内存
    auto ret = aclrtMalloc(deviceAddr, size, ACL_MEM_MALLOC_HUGE_FIRST);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("aclrtMalloc failed. ERROR: %d\n", ret); return ret);
    // 调用aclrtMemcpy将host侧数据复制到device侧内存上
    ret = aclrtMemcpy(*deviceAddr, size, hostData.data(), size, ACL_MEMCPY_HOST_TO_DEVICE);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("aclrtMemcpy failed. ERROR: %d\n", ret); return ret);
    // 计算连续tensor的strides
    std::vector<int64_t> strides(shape.size(), 1);
    for (int64_t i = shape.size() - 2; i >= 0; i--) {
        strides[i] = shape[i + 1] * strides[i + 1];
    }
    // 调用aclCreateTensor接口创建aclTensor
    *tensor = aclCreateTensor(shape.data(), shape.size(), dataType, strides.data(), 0, aclFormat::ACL_FORMAT_ND,
                              shape.data(), shape.size(), *deviceAddr);
    return 0;
}

void printTensor(std::vector<std::complex<op::fp16_t>> &tensorData, int64_t batch, int64_t rows, int64_t cols)
{
    for (int64_t b = 0; b < batch; b++) {
        for (int64_t i = 0; i < rows; i++) {
            for (int64_t j = 0; j < cols; j++) {
                auto data = tensorData[b * rows * cols + i * cols + j];
                std::cout << "(" << (float)data.real() << ", " << (float)data.imag() << ") ";
            }
            std::cout << std::endl;
        }
        std::cout << std::endl;
    }
}

int main(int argc, char **argv)
{
    int deviceId = 0;
    aclrtStream stream;
    auto ret = Init(deviceId, &stream);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("Init acl failed. ERROR: %d\n", ret); return ret);

    int batch = 2;
    int m = 3;
    int k = 3;
    int n = 3;
    asdBlasOperation_t transA = asdBlasOperation_t::ASDBLAS_OP_N;
    asdBlasOperation_t transB = asdBlasOperation_t::ASDBLAS_OP_N;
    std::complex<op::fp16_t> alpha = std::complex<op::fp16_t>(1.0f, 0.0f);
    std::complex<op::fp16_t> beta = std::complex<op::fp16_t>(0.0f, 0.0f);
    int64_t lda = k;
    int64_t ldb = n;
    int64_t ldc = n;
    const int64_t tensorASize = batch * m * k;
    const int64_t tensorBSize = batch * k * n;
    const int64_t tensorCSize = batch * m * n;

    std::vector<std::complex<op::fp16_t>> tensorInAData;
    tensorInAData.reserve(tensorASize);
    for (int i = 0; i < tensorASize; i++) {
        tensorInAData.push_back(std::complex<op::fp16_t>(1.0f, i + 0.0f));
    }
    std::vector<std::complex<op::fp16_t>> tensorInBData;
    tensorInBData.reserve(tensorBSize);
    for (int i = 0; i < tensorBSize; i++) {
        tensorInBData.push_back(std::complex<op::fp16_t>(1.0f, i + 0.0f));
    }
    std::vector<std::complex<op::fp16_t>> tensorInCData;
    tensorInCData.reserve(tensorCSize);
    for (int i = 0; i < tensorCSize; i++) {
        tensorInCData.push_back(std::complex<op::fp16_t>(1.0f, i + 0.0f));
    }

    std::vector<int64_t> matAShape = {batch, m, k};
    std::vector<int64_t> matBShape = {batch, k, n};
    std::vector<int64_t> matCShape = {batch, m, n};
    aclTensor *matA = nullptr;
    aclTensor *matB = nullptr;
    aclTensor *matC = nullptr;
    void *matADeviceAddr = nullptr;
    void *matBDeviceAddr = nullptr;
    void *matCDeviceAddr = nullptr;
    ret = CreateAclTensor<std::complex<op::fp16_t>>(
        tensorInAData, matAShape, &matADeviceAddr, aclDataType::ACL_COMPLEX32, &matA);
    CHECK_RET(ret == ::ACL_SUCCESS, return ret);
    ret = CreateAclTensor<std::complex<op::fp16_t>>(
        tensorInBData, matBShape, &matBDeviceAddr, aclDataType::ACL_COMPLEX32, &matB);
    CHECK_RET(ret == ::ACL_SUCCESS, return ret);
    ret = CreateAclTensor<std::complex<op::fp16_t>>(
        tensorInCData, matCShape, &matCDeviceAddr, aclDataType::ACL_COMPLEX32, &matC);
    CHECK_RET(ret == ::ACL_SUCCESS, return ret);

    std::cout << "alpha = (" << (float)alpha.real() << ", " << (float)alpha.imag() << ")" << std::endl;
    std::cout << "beta = (" << (float)beta.real() << ", " << (float)beta.imag() << ")" << std::endl;
    std::cout << "------- input TensorInA -------" << std::endl;
    printTensor(tensorInAData, batch, m, k);
    std::cout << "------- input TensorInB -------" << std::endl;
    printTensor(tensorInBData, batch, k, n);

    asdBlasHandle handle;
    asdBlasCreate(handle);
    size_t lwork = 0;
    void *buffer = nullptr;
    asdBlasMakeHCgemmBatchedPlan(handle);
    asdBlasGetWorkspaceSize(handle, lwork);
    std::cout << "lwork = " << lwork << std::endl;
    if (lwork > 0) {
        ret = aclrtMalloc(&buffer, static_cast<int64_t>(lwork), ACL_MEM_MALLOC_HUGE_FIRST);
        CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("allocate workspace failed. ERROR: %d\n", ret); return ret);
    }
    asdBlasSetWorkspace(handle, buffer);
    asdBlasSetStream(handle, stream);
    ASD_STATUS_CHECK(asdBlasHCgemmBatched(handle, transA, transB, m, n, k, alpha, matA, lda, matB, ldb, beta,
                                          matC, ldc, batch));
    asdBlasSynchronize(handle);
    asdBlasDestroy(handle);

    ret = aclrtMemcpy(tensorInCData.data(), tensorCSize * sizeof(std::complex<op::fp16_t>), matCDeviceAddr,
                      tensorCSize * sizeof(std::complex<op::fp16_t>), ACL_MEMCPY_DEVICE_TO_HOST);
    CHECK_RET(ret == ::ACL_SUCCESS, LOG_PRINT("copy result from device to host failed. ERROR: %d\n", ret); return ret);
    std::cout << "------- output TensorInC -------" << std::endl;
    printTensor(tensorInCData, batch, m, n);

    aclDestroyTensor(matA);
    aclDestroyTensor(matB);
    aclDestroyTensor(matC);
    aclrtFree(matADeviceAddr);
    aclrtFree(matBDeviceAddr);
    aclrtFree(matCDeviceAddr);
    aclrtDestroyStream(stream);
    aclrtResetDevice(deviceId);
    aclFinalize();
    return 0;
}
```

【免费下载链接】sip：本项目是CANN提供的一款高效、可靠的高性能信号处理算子加速库，基于华为Ascend AI处理器，专门为信号处理领域而设计。项目地址: https://gitcode.com/cann/sip

创作声明:本文部分内容由AI辅助生成(AIGC),仅供参考