#include "quest/include/channels.h"
#include "quest/include/types.h"

#include "quest/src/core/bitwise.hpp"
#include "quest/src/core/memory.hpp"
#include "quest/src/core/printer.hpp"
#include "quest/src/core/utilities.hpp"
#include "quest/src/core/validation.hpp"
#include "quest/src/comm/comm_config.hpp"
#include "quest/src/comm/comm_routines.hpp"
#include "quest/src/cpu/cpu_config.hpp"
#include "quest/src/gpu/gpu_config.hpp"

#include <type_traits>
#include <vector>
38 cpu_deallocArray(op.cpuElemsFlat);
39 cpu_deallocMatrixWrapper(op.cpuElems);
42 cpu_deallocHeapFlag(op.wasGpuSynced);
45 auto gpuPtr = util_getGpuMemPtr(op);
46 if (mem_isAllocated(gpuPtr))
47 gpu_deallocArray(gpuPtr);
54 cpu_deallocMatrixList(map.matrices, map.numRows, map.numMatrices);
57 util_deallocEpsilonSensitiveHeapFlag(map.isApproxCPTP);
60 freeSuperOp(map.superop);
72 map.matrices =
nullptr;
76bool didAnyLocalAllocsFail(
SuperOp op) {
78 if (!mem_isAllocated(op.wasGpuSynced))
return true;
79 if (!mem_isAllocated(op.cpuElemsFlat))
return true;
80 if (!mem_isOuterAllocated(op.cpuElems))
return true;
82 if (
getQuESTEnv().isGpuAccelerated && !mem_isAllocated(op.gpuElemsFlat))
89bool didAnyLocalAllocsFail(
KrausMap map) {
91 if (!mem_isAllocated(map.isApproxCPTP))
94 if (!mem_isAllocated(map.matrices, map.numRows, map.numMatrices))
97 if (didAnyLocalAllocsFail(map.superop))
106void freeAllMemoryIfAnyAllocsFailed(T& obj) {
109 bool anyFail = didAnyLocalAllocsFail(obj);
111 anyFail = comm_isTrueOnAllNodes(anyFail);
125SuperOp allocSuperOp(
int numQubits) {
131 qindex numRows = powerOf2(2 * numQubits);
132 qindex numElems = numRows * numRows;
134 qcomp* cpuMem = cpu_allocArray(numElems);
135 qcomp* gpuMem =
nullptr;
137 gpuMem = gpu_allocArray(numElems);
140 .numQubits = numQubits,
143 .cpuElems = cpu_allocAndInitMatrixWrapper(cpuMem, numRows),
144 .cpuElemsFlat = cpuMem,
145 .gpuElemsFlat = gpuMem,
147 .wasGpuSynced = cpu_allocHeapFlag()
151 if (mem_isAllocated(out.wasGpuSynced))
152 *(out.wasGpuSynced) = 0;
159 validate_envIsInit(__func__);
160 validate_newSuperOpParams(numQubits, __func__);
162 SuperOp out = allocSuperOp(numQubits);
165 freeAllMemoryIfAnyAllocsFailed(out);
166 validate_newSuperOpAllocs(out, __func__);
173 validate_envIsInit(__func__);
174 validate_newKrausMapParams(numQubits, numOperators, __func__);
177 qindex numRows = powerOf2(numQubits);
180 .numQubits = numQubits,
181 .numMatrices = numOperators,
184 .matrices = cpu_allocMatrixList(numRows, numOperators),
185 .superop = allocSuperOp(numQubits),
187 .isApproxCPTP = util_allocEpsilonSensitiveHeapFlag(),
191 freeAllMemoryIfAnyAllocsFailed(out);
192 validate_newKrausMapAllocs(out, __func__);
195 util_setFlagToUnknown(out.isApproxCPTP);
202 validate_superOpFields(op, __func__);
209 validate_krausMapFields(map, __func__);
222 validate_superOpFields(op, __func__);
225 if (mem_isAllocated(util_getGpuMemPtr(op)))
226 gpu_copyCpuToGpu(op);
230 *(op.wasGpuSynced) = 1;
235 validate_krausMapFields(map, __func__);
238 util_setSuperoperator(map.superop.cpuElems, map.matrices, map.numMatrices, map.numQubits);
243 util_setFlagToUnknown(map.isApproxCPTP);
259void setAndSyncSuperOpElems(
SuperOp op, T matrix) {
262 cpu_copyMatrix(op.cpuElems, matrix, op.numRows);
270 validate_superOpFields(op, __func__);
271 validate_matrixNewElemsPtrNotNull(matrix, op.numRows, __func__);
273 setAndSyncSuperOpElems(op, matrix);
277 validate_superOpFields(op, __func__);
278 validate_superOpNewMatrixDims(op, matrix, __func__);
280 setAndSyncSuperOpElems(op, matrix);
296void setAndSyncKrausMapElems(
KrausMap map, T matrices) {
299 for (
int n=0; n<map.numMatrices; n++)
300 cpu_copyMatrix(map.matrices[n], matrices[n], map.numRows);
308 validate_krausMapFields(map, __func__);
310 setAndSyncKrausMapElems(map, matrices);
315 validate_krausMapFields(map, __func__);
316 validate_krausMapNewMatrixDims(map, matrices, __func__);
318 setAndSyncKrausMapElems(map, matrices);
335 validate_krausMapFields(map, __func__);
336 validate_krausMapFieldsMatchPassedParams(map, numQb, numOps, __func__);
337 validate_krausMapNewMatrixDims(map, matrices, __func__);
339 setAndSyncKrausMapElems(map, matrices);
344 validate_superOpFields(op, __func__);
345 validate_superOpFieldsMatchPassedParams(op, numQb, __func__);
346 validate_superOpNewMatrixDims(op, matrix, __func__);
348 setAndSyncSuperOpElems(op, matrix);
361 validate_envIsInit(__func__);
362 validate_newKrausMapParams(numQubits, numOperators, __func__);
363 validate_newInlineKrausMapDimMatchesVectors(numQubits, numOperators, matrices, __func__);
368 setAndSyncKrausMapElems(map, matrices);
374 validate_envIsInit(__func__);
375 validate_newSuperOpParams(numQubits, __func__);
376 validate_newInlineSuperOpDimMatchesVectors(numQubits, matrix, __func__);
381 setAndSyncSuperOpElems(op, matrix);
400 void _validateParamsToSetKrausMapFromArr(
KrausMap map) {
401 validate_krausMapFields(map,
"setKrausMap");
404 void _validateParamsToSetSuperOpFromArr(
SuperOp op) {
405 validate_superOpFields(op,
"setSuperOp");
408 void _validateParamsToSetInlineKrausMap(
KrausMap map,
int numQb,
int numOps) {
410 const char* caller =
"setInlineKrausMap";
411 validate_krausMapFields(map, caller);
412 validate_krausMapFieldsMatchPassedParams(map, numQb, numOps, caller);
415 void _validateParamsToSetInlineSuperOp(
SuperOp op,
int numQb) {
417 const char* caller =
"setInlineSuperOp";
418 validate_superOpFields(op, caller);
419 validate_superOpFieldsMatchPassedParams(op, numQb, caller);
422 void _validateParamsToCreateInlineKrausMap(
int numQb,
int numOps) {
424 const char* caller =
"createInlineKrausMap";
425 validate_envIsInit(caller);
426 validate_newKrausMapParams(numQb, numOps, caller);
429 void _validateParamsToCreateInlineSuperOp(
int numQb) {
431 const char* caller =
"createInlineSuperOp";
432 validate_envIsInit(caller);
433 validate_newSuperOpParams(numQb, caller);
446 validate_superOpFields(op, __func__);
447 validate_numReportedNewlinesAboveZero(__func__);
451 validate_superOpIsSynced(op, __func__);
454 size_t elemMem = mem_getLocalSuperOpMemoryRequired(op.numQubits);
455 size_t structMem =
sizeof(op);
457 print_header(op, elemMem + structMem);
461 print_oneFewerNewlines();
466 validate_krausMapFields(map, __func__);
467 validate_numReportedNewlinesAboveZero(__func__);
471 validate_krausMapIsSynced(map, __func__);
476 size_t krausMem = mem_getLocalMatrixMemoryRequired(map.numQubits, isDense, numNodes) * map.numMatrices;
477 size_t superMem = mem_getLocalSuperOpMemoryRequired(map.superop.numQubits);
478 size_t strucMem =
sizeof(map);
481 size_t totalMem = krausMem + superMem + strucMem;
482 print_header(map, totalMem);
486 print_oneFewerNewlines();
KrausMap createInlineKrausMap(int numQubits, int numOperators, std::vector< std::vector< std::vector< qcomp > > > matrices)
SuperOp createInlineSuperOp(int numQubits, std::vector< std::vector< qcomp > > matrix)
KrausMap createKrausMap(int numQubits, int numOperators)
SuperOp createSuperOp(int numQubits)
void destroySuperOp(SuperOp op)
void destroyKrausMap(KrausMap map)
void reportKrausMap(KrausMap map)
void reportSuperOp(SuperOp op)
void setInlineSuperOp(SuperOp op, int numQb, std::vector< std::vector< qcomp > > matrix)
void setInlineKrausMap(KrausMap map, int numQb, int numOps, std::vector< std::vector< std::vector< qcomp > > > matrices)
void setSuperOp(SuperOp op, qcomp **matrix)
void setKrausMap(KrausMap map, qcomp ***matrices)
void syncSuperOp(SuperOp op)
void syncKrausMap(KrausMap map)