// Definitions of the SuperOp and KrausMap channel data structures:
// their constructors, setters, sync routines and reporters.

#include "quest/include/channels.h"
#include "quest/include/types.h"

#include "quest/src/core/bitwise.hpp"
#include "quest/src/core/memory.hpp"
#include "quest/src/core/printer.hpp"
#include "quest/src/core/utilities.hpp"
#include "quest/src/core/validation.hpp"
#include "quest/src/comm/comm_config.hpp"
#include "quest/src/comm/comm_routines.hpp"
#include "quest/src/cpu/cpu_config.hpp"
#include "quest/src/gpu/gpu_config.hpp"

#include <type_traits>
#include <vector>
// frees all CPU and GPU memory of a SuperOp
void freeSuperOp(SuperOp op) {
    cpu_deallocArray(op.cpuElemsFlat);
    cpu_deallocMatrixWrapper(op.cpuElems);
    cpu_deallocHeapFlag(op.wasGpuSynced);

    // GPU memory exists only in GPU-accelerated deployments
    auto gpuPtr = util_getGpuMemPtr(op);
    if (mem_isAllocated(gpuPtr))
        gpu_deallocArray(gpuPtr);
}
// frees all memory of a KrausMap: its Kraus matrices, its CPTP flag,
// and its embedded superoperator (both CPU and GPU memory)
void freeKrausMap(KrausMap map) {
    cpu_deallocMatrixList(map.matrices, map.numRows, map.numMatrices);
    util_deallocEpsilonSensitiveHeapFlag(map.isApproxCPTP);
    freeSuperOp(map.superop);
}
    map.matrices = nullptr;
// returns whether any of a SuperOp's local heap allocations failed
bool didAnyLocalAllocsFail(SuperOp op) {

    if (!mem_isAllocated(op.wasGpuSynced))
        return true;
    if (!mem_isAllocated(op.cpuElemsFlat))
        return true;
    if (!mem_isOuterAllocated(op.cpuElems))
        return true;

    // GPU memory is only expected when the environment is GPU-accelerated
    if (getQuESTEnv().isGpuAccelerated && !mem_isAllocated(op.gpuElemsFlat))
        return true;

    return false;
}
// returns whether any of a KrausMap's local heap allocations failed
bool didAnyLocalAllocsFail(KrausMap map) {

    if (!mem_isAllocated(map.isApproxCPTP))
        return true;
    if (!mem_isAllocated(map.matrices, map.numRows, map.numMatrices))
        return true;
    if (didAnyLocalAllocsFail(map.superop))
        return true;

    return false;
}
// frees all of the object's (SuperOp or KrausMap) memory if any of its
// allocations failed on any node, so that subsequent validation can
// throw an error without leaking memory
template <typename T>
void freeAllMemoryIfAnyAllocsFailed(T& obj) {

    // check whether any allocation failed on this node...
    bool anyFail = didAnyLocalAllocsFail(obj);

    // ...or on any other node (relevant to distributed deployments)
    anyFail = comm_isTrueOnAllNodes(anyFail);

    // if so, free everything that was successfully allocated
    // (the per-type dispatch below is an assumed completion)
    if (anyFail) {
        if constexpr (std::is_same_v<T, SuperOp>)
            freeSuperOp(obj);
        else
            freeKrausMap(obj);
    }
}
// allocates all memory of a new SuperOp, but performs no validation;
// the caller must subsequently check for failed allocations
SuperOp allocSuperOp(int numQubits) {

    // a superoperator upon numQubits qubits has dimension 4^numQubits
    qindex numRows = powerOf2(2 * numQubits);
    qindex numElems = numRows * numRows;

    // allocate CPU memory always, and GPU memory only when GPU-accelerated
    qcomp* cpuMem = cpu_allocArray(numElems);
    qcomp* gpuMem = nullptr;
    if (getQuESTEnv().isGpuAccelerated)
        gpuMem = gpu_allocArray(numElems);

    SuperOp out;
    out.numQubits = numQubits;
    out.numRows = numRows;

    // the 2D wrapper merely aliases rows of the flat CPU array
    out.cpuElems = cpu_allocAndInitMatrixWrapper(cpuMem, numRows);
    out.cpuElemsFlat = cpuMem;
    out.gpuElemsFlat = gpuMem;

    // heap flag recording whether the GPU memory is up-to-date
    out.wasGpuSynced = cpu_allocHeapFlag();
    if (mem_isAllocated(out.wasGpuSynced))
        *(out.wasGpuSynced) = 0;

    return out;
}
SuperOp createSuperOp(int numQubits) {
    validate_envIsInit(__func__);
    validate_newSuperOpParams(numQubits, __func__);

    SuperOp out = allocSuperOp(numQubits);

    // free before throwing a validation error, to avoid a memory leak
    freeAllMemoryIfAnyAllocsFailed(out);
    validate_newSuperOpAllocs(out, __func__);

    return out;
}
KrausMap createKrausMap(int numQubits, int numOperators) {
    validate_envIsInit(__func__);
    validate_newKrausMapParams(numQubits, numOperators, __func__);

    // each Kraus matrix has dimension 2^numQubits
    qindex numRows = powerOf2(numQubits);

    KrausMap out;
    out.numQubits = numQubits;
    out.numMatrices = numOperators;
    out.numRows = numRows;
    out.matrices = cpu_allocMatrixList(numRows, numOperators);
    out.superop = allocSuperOp(numQubits);
    out.isApproxCPTP = util_allocEpsilonSensitiveHeapFlag();

    // free before throwing a validation error, to avoid a memory leak
    freeAllMemoryIfAnyAllocsFailed(out);
    validate_newKrausMapAllocs(out, __func__);

    // CPTP-ness is unknown until the Kraus matrices are set
    util_setFlagToUnknown(out.isApproxCPTP);

    return out;
}
void destroySuperOp(SuperOp op) {
    validate_superOpFields(op, __func__);
    freeSuperOp(op);
}

void destroyKrausMap(KrausMap map) {
    validate_krausMapFields(map, __func__);
    freeKrausMap(map);
}
// copies a SuperOp's CPU elements to its GPU memory (when it exists) and
// marks the superoperator as synced
void syncSuperOp(SuperOp op) {
    validate_superOpFields(op, __func__);

    if (mem_isAllocated(util_getGpuMemPtr(op)))
        gpu_copyCpuToGpu(op);

    *(op.wasGpuSynced) = 1;
}
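/*
 * Illustrative usage sketch (not part of this translation unit): a user may
 * modify a SuperOp's CPU elements directly, but must then call syncSuperOp()
 * so that GPU-accelerated deployments see the new values. The element values
 * below are arbitrary and chosen only for demonstration.
 *
 *     SuperOp op = createSuperOp(1);          // 4x4 superoperator on 1 qubit
 *     for (qindex r = 0; r < op.numRows; r++)
 *         op.cpuElems[r][r] = 1;              // superoperator of the identity channel
 *     syncSuperOp(op);                        // push the CPU edits to GPU memory
 *     destroySuperOp(op);
 */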
// recomputes the superoperator from the KrausMap's (potentially modified)
// Kraus matrices, syncs it to GPU memory, and resets the CPTP flag
void syncKrausMap(KrausMap map) {
    validate_krausMapFields(map, __func__);

    util_setSuperoperator(map.superop.cpuElems, map.matrices, map.numMatrices, map.numQubits);
    syncSuperOp(map.superop);

    // the revised map's CPTP-ness is no longer known
    util_setFlagToUnknown(map.isApproxCPTP);
}
// overwrites the superoperator's CPU elements with the given matrix
// (of any 2D element type accepted by cpu_copyMatrix), then syncs
template <typename T>
void setAndSyncSuperOpElems(SuperOp op, T matrix) {
    cpu_copyMatrix(op.cpuElems, matrix, op.numRows);
    syncSuperOp(op);
}
void setSuperOp(SuperOp op, qcomp** matrix) {
    validate_superOpFields(op, __func__);
    validate_matrixNewElemsPtrNotNull(matrix, op.numRows, __func__);

    setAndSyncSuperOpElems(op, matrix);
}
// C++-only overload accepting nested vectors
void setSuperOp(SuperOp op, std::vector<std::vector<qcomp>> matrix) {
    validate_superOpFields(op, __func__);
    validate_superOpNewMatrixDims(op, matrix, __func__);

    setAndSyncSuperOpElems(op, matrix);
}
// overwrites every Kraus matrix with the given matrices, then recomputes
// and syncs the superoperator
template <typename T>
void setAndSyncKrausMapElems(KrausMap map, T matrices) {

    for (int n=0; n<map.numMatrices; n++)
        cpu_copyMatrix(map.matrices[n], matrices[n], map.numRows);

    syncKrausMap(map);
}
void setKrausMap(KrausMap map, qcomp*** matrices) {
    validate_krausMapFields(map, __func__);

    setAndSyncKrausMapElems(map, matrices);
}
// C++-only overload accepting nested vectors
void setKrausMap(KrausMap map, std::vector<std::vector<std::vector<qcomp>>> matrices) {
    validate_krausMapFields(map, __func__);
    validate_krausMapNewMatrixDims(map, matrices, __func__);

    setAndSyncKrausMapElems(map, matrices);
}
void setInlineKrausMap(KrausMap map, int numQb, int numOps, std::vector<std::vector<std::vector<qcomp>>> matrices) {
    validate_krausMapFields(map, __func__);
    validate_krausMapFieldsMatchPassedParams(map, numQb, numOps, __func__);
    validate_krausMapNewMatrixDims(map, matrices, __func__);

    setAndSyncKrausMapElems(map, matrices);
}
void setInlineSuperOp(SuperOp op, int numQb, std::vector<std::vector<qcomp>> matrix) {
    validate_superOpFields(op, __func__);
    validate_superOpFieldsMatchPassedParams(op, numQb, __func__);
    validate_superOpNewMatrixDims(op, matrix, __func__);

    setAndSyncSuperOpElems(op, matrix);
}
KrausMap createInlineKrausMap(int numQubits, int numOperators, std::vector<std::vector<std::vector<qcomp>>> matrices) {
    validate_envIsInit(__func__);
    validate_newKrausMapParams(numQubits, numOperators, __func__);
    validate_newInlineKrausMapDimMatchesVectors(numQubits, numOperators, matrices, __func__);

    KrausMap map = createKrausMap(numQubits, numOperators);
    setAndSyncKrausMapElems(map, matrices);
    return map;
}
SuperOp createInlineSuperOp(int numQubits, std::vector<std::vector<qcomp>> matrix) {
    validate_envIsInit(__func__);
    validate_newSuperOpParams(numQubits, __func__);
    validate_newInlineSuperOpDimMatchesVectors(numQubits, matrix, __func__);

    SuperOp op = createSuperOp(numQubits);
    setAndSyncSuperOpElems(op, matrix);
    return op;
}
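/*
 * Illustrative usage sketch (not part of this translation unit): creating a
 * one-qubit bit-flip channel of probability p via the vector overload above.
 * The numeric values are assumptions chosen purely for demonstration.
 *
 *     qreal p = 0.1;
 *     KrausMap flip = createInlineKrausMap(1, 2, {
 *         {{std::sqrt(1-p), 0}, {0, std::sqrt(1-p)}},   // sqrt(1-p) * identity
 *         {{0, std::sqrt(p)}, {std::sqrt(p), 0}}        // sqrt(p)   * Pauli X
 *     });
 *     destroyKrausMap(flip);
 */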
// validation-only helpers, each validating on behalf of the named public API function

void _validateParamsToSetKrausMapFromArr(KrausMap map) {
    validate_krausMapFields(map, "setKrausMap");
}

void _validateParamsToSetSuperOpFromArr(SuperOp op) {
    validate_superOpFields(op, "setSuperOp");
}

void _validateParamsToSetInlineKrausMap(KrausMap map, int numQb, int numOps) {
    const char* caller = "setInlineKrausMap";
    validate_krausMapFields(map, caller);
    validate_krausMapFieldsMatchPassedParams(map, numQb, numOps, caller);
}

void _validateParamsToSetInlineSuperOp(SuperOp op, int numQb) {
    const char* caller = "setInlineSuperOp";
    validate_superOpFields(op, caller);
    validate_superOpFieldsMatchPassedParams(op, numQb, caller);
}

void _validateParamsToCreateInlineKrausMap(int numQb, int numOps) {
    const char* caller = "createInlineKrausMap";
    validate_envIsInit(caller);
    validate_newKrausMapParams(numQb, numOps, caller);
}

void _validateParamsToCreateInlineSuperOp(int numQb) {
    const char* caller = "createInlineSuperOp";
    validate_envIsInit(caller);
    validate_newSuperOpParams(numQb, caller);
}
void reportSuperOp(SuperOp op) {
    validate_superOpFields(op, __func__);
    validate_numReportedNewlinesAboveZero(__func__);

    // the superoperator must have been synced since its last modification
    validate_superOpIsSynced(op, __func__);

    // report the memory consumed by the elements and the struct itself
    size_t elemMem = mem_getLocalSuperOpMemoryRequired(op.numQubits);
    size_t structMem = sizeof(op);
    print_header(op, elemMem + structMem);

    print_oneFewerNewlines();
}
void reportKrausMap(KrausMap map) {
    validate_krausMapFields(map, __func__);
    validate_numReportedNewlinesAboveZero(__func__);
    validate_krausMapIsSynced(map, __func__);

    // Kraus matrices are dense, and are here assumed un-distributed
    bool isDense = true;
    int numNodes = 1;

    // report the total memory of the Kraus matrices, superoperator and struct
    size_t krausMem = mem_getLocalMatrixMemoryRequired(map.numQubits, isDense, numNodes) * map.numMatrices;
    size_t superMem = mem_getLocalSuperOpMemoryRequired(map.superop.numQubits);
    size_t strucMem = sizeof(map);

    size_t totalMem = krausMem + superMem + strucMem;
    print_header(map, totalMem);

    print_oneFewerNewlines();
}
/*
 * Public API defined in this file (declared in quest/include/channels.h):
 *
 *   KrausMap createKrausMap(int numQubits, int numOperators);
 *   KrausMap createInlineKrausMap(int numQubits, int numOperators, std::vector<std::vector<std::vector<qcomp>>> matrices);
 *   SuperOp  createSuperOp(int numQubits);
 *   SuperOp  createInlineSuperOp(int numQubits, std::vector<std::vector<qcomp>> matrix);
 *   void     setKrausMap(KrausMap map, qcomp ***matrices);
 *   void     setInlineKrausMap(KrausMap map, int numQb, int numOps, std::vector<std::vector<std::vector<qcomp>>> matrices);
 *   void     setSuperOp(SuperOp op, qcomp **matrix);
 *   void     setInlineSuperOp(SuperOp op, int numQb, std::vector<std::vector<qcomp>> matrix);
 *   void     syncKrausMap(KrausMap map);
 *   void     syncSuperOp(SuperOp op);
 *   void     reportKrausMap(KrausMap map);
 *   void     reportSuperOp(SuperOp op);
 *   void     destroyKrausMap(KrausMap map);
 *   void     destroySuperOp(SuperOp op);
 */
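/*
 * End-to-end sketch (illustrative only; assumes the QuEST environment has been
 * initialised, e.g. with initQuESTEnv(), and C++ compilation so that the
 * nested-vector overloads are available):
 *
 *     KrausMap map = createKrausMap(1, 2);
 *     setInlineKrausMap(map, 1, 2, {
 *         {{1, 0}, {0, 1}},    // identity Kraus operator
 *         {{0, 0}, {0, 0}}     // zero operator (the map stays trace-preserving)
 *     });
 *     reportKrausMap(map);     // prints the map's elements and memory footprint
 *     destroyKrausMap(map);
 */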