코드 기본 구조만 남기고, 상세 코드는 구조 분석을 위해 삭제했다. (Only the basic code structure is kept; the detailed implementation was removed to make the structural analysis easier to follow — the listings below do not compile as shown.)
// $ cat nvdsiplugin_ssd.cpp
// NOTE(review): this listing is a skeleton dump — most method bodies were
// deliberately deleted by the blog author for structural analysis.  Several
// non-void methods therefore have empty bodies and the listing does not
// compile/run as shown; the full implementation lives in the TensorRT OSS
// "FlattenConcat" sample plugin.
#include "NvInferPlugin.h"
#include <vector>
#include "cuda_runtime_api.h"
#include <cassert>
#include <cublas_v2.h>
#include <functional>
#include <numeric>
#include <algorithm>
#include <iostream>

using namespace nvinfer1;

// TensorRT custom-layer plugin: flattens each input tensor and concatenates
// the results along a single axis (used by the SSD sample's detection head).
class FlattenConcat : public IPluginV2
{
public:
    // Builder-time constructor.  Only axes 1..3 (C/H/W) are accepted.
    FlattenConcat(int concatAxis, bool ignoreBatch)
        : mIgnoreBatch(ignoreBatch)
        , mConcatAxisID(concatAxis)
    {
        assert(mConcatAxisID == 1 || mConcatAxisID == 2 || mConcatAxisID == 3);
    }

    //clone constructor
    // Copies the per-input concat-axis sizes into pinned host memory
    // (cudaMallocHost) — presumably so enqueue() can read them cheaply;
    // confirm against the full sample.  CHECK() is defined elsewhere and is
    // not visible in this dump.
    FlattenConcat(int concatAxis, bool ignoreBatch, int numInputs, int outputConcatAxis, int* inputConcatAxis)
        : mIgnoreBatch(ignoreBatch)
        , mConcatAxisID(concatAxis)
        , mOutputConcatAxis(outputConcatAxis)
        , mNumInputs(numInputs)
    {
        CHECK(cudaMallocHost((void**) &mInputConcatAxis, mNumInputs * sizeof(int)));
        for (int i = 0; i < mNumInputs; ++i)
            mInputConcatAxis[i] = inputConcatAxis[i];
    }

    // Deserialization constructor — body stripped in this dump.
    FlattenConcat(const void* data, size_t length) { }

    // Destructor — body stripped (original presumably frees the pinned buffers).
    ~FlattenConcat() { }

    // The plugin always produces a single concatenated output tensor.
    int getNbOutputs() const noexcept override { return 1; }

    // Body stripped — original computes the flattened/concatenated output dims.
    Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) noexcept override { }

    // Body stripped — original presumably creates the cuBLAS handle (mCublas).
    int initialize() noexcept override { }

    // Body stripped — counterpart of initialize().
    void terminate() noexcept override { }

    // No scratch workspace is required.
    size_t getWorkspaceSize(int) const noexcept override { return 0; }

    // Body stripped — original performs the flatten+concat on `stream`.
    int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void*, cudaStream_t stream) noexcept override { }

    // Body stripped — original returns the byte count consumed by serialize().
    size_t getSerializationSize() const noexcept override { }

    // Body stripped — original writes the members via write<T>() below.
    void serialize(void* buffer) const noexcept override { }

    // Body stripped — original caches build-time dims/axis sizes.
    void configureWithFormat(const Dims* inputs, int nbInputs, const Dims* outputDims, int nbOutputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) noexcept override { }

    // Body stripped — original's accepted type/format set is not visible here.
    bool supportsFormat(DataType type, PluginFormat format) const noexcept override { }

    // Type/version strings must match the creator's constants below.
    const char* getPluginType() const noexcept override { return "FlattenConcat_TRT"; }

    const char* getPluginVersion() const noexcept override { return "1"; }

    void destroy() noexcept override { delete this; }

    // Body stripped — original presumably invokes the clone constructor above.
    IPluginV2* clone() const noexcept override { }

    void setPluginNamespace(const char* libNamespace) noexcept override { mNamespace = libNamespace; }

    const char* getPluginNamespace() const noexcept override { return mNamespace.c_str(); }

private:
    // Serialization helpers — bodies stripped in this dump.
    template <typename T>
    void write(char*& buffer, const T& val) const { }

    template <typename T>
    T read(const char*& buffer) { }

    size_t* mCopySize = nullptr;      // per-input copy sizes (populated elsewhere)
    bool mIgnoreBatch{false};         // if true, the batch dim is flattened too — TODO confirm
    int mConcatAxisID{0}, mOutputConcatAxis{0}, mNumInputs{0};
    int* mInputConcatAxis = nullptr;  // pinned host buffer, one entry per input
    nvinfer1::Dims mCHW;              // cached input dims
    cublasHandle_t mCublas;           // handle used during enqueue — TODO confirm
    std::string mNamespace;
};

namespace
{
// Must agree with FlattenConcat::getPluginType()/getPluginVersion() so the
// registry can match serialized engines back to this plugin.
const char* FLATTENCONCAT_PLUGIN_VERSION{"1"};
const char* FLATTENCONCAT_PLUGIN_NAME{"FlattenConcat_TRT"};
} // namespace

// Factory registered with TensorRT's plugin registry; creates or
// deserializes FlattenConcat instances by name + version.
class FlattenConcatPluginCreator : public IPluginCreator
{
public:
    FlattenConcatPluginCreator()
    {
        // Creation-time fields a network description may supply:
        // the concat axis and the ignore-batch flag.
        mPluginAttributes.emplace_back(PluginField("axis", nullptr, PluginFieldType::kINT32, 1));
        mPluginAttributes.emplace_back(PluginField("ignoreBatch", nullptr, PluginFieldType::kINT32, 1));
        mFC.nbFields = mPluginAttributes.size();
        mFC.fields = mPluginAttributes.data();
    }

    ~FlattenConcatPluginCreator() {}

    const char* getPluginName() const noexcept override { return FLATTENCONCAT_PLUGIN_NAME; }

    const char* getPluginVersion() const noexcept override { return FLATTENCONCAT_PLUGIN_VERSION; }

    const PluginFieldCollection* getFieldNames() noexcept override { return &mFC; }

    // Body stripped — original presumably parses `fc` and constructs a FlattenConcat.
    IPluginV2* createPlugin(const char* name, const PluginFieldCollection* fc) noexcept override { }

    // Rebuilds a plugin instance from an engine's serialized blob.
    IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept override
    {
        return new FlattenConcat(serialData, serialLength);
    }

    void setPluginNamespace(const char* libNamespace) noexcept override { mNamespace = libNamespace; }

    const char* getPluginNamespace() const noexcept override { return mNamespace.c_str(); }

private:
    static PluginFieldCollection mFC;            // defined out-of-line below
    bool mIgnoreBatch{false};
    int mConcatAxisID;
    static std::vector<PluginField> mPluginAttributes;
    std::string mNamespace = "";
};
// Out-of-line definitions of the creator's static data members (required
// exactly once per program for non-inline statics).
PluginFieldCollection FlattenConcatPluginCreator::mFC{};
std::vector<PluginField> FlattenConcatPluginCreator::mPluginAttributes;

// Registers the creator with TensorRT's global plugin registry at library
// load time, so engine deserialization can locate "FlattenConcat_TRT" v"1".
REGISTER_TENSORRT_PLUGIN(FlattenConcatPluginCreator);
// |   <- trailing blog-markup residue from the original page, kept verbatim
// $ cat nvdsparsebbox_ssd.cpp
// NOTE(review): skeleton dump — the loop below references variables
// (keepCount, classId, det, rectx1, recty1, rectx2, recty2) whose
// declarations were deleted by the blog author, so this listing does not
// compile as shown; see the full DeepStream objectDetector_SSD sample.
#include <cstring>
#include <iostream>
#include "nvdsinfer_custom_impl.h"

// Clamp helpers used to keep parsed boxes inside the network resolution.
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define CLIP(a,min,max) (MAX(MIN(a, max), min))

/* This is a sample bounding box parsing function for the sample SSD UFF
 * detector model provided with the TensorRT samples. */

// Forward declaration with C linkage so DeepStream can look the symbol up
// by name from the shared library.
extern "C" bool NvDsInferParseCustomSSD (std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
         NvDsInferNetworkInfo const &networkInfo,
         NvDsInferParseDetectionParams const &detectionParams,
         std::vector<NvDsInferObjectDetectionInfo> &objectList);

/* C-linkage to prevent name-mangling */
extern "C" bool NvDsInferParseCustomSSD (std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
         NvDsInferNetworkInfo const &networkInfo,
         NvDsInferParseDetectionParams const &detectionParams,
         std::vector<NvDsInferObjectDetectionInfo> &objectList)
{
    // Convert each kept detection into a DeepStream object, clipping the
    // box to [0, width-1] x [0, height-1] of the network input.
    for (int i = 0; i < keepCount; ++i)
    {
        NvDsInferObjectDetectionInfo object;
        object.classId = classId;
        object.detectionConfidence = det[2];  // presumably det is a per-detection float record — confirm layout
        object.left = CLIP(rectx1, 0, networkInfo.width - 1);
        object.top = CLIP(recty1, 0, networkInfo.height - 1);
        // width/height derived from clipped corners; +1 keeps 1-pixel boxes non-zero.
        object.width = CLIP(rectx2, 0, networkInfo.width - 1) - object.left + 1;
        object.height = CLIP(recty2, 0, networkInfo.height - 1) - object.top + 1;
        objectList.push_back(object);
    }
    return true;
}

/* Check that the custom function has been defined correctly */
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomSSD);
// |   <- trailing blog-markup residue from the original page, kept verbatim
+ ?
+
NvDsInferLayerInfo fields: NvDsInferDataType dataType; union { NvDsInferDims inferDims; }; int bindingIndex; const char *layerName; void *buffer; int isInput;
'embedded > jetson' 카테고리의 다른 글
deepstream (0) | 2022.04.15 |
---|---|
ssd_inception_v2_coco_2017_11_17.tar.gz (0) | 2022.04.13 |
nvidia jetson deepstream objectDetector_SSD 실행 스크립트 분석 (0) | 2022.04.13 |
jetson / armv8 EL (0) | 2022.04.07 |
nvidia jetson partition table (0) | 2022.04.06 |