embeded/jetson2022. 4. 13. 15:39

netron 웹 버전에서 받아서 보는 중

 

받아볼 녀석은 아래의 링크이고..

[링크 : http://download.tensorflow.org/models/object_detection/ssd_inception_v2_coco_2017_11_17.tar.gz]

 

어떤 이름의 레이어에서 출력을 내주는지 한번 찾아보는 중

크게 4개인 것 같고 명칭은 아래와 같은데..

detection_boxes, detection_scores, detection_classes, num_detections

detection_boxes의 경우 4개 좌표로 나와있을줄 알았는데 단순히 float32라고만 되어있어서 멘붕..

 

 

 

 

 

 

 

 

 

'embeded > jetson' 카테고리의 다른 글

deepstream SSD  (0) 2022.04.15
deepstream  (0) 2022.04.15
nvidia jetson deepstream objectDetector_SSD 플러그인 분석  (0) 2022.04.13
nvidia jetson deepstream objectDetector_SSD 실행 스크립트 분석  (0) 2022.04.13
jetson / armv8 EL  (0) 2022.04.07
Posted by 구차니
embeded/jetson2022. 4. 13. 11:48

코드 기본 구조만 남기고 상세 코드는 분석을 위해 삭제

$ cat nvdsiplugin_ssd.cpp
#include "NvInferPlugin.h"
#include <vector>
#include "cuda_runtime_api.h"
#include <cassert>
#include <cublas_v2.h>
#include <functional>
#include <numeric>
#include <algorithm>
#include <iostream>

using namespace nvinfer1;

// TensorRT IPluginV2 plugin "FlattenConcat_TRT": flattens each input tensor and
// concatenates the results along a chosen axis (used by the SSD sample model).
// NOTE(review): the non-trivial method bodies were intentionally deleted by the
// blog author to study the plugin's structure, so several non-void functions
// have empty bodies and this snippet does not compile as shown. The CHECK
// macro used below is also not defined in this excerpt.
class FlattenConcat : public IPluginV2
{
public:
    // Build-time constructor; the concat axis must be a non-batch dimension
    // (only 1, 2 or 3 are accepted by the assert).
    FlattenConcat(int concatAxis, bool ignoreBatch)
        : mIgnoreBatch(ignoreBatch)
        , mConcatAxisID(concatAxis)
    {
        assert(mConcatAxisID == 1 || mConcatAxisID == 2 || mConcatAxisID == 3);
    }
    // Clone constructor: copies the per-input sizes along the concat axis into
    // pinned host memory (cudaMallocHost) owned by this instance.
    FlattenConcat(int concatAxis, bool ignoreBatch, int numInputs, int outputConcatAxis, int* inputConcatAxis)
        : mIgnoreBatch(ignoreBatch)
        , mConcatAxisID(concatAxis)
        , mOutputConcatAxis(outputConcatAxis)
        , mNumInputs(numInputs)
    {
        CHECK(cudaMallocHost((void**) &mInputConcatAxis, mNumInputs * sizeof(int)));
        for (int i = 0; i < mNumInputs; ++i)
            mInputConcatAxis[i] = inputConcatAxis[i];
    }

    // Deserialization constructor (body stripped).
    FlattenConcat(const void* data, size_t length)     {    }
    // Destructor (body stripped; presumably frees mInputConcatAxis — confirm
    // against the original NVIDIA sample source).
    ~FlattenConcat()    {    }
    // The plugin produces exactly one (concatenated) output tensor.
    int getNbOutputs() const noexcept override { return 1; }
    // Body stripped: would compute the flattened+concatenated output dims.
    Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) noexcept override    {    }
    int initialize() noexcept override    {    }
    void terminate() noexcept override    {    }
    // No scratch workspace is requested from TensorRT.
    size_t getWorkspaceSize(int) const noexcept override { return 0; }
    // Body stripped: would perform the flatten/concat copies on `stream`.
    int enqueue(int batchSize, void const* const* inputs, void* const* outputs, void*, cudaStream_t stream) noexcept override    {    }
    // Serialization support (bodies stripped).
    size_t getSerializationSize() const noexcept override   {    }
    void serialize(void* buffer) const noexcept override    {   }
    void configureWithFormat(const Dims* inputs, int nbInputs, const Dims* outputDims, int nbOutputs, nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) noexcept override   {    }
    bool supportsFormat(DataType type, PluginFormat format) const noexcept override    {    }
    // Type/version strings must match those advertised by the creator class
    // (FLATTENCONCAT_PLUGIN_NAME / FLATTENCONCAT_PLUGIN_VERSION below).
    const char* getPluginType() const noexcept override { return "FlattenConcat_TRT"; }
    const char* getPluginVersion() const noexcept override { return "1"; }
    void destroy() noexcept override { delete this; }
    IPluginV2* clone() const noexcept override    {    }
    void setPluginNamespace(const char* libNamespace) noexcept override { mNamespace = libNamespace; }
    const char* getPluginNamespace() const noexcept override { return mNamespace.c_str(); }

private:
    // Serialization helpers (bodies stripped).
    template <typename T>    void write(char*& buffer, const T& val) const    {    }
    template <typename T>    T read(const char*& buffer)    {    }
    size_t* mCopySize = nullptr;
    bool mIgnoreBatch{false};
    int mConcatAxisID{0}, mOutputConcatAxis{0}, mNumInputs{0};
    // Per-input size along the concat axis; allocated as pinned host memory
    // in the clone constructor.
    int* mInputConcatAxis = nullptr;
    nvinfer1::Dims mCHW;
    cublasHandle_t mCublas;
    std::string mNamespace;
};

namespace
{
const char* FLATTENCONCAT_PLUGIN_VERSION{"1"};
const char* FLATTENCONCAT_PLUGIN_NAME{"FlattenConcat_TRT"};
} // namespace

// IPluginCreator for FlattenConcat: lets TensorRT create the plugin by name
// ("FlattenConcat_TRT", version "1") when building a network or when
// deserializing an engine. NOTE(review): createPlugin's body was stripped by
// the author, so this excerpt does not compile as shown.
class FlattenConcatPluginCreator : public IPluginCreator
{
public:
    FlattenConcatPluginCreator()
    {
        // Advertise the two creation-time fields the plugin accepts.
        mPluginAttributes.emplace_back(PluginField("axis", nullptr, PluginFieldType::kINT32, 1));
        mPluginAttributes.emplace_back(PluginField("ignoreBatch", nullptr, PluginFieldType::kINT32, 1));
        mFC.nbFields = mPluginAttributes.size();
        mFC.fields = mPluginAttributes.data();
    }

    ~FlattenConcatPluginCreator() {}
    const char* getPluginName() const noexcept override { return FLATTENCONCAT_PLUGIN_NAME; }
    const char* getPluginVersion() const noexcept override { return FLATTENCONCAT_PLUGIN_VERSION; }
    const PluginFieldCollection* getFieldNames() noexcept override { return &mFC; }
    // Body stripped: would read "axis"/"ignoreBatch" from fc and construct the plugin.
    IPluginV2* createPlugin(const char* name, const PluginFieldCollection* fc) noexcept override    {    }
    // Rebuilds a FlattenConcat from an engine's serialized blob.
    IPluginV2* deserializePlugin(const char* name, const void* serialData, size_t serialLength) noexcept override    {        return new FlattenConcat(serialData, serialLength);    }
    void setPluginNamespace(const char* libNamespace) noexcept override { mNamespace = libNamespace; }
    const char* getPluginNamespace() const noexcept override { return mNamespace.c_str(); }

private:
    static PluginFieldCollection mFC;
    bool mIgnoreBatch{false};
    int mConcatAxisID;
    static std::vector<PluginField> mPluginAttributes;
    std::string mNamespace = "";
};

// Out-of-class definitions for the creator's static members.
PluginFieldCollection FlattenConcatPluginCreator::mFC{};
std::vector<PluginField> FlattenConcatPluginCreator::mPluginAttributes;

// Registers the creator with TensorRT's global plugin registry at load time,
// making "FlattenConcat_TRT" resolvable by name during engine deserialization.
REGISTER_TENSORRT_PLUGIN(FlattenConcatPluginCreator);

 

$ cat nvdsparsebbox_ssd.cpp
#include <cstring>
#include <iostream>
#include "nvdsinfer_custom_impl.h"

#define MIN(a,b) ((a) < (b) ? (a) : (b))
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define CLIP(a,min,max) (MAX(MIN(a, max), min))

/* This is a sample bounding box parsing function for the sample SSD UFF
 * detector model provided with the TensorRT samples. */

extern "C"
bool NvDsInferParseCustomSSD (std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo  const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferObjectDetectionInfo> &objectList);

/* C-linkage to prevent name-mangling */
// Custom bounding-box parser that DeepStream's nvinfer plugin calls (selected
// via parse-bbox-func-name in the config): it converts the SSD model's output
// layers into NvDsInferObjectDetectionInfo entries appended to objectList.
// NOTE(review): the layer-reading logic was deliberately deleted by the blog
// author, so keepCount, classId, det, rectx1/recty1/rectx2/recty2 are
// undefined here and this excerpt does not compile as shown.
extern "C"
bool NvDsInferParseCustomSSD (std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
        NvDsInferNetworkInfo  const &networkInfo,
        NvDsInferParseDetectionParams const &detectionParams,
        std::vector<NvDsInferObjectDetectionInfo> &objectList)
{
  for (int i = 0; i < keepCount; ++i)
  {
    NvDsInferObjectDetectionInfo object;
        object.classId = classId;
        object.detectionConfidence = det[2];
        // Clamp the box to the network input resolution; width/height are
        // derived from the clamped right/bottom coordinates (+1: inclusive).
        object.left = CLIP(rectx1, 0, networkInfo.width - 1);
        object.top = CLIP(recty1, 0, networkInfo.height - 1);
        object.width = CLIP(rectx2, 0, networkInfo.width - 1) - object.left + 1;
        object.height = CLIP(recty2, 0, networkInfo.height - 1) - object.top + 1;
        objectList.push_back(object);
  }

  // Always reports success; detections were appended to objectList above.
  return true;
}

/* Check that the custom function has been defined correctly */
CHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomSSD);

 

 

+ ?

[링크 : https://github.com/AastaNV/eLinux_data/blob/main/deepstream/ssd-jetson_inference/ssd-jetson_inference.patch]

 

+

NvDsInferDataType  dataType
union {
   NvDsInferDims   inferDims
}; 
int  bindingIndex
const char *  layerName
void *  buffer
int  isInput

[링크 : https://docs.nvidia.com/metropolis/deepstream/5.0DP/dev-guide/DeepStream_Development_Guide/baggage/structNvDsInferLayerInfo.html]

'embeded > jetson' 카테고리의 다른 글

deepstream  (0) 2022.04.15
ssd_inception_v2_coco_2017_11_17.tar.gz  (0) 2022.04.13
nvidia jetson deepstream objectDetector_SSD 실행 스크립트 분석  (0) 2022.04.13
jetson / armv8 EL  (0) 2022.04.07
nvidia jetson partition table  (0) 2022.04.06
Posted by 구차니
embeded/jetson2022. 4. 13. 11:32

 

- With gst-launch-1.0
  For Jetson:
  $ gst-launch-1.0 filesrc location=../../samples/streams/sample_1080p_h264.mp4 ! \
        decodebin ! m.sink_0 nvstreammux name=m batch-size=1 width=1280 height=720 ! \
        nvinfer config-file-path= config_infer_primary_ssd.txt ! \
        nvvideoconvert ! nvdsosd ! nvegltransform ! nveglglessink

- With deepstream-app
  $ deepstream-app -c deepstream_app_config_ssd.txt

 

$ cat deepstream_app_config_ssd.txt
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=1
gie-kitti-output-dir=streamscl

[tiled-display]
enable=0
rows=1
columns=1
width=1280
height=720
gpu-id=0
nvbuf-memory-type=0

[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
num-sources=1
uri=file://../../samples/streams/sample_1080p_h264.mp4
gpu-id=0
cudadec-memtype=0

[streammux]
gpu-id=0
batch-size=1
batched-push-timeout=-1
## Set muxer output width and height
width=1920
height=1080
nvbuf-memory-type=0

[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=1
source-id=0
gpu-id=0

[osd]
enable=1
gpu-id=0
border-width=3
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Serif
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0

[primary-gie]
enable=1
gpu-id=0
batch-size=1
gie-unique-id=1
interval=0

labelfile-path=/home/nvidia/tmp_onnx/labels.txt
#labelfile-path=ssd_coco_labels.txt

model-engine-file=sample_ssd_relu6.uff_b1_gpu0_fp32.engine
config-file=config_infer_primary_ssd.txt
nvbuf-memory-type=0

 

$ cat config_infer_primary_ssd.txt
[property]
gpu-id=0
net-scale-factor=0.0078431372
offsets=127.5;127.5;127.5
model-color-format=0

# yw
onnx-file=/home/nvidia/tmp_onnx/model.onnx
labelfile=/home/nvidia/tmp_onnx/labels.txt

model-engine-file=sample_ssd_relu6.uff_b1_gpu0_fp32.engine
labelfile-path=ssd_coco_labels.txt
uff-file=sample_ssd_relu6.uff
infer-dims=3;300;300
uff-input-order=0
uff-input-blob-name=Input
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2
num-detected-classes=91
interval=0
gie-unique-id=1
is-classifier=0
output-blob-names=MarkOutput_0
parse-bbox-func-name=NvDsInferParseCustomSSD
custom-lib-path=nvdsinfer_custom_impl_ssd/libnvdsinfer_custom_impl_ssd.so
#scaling-filter=0
#scaling-compute-hw=0

[class-attrs-all]
threshold=0.5
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0

## Per class configuration
#[class-attrs-2]
#threshold=0.6
#roi-top-offset=20
#roi-bottom-offset=10
#detected-min-w=40
#detected-min-h=40
#detected-max-w=400
#detected-max-h=800

'embeded > jetson' 카테고리의 다른 글

ssd_inception_v2_coco_2017_11_17.tar.gz  (0) 2022.04.13
nvidia jetson deepstream objectDetector_SSD 플러그인 분석  (0) 2022.04.13
jetson / armv8 EL  (0) 2022.04.07
nvidia jetson partition table  (0) 2022.04.06
jetson nano 부팅이 안됨  (0) 2022.04.06
Posted by 구차니
embeded/jetson2022. 4. 7. 15:36
Posted by 구차니
embeded/jetson2022. 4. 6. 18:32

약자에 대한 테이블 없나..

 

[링크 : https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/bootflow_jetson_xavier.html#]

 

Jetson Nano Development Module (P3448-0000) Flashed to Micro SD Card

NameTypeAlloc PolicyFS TypeSizeFS AttributeAlloc AttributeReservedFilenameDescription
GP1
GP1
sequential
basic
2097152
0
8
0 %
 
Required. Contains primary GPT of the sdcard device. All partitions defined after this entry are configured in the kernel, and are accessible by standard partition tools such as gdisk and parted.
APP
data
sequential
basic
APPSIZE
0
0x8
0 %
APPFILE
Required. Contains the rootfs. This partition must be defined after primary_GPT so that it can be accessed as the fixed known special device /dev/mmcblk0p1.
TXC
TBCTYPE
sequential
basic
131072
0
8
0 %
TBCFILE
Required. Contains TegraBoot CPU-side binary.
RP1
data
sequential
basic
458752
0
0x8
0 %
DTBFILE
Required. Contains Bootloader DTB binary.
EBT
bootloader
sequential
basic
589824
0
8
0 %
EBTFILE
Required. Contains CBoot, the final boot stage CPU bootloader binary that loads the binary in the kernel partition..
WX0
WB0TYPE
sequential
basic
65536
0
8
0 %
WB0FILE
Required. Contains warm boot binary.
BXF
data
sequential
basic
196608
0
8
0 %
BPFFILE
Required. Contains SC7 entry firmware.
BXF-DTB
data
sequential
basic
393216
0
8
0 %
BPFDTB-FILE
Optional. Reserved for future use by BPMP DTB binary; can't remove.
FX
FBTYPE
sequential
basic
65536
0
0x8
0 %
FBFILE
Optional. Reserved for fuse bypass; removeable.
TXS
data
sequential
basic
458752
0
8
0 %
TOSFILE
Required. Contains TOS binary.
DXB
data
sequential
basic
458752
0
0x8
0 %
DTBFILE
Required. Contains kernel DTB binary.
LNX
data
sequential
basic
786432
0
0x8
0 %
LNXFILE
Required. Contains U-Boot, which loads and launches the kernel from the rootfs at /boot.
EXS
data
sequential
basic
65536
0
8
0 %
EKSFILE
Optional. Contains the encrypted keys.
BMP
data
sequential
basic
81920
0
0x8
0 %
bmp.blob
Optional. Contains BMP images for splash screen display during boot.
RP4
data
sequential
basic
131072
0
0x8
0 %
rp4.blob
Required. Contains XUSB module’s firmware file, making XUSB a true USB 3.0 compliant host controller.
GPT
GPT
sequential
basic
2097152
0
8
0 %
 
Required. Contains secondary GPT of the sdcard device.

[링크 : https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra Linux Driver Package Development Guide/part_config.html#]

 

+

22.04.07

BootROM (BR) 
Boot Configuration Table (BCT)
bootloader (BL)

[링크 : https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/bootflow_jetson_nano.html#wwpID0E02B0HA]


boot-file-set (BFS)
kernel-file-set (KFS) 
[링크 : https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/bootloader_update_nano_tx1.html]


EKB or EKS: Encrypted keyblob, an encrypted blob which holds developer-defined content
[링크 : https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/trusty.html]

1.Boot partitions, which are used in the boot process, and are visible only to Bootloader.
Many of the boot partitions have redundant copy partitions. The copy partitions must have the same names as their primaries with the suffix ‘‑1’. For example, the NVC partition’s copy must be named NVC‑1.
The boot partitions are:
•BCT, which contains redundant instances of the Boot Configuration Table. This must be the first partition on the boot device.
•NVC contains TegraBoot. This must be the second boot partition. The following boot partitions, PT through SPF, are part of the BFS.
•PT contains layout information for each BFS, and indicates the beginning of each one. It is the first partition in the BFS.
•TBC contains the TegraBoot CPU-side binary.
•RP1 contains TegraBoot DTBs.
•EBT contains CBoot.
•WB0 contains the warm boot binary.
•BPF contains BPMP microcode.
•NVC‑1 contains a copy of NVC.
•PT‑1 through BPF‑1 are copy partitions for the primaries NVC through BPF, making up a copy of the BFS, denoted BFS‑1.
•PAD is an empty partition which ensures the VER and VER_b are at the very end of the boot partition.
•VER_b contains additional version information for redundancy and version checking.
•VER contains version information.
2.GP1 contains the sdmmc_user device’s primary GPT. All partitions defined after this one are configured in the Linux kernel, and are accessible by standard partition tools such as gdisk and parted.
3.User partitions, which have a variety of uses. Some of them may be deleted, and/or may be mounted and used to store application files.
The following partitions constitute the kernel-file-set (KFS), and have redundant copy partitions:
•DTB contains kernel DTBs.
•TOS contains the trusted OS binary.
•EKS is optional, and is reserved for future use.
•LNX contains either the Linux kernel or U-Boot, depending on your choice of DFLT_KERNEL_IMAGE in the configuration file.
•DTB‑1 through EKS‑1 constitute a copy of the primary KFS, denoted KFS‑1.
•Other partitions, such as APP and BMP, are outside the scope of this document. For information about these partitions, see the appropriate subsection for your Jetson platform in Default Partition Overview.
Kernel-file-set (KFS)

[링크 : https://docs.nvidia.com/jetson/l4t/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/bootloader_update_nano_tx1.html#]

'embeded > jetson' 카테고리의 다른 글

nvidia jetson deepstream objectDetector_SSD 실행 스크립트 분석  (0) 2022.04.13
jetson / armv8 EL  (0) 2022.04.07
jetson nano 부팅이 안됨  (0) 2022.04.06
deepstream triton server  (0) 2022.03.30
deepstream part.3  (0) 2022.03.29
Posted by 구차니
embeded/jetson2022. 4. 6. 15:07

월요일까지 잘 쓰던 녀석인데 수요일에 켜려니 안 돼서 멘붕

다른 SD 메모리 밀고 이미지 구워도 안켜져서 멘붕

젯슨 많이 다뤄보신분에게 여쭤보니 sdk manager를 통해서 밀어 버리지 않으면 복구 안된다고..

그러면.. jetson 보드 자체, SD 카드가 아닌 영역에도 먼가 손을 대는건가?

 

[0000.125] [L4T TegraBoot] (version 00.00.2018.01-l4t-8728f3cb)
[0000.130] Processing in cold boot mode Bootloader 2
[0000.135] A02 Bootrom Patch rev = 1023
[0000.138] Power-up reason: pmc por
[0000.141] No Battery Present
[0000.144] pmic max77620 reset reason
[0000.147] pmic max77620 NVERC : 0x40
[0000.151] RamCode = 0
[0000.153] Platform has DDR4 type RAM
[0000.156] max77620 disabling SD1 Remote Sense
[0000.161] Setting DDR voltage to 1125mv
[0000.165] Serial Number of Pmic Max77663: 0x291ae2
[0000.172] Entering ramdump check
[0000.175] Get RamDumpCarveOut = 0x0
[0000.179] RamDumpCarveOut=0x0,  RamDumperFlag=0xe59ff3f8
[0000.184] Last reboot was clean, booting normally!
[0000.188] Sdram initialization is successful
[0000.192] SecureOs Carveout Base=0x00000000ff800000 Size=0x00800000
[0000.199] Lp0 Carveout Base=0x00000000ff780000 Size=0x00001000
[0000.204] BpmpFw Carveout Base=0x00000000ff700000 Size=0x00080000
[0000.210] GSC1 Carveout Base=0x00000000ff600000 Size=0x00100000
[0000.216] GSC2 Carveout Base=0x00000000ff500000 Size=0x00100000
[0000.222] GSC4 Carveout Base=0x00000000ff400000 Size=0x00100000
[0000.228] GSC5 Carveout Base=0x00000000ff300000 Size=0x00100000
[0000.234] GSC3 Carveout Base=0x000000017f300000 Size=0x00d00000
[0000.250] RamDump Carveout Base=0x00000000ff280000 Size=0x00080000
[0000.256] Platform-DebugCarveout: 0
[0000.259] Nck Carveout Base=0x00000000ff080000 Size=0x00200000
[0000.265] Non secure mode, and RB not enabled.
[0000.269] BoardID = 3448, SKU = 0x0
[0000.272] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.276] Nano-SD: checking PT table on QSPI ...
[0000.281] Initialize FailControl
[0000.284] Read PT from (2:0)
[0000.314] PT crc32 and magic check passed.
[0000.318] Using BFS PT to query partitions
[0000.324] Loading Tboot-CPU binary
[0000.352] Verifying TBC in OdmNonSecureSBK mode
[0000.362] Bootloader load address is 0xa0000000, entry address is 0xa0000258
[0000.369] Bootloader downloaded successfully.
[0000.373] Downloaded Tboot-CPU binary to 0xa0000258
[0000.378] MAX77620_GPIO5 configured
[0000.381] CPU power rail is up
[0000.384] CPU clock enabled
[0000.388] Performing RAM repair
[0000.391] Updating A64 Warmreset Address to 0xa00002e9
[0000.396] BoardID = 3448, SKU = 0x0
[0000.399] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.403] Nano-SD: checking PT table on QSPI ...
[0000.407] NvTbootFailControlDoFailover: No failover; Continuing ...
[0000.413] Loading NvTbootBootloaderDTB
[0000.480] Verifying NvTbootBootloaderDTB in OdmNonSecureSBK mode
[0000.549] Bootloader DTB Load Address: 0x83000000
[0000.554] BoardID = 3448, SKU = 0x0
[0000.557] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.561] Nano-SD: checking PT table on QSPI ...
[0000.565] NvTbootFailControlDoFailover: No failover; Continuing ...
[0000.571] Loading NvTbootKernelDTB
[0000.637] Verifying NvTbootKernelDTB in OdmNonSecureSBK mode
[0000.660] Verification of NvTbootKernelDTB failed!
[0000.665] NvTbootKernelDTB partition is corrupted!
[0000.669] *** Set to failover in the next boot ***
[0000.674] NvTbootFailControlSetClobber:
[0000.678] *** Rebooting ***

 

+ 22.04.07

jetson nano sd 정상부팅 로그

비교해보니 "Verification of NvTbootKernelDTB failed!" 에러가 나면서 무한 리부팅 걸린 듯.

그런데 image로 구우면.. DTB 파티션 안쓰나?

[0000.125] [L4T TegraBoot] (version 00.00.2018.01-l4t-8728f3cb)
[0000.130] Processing in cold boot mode Bootloader 2
[0000.135] A02 Bootrom Patch rev = 1023
[0000.138] Power-up reason: pmc por
[0000.141] No Battery Present
[0000.144] pmic max77620 reset reason
[0000.147] pmic max77620 NVERC : 0x40
[0000.151] RamCode = 0
[0000.153] Platform has DDR4 type RAM
[0000.156] max77620 disabling SD1 Remote Sense
[0000.161] Setting DDR voltage to 1125mv
[0000.165] Serial Number of Pmic Max77663: 0x291ae2
[0000.172] Entering ramdump check
[0000.175] Get RamDumpCarveOut = 0x0
[0000.179] RamDumpCarveOut=0x0,  RamDumperFlag=0xe59ff3f8
[0000.184] Last reboot was clean, booting normally!
[0000.188] Sdram initialization is successful
[0000.192] SecureOs Carveout Base=0x00000000ff800000 Size=0x00800000
[0000.199] Lp0 Carveout Base=0x00000000ff780000 Size=0x00001000
[0000.204] BpmpFw Carveout Base=0x00000000ff700000 Size=0x00080000
[0000.210] GSC1 Carveout Base=0x00000000ff600000 Size=0x00100000
[0000.216] GSC2 Carveout Base=0x00000000ff500000 Size=0x00100000
[0000.222] GSC4 Carveout Base=0x00000000ff400000 Size=0x00100000
[0000.228] GSC5 Carveout Base=0x00000000ff300000 Size=0x00100000
[0000.234] GSC3 Carveout Base=0x000000017f300000 Size=0x00d00000
[0000.250] RamDump Carveout Base=0x00000000ff280000 Size=0x00080000
[0000.256] Platform-DebugCarveout: 0
[0000.259] Nck Carveout Base=0x00000000ff080000 Size=0x00200000
[0000.265] Non secure mode, and RB not enabled.
[0000.269] BoardID = 3448, SKU = 0x0
[0000.272] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.276] Nano-SD: checking PT table on QSPI ...
[0000.281] Initialize FailControl
[0000.284] Read PT from (2:0)
[0000.314] PT crc32 and magic check passed.
[0000.318] Using BFS PT to query partitions
[0000.324] Loading Tboot-CPU binary
[0000.352] Verifying TBC in OdmNonSecureSBK mode
[0000.362] Bootloader load address is 0xa0000000, entry address is 0xa0000258
[0000.369] Bootloader downloaded successfully.
[0000.373] Downloaded Tboot-CPU binary to 0xa0000258
[0000.378] MAX77620_GPIO5 configured
[0000.381] CPU power rail is up
[0000.384] CPU clock enabled
[0000.388] Performing RAM repair
[0000.391] Updating A64 Warmreset Address to 0xa00002e9
[0000.396] BoardID = 3448, SKU = 0x0
[0000.399] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.403] Nano-SD: checking PT table on QSPI ...
[0000.407] NvTbootFailControlDoFailover: No failover; Continuing ...
[0000.413] Loading NvTbootBootloaderDTB
[0000.480] Verifying NvTbootBootloaderDTB in OdmNonSecureSBK mode
[0000.549] Bootloader DTB Load Address: 0x83000000
[0000.554] BoardID = 3448, SKU = 0x0
[0000.557] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.561] Nano-SD: checking PT table on QSPI ...
[0000.565] NvTbootFailControlDoFailover: No failover; Continuing ...
[0000.571] Loading NvTbootKernelDTB
[0000.637] Verifying NvTbootKernelDTB in OdmNonSecureSBK mode
[0000.706] Kernel DTB Load Address: 0x83100000
[0000.710] BoardID = 3448, SKU = 0x0
[0000.714] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.717] Nano-SD: checking PT table on QSPI ...
[0000.722] NvTbootFailControlDoFailover: No failover; Continuing ...
[0000.730] Loading cboot binary
[0000.845] Verifying EBT in OdmNonSecureSBK mode
[0000.887] Bootloader load address is 0x92c00000, entry address is 0x92c00258
[0000.894] Bootloader downloaded successfully.
[0000.898] BoardID = 3448, SKU = 0x0
[0000.901] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.905] Nano-SD: checking PT table on QSPI ...
[0000.910] NvTbootFailControlDoFailover: No failover; Continuing ...
[0000.916] PT: Partition NCT NOT found !
[0000.920] Warning: Find Partition via PT Failed
[0000.924] Next binary entry address: 0x92c00258
[0000.928] BoardId: 3448
[0000.933] Overriding pmu board id with proc board id
[0000.938] Display board id is not available
[0000.942] BoardID = 3448, SKU = 0x0
[0000.945] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0000.949] Nano-SD: checking PT table on QSPI ...
[0000.953] NvTbootFailControlDoFailover: No failover; Continuing ...
[0001.060] Verifying SC7EntryFw in OdmNonSecureSBK mode
[0001.115] /bpmp deleted
[0001.117] SC7EntryFw header found loaded at 0xff700000
[0001.311] OVR2 PMIC
[0001.313] Bpmp FW successfully loaded
[0001.316] BoardID = 3448, SKU = 0x0
[0001.320] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0001.323] Nano-SD: checking PT table on QSPI ...
[0001.328] NvTbootFailControlDoFailover: No failover; Continuing ...
[0001.335] WB0 init successfully at 0xff780000
[0001.339] Verifying NvTbootWb0 in OdmNonSecureSBK mode
[0001.344] Set NvDecSticky Bits
[0001.348] GSC2 address ff53fffc value c0edbbcc
[0001.354] GSC MC Settings done
[0001.357] BoardID = 3448, SKU = 0x0
[0001.360] QSPI-ONLY: SkipQspiOnlyFlag = 0
[0001.364] Nano-SD: checking PT table on QSPI ...
[0001.368] NvTbootFailControlDoFailover: No failover; Continuing ...
[0001.375] TOS Image length 53680
[0001.378]  Monitor size 53680
[0001.381]  OS size 0
[0001.396] Secure Os AES-CMAC Verification Success!
[0001.401] TOS image cipher info: plaintext
[0001.405] Loading and Validation of Secure OS Successful
[0001.421] SC7 Entry Firmware - 0xff700000, 0x4000
[0001.425] NvTbootPackSdramParams: start.
[0001.430] NvTbootPackSdramParams: done.
[0001.434] Tegraboot started after 51854 us
[0001.438] Basic modules init took 931208 us
[0001.442] Sec Bootdevice Read Time = 12 ms, Read Size = 61 KB
[0001.448] Sec Bootdevice Write Time = 0 ms, Write Size = 0 KB
[0001.453] Next stage binary read took 102860 us
[0001.457] Carveout took -132464 us
[0001.461] CPU initialization took 515446 us
[0001.465] Total time taken by TegraBoot 1417050 us

[0001.469] Starting CPU & Halting co-processor

64NOTICE:  BL31: v1.3(release):b5eeb33
NOTICE:  BL31: Built : 08:56:32, Feb 19 2022
ERROR:   Error initializing runtime service trusty_fast

 

1번핀 위치 잘 보고 꽂아야 함.

아무생각없이 꽂아서 PMIC_SYS_RST 가 연결되면 전원 LED에 불도 안들어 온다.

 

[링크 : https://www.mouser.com/pdfDocs/Jetson_Nano_Developer_Kit_User_Guide.pdf]

 

아래 명령어들로 인해서 cboot, EBT, SBK 키 등이 다르게 설정되면서

SD 메모리 부팅이 정상적으로 진행되는거 아닐까?

15:57:09 INFO: Flash Jetson Nano - flash: *** Flashing target device started. ***
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.0047 ] tegrasign --getmode mode.txt --key None
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.0190 ] RCM 1 is saved as rcm_1.rcm
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.0757 ] Assuming zero filled SBK key
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.1716 ] tegrabct --bct P3448_A00_lpddr4_204Mhz_P987.cfg --chip 0x21 0
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.1847 ] tegrabct --bct P3448_A00_lpddr4_204Mhz_P987.bct --chip 0x21 0 --updatedevparam flash.xml.bin
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.1942 ] tegraparser --pt flash.xml.bin --chip 0x21 0 --updatecustinfo P3448_A00_lpddr4_204Mhz_P987.bct
15:57:09 INFO: Flash Jetson Nano - flash: [ 0.2350 ] RCM version 0X210001
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.2474 ] tegrarcm --download bct P3448_A00_lpddr4_204Mhz_P987.bct
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.2547 ] [...] 100%
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.4867 ] tegrahost --chip 0x21 --align cboot.bin
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.4955 ] tegrahost --magicid EBT --appendsigheader cboot.bin cboot.bin_blheader
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.5437 ] Assuming zero filled SBK key
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.5577 ] tegrahost --updatesigheader tegra210-p3448-0000-p3449-0000-a02.dtb_blheader.encrypt tegra210-p3448-0000-p3449-0000-a02.dtb_blheader.hash zerosbk
15:57:10 INFO: Flash Jetson Nano - flash: [ 1.5704 ] Sending ebt
15:57:11 INFO: Flash Jetson Nano - flash: [ 1.5706 ] [...] 100%
15:57:11 INFO: Flash Jetson Nano - flash: [ 1.6794 ] Sending rp1
15:57:11 INFO: Flash Jetson Nano - flash: [ 1.6833 ] [...] 100%
15:57:11 INFO: Flash Jetson Nano - flash: [ 1.7451 ] Applet version 00.01.0000
15:57:11 INFO: Flash Jetson Nano - flash: [ 1.7561 ] tegrarcm --oem platformdetails storage storage_info.bin
15:57:11 INFO: Flash Jetson Nano - flash: [ 2.4503 ] [...] 100%
15:57:26 INFO: Flash Jetson Nano - flash: [ 2.4576 ] Writing partition PT with crc-flash.xml.bin
15:57:26 INFO: Flash Jetson Nano - flash: [ 17.2106 ] [...] 100%
15:57:30 INFO: Flash Jetson Nano - flash: [ 17.2180 ] Writing partition NVC with nvtboot.bin.encrypt
15:57:30 INFO: Flash Jetson Nano - flash: [ 21.1299 ] [...] 100%
15:57:40 INFO: Flash Jetson Nano - flash: [ 21.1431 ] Writing partition NVC_R with nvtboot.bin.encrypt
15:57:40 INFO: Flash Jetson Nano - flash: [ 31.1584 ] [...] 100%
15:57:50 INFO: Flash Jetson Nano - flash: [ 31.1746 ] Writing partition TBC with nvtboot_cpu.bin.encrypt
15:57:50 INFO: Flash Jetson Nano - flash: [ 41.1998 ] [...] 100%
15:57:54 INFO: Flash Jetson Nano - flash: [ 41.2121 ] Writing partition RP1 with kernel_tegra210-p3448-0000-p3449-0000-a02.dtb.encrypt
15:57:54 INFO: Flash Jetson Nano - flash: [ 45.3820 ] [...] 100%
15:58:08 INFO: Flash Jetson Nano - flash: [ 45.3993 ] Writing partition EBT with cboot.bin.encrypt
15:58:08 INFO: Flash Jetson Nano - flash: [ 59.3284 ] [...] 100%
15:58:37 INFO: Flash Jetson Nano - flash: [ 59.3567 ] Writing partition WB0 with warmboot.bin.encrypt
15:58:37 INFO: Flash Jetson Nano - flash: [ 88.4141 ] [...] 100%
15:58:38 INFO: Flash Jetson Nano - flash: [ 88.4254 ] Writing partition BPF with sc7entry-firmware.bin.encrypt
15:58:38 INFO: Flash Jetson Nano - flash: [ 88.6887 ] [...] 100%
15:58:38 INFO: Flash Jetson Nano - flash: [ 88.7003 ] Writing partition TOS with tos-mon-only.img.encrypt
15:58:38 INFO: Flash Jetson Nano - flash: [ 88.9647 ] [...] 100%
15:58:41 INFO: Flash Jetson Nano - flash: [ 88.9777 ] Writing partition DTB with kernel_tegra210-p3448-0000-p3449-0000-a02.dtb.encrypt
15:58:41 INFO: Flash Jetson Nano - flash: [ 92.4136 ] [...] 100%
15:58:55 INFO: Flash Jetson Nano - flash: [ 92.4320 ] Writing partition LNX with boot.img.encrypt
15:58:55 INFO: Flash Jetson Nano - flash: [ 106.3609 ] [...] 100%
15:59:35 INFO: Flash Jetson Nano - flash: [ 146.1933 ] Writing partition EKS with eks.img
15:59:35 INFO: Flash Jetson Nano - flash: [ 146.1946 ] [...] 100%
15:59:35 INFO: Flash Jetson Nano - flash: [ 146.2034 ] Writing partition BMP with bmp.blob
15:59:35 INFO: Flash Jetson Nano - flash: [ 146.4667 ] [...] 100%
15:59:44 INFO: Flash Jetson Nano - flash: [ 146.4847 ] Writing partition RP4 with rp4.blob
15:59:44 INFO: Flash Jetson Nano - flash: [ 155.5340 ] [...] 100%
15:59:52 INFO: Flash Jetson Nano - flash: [ 155.5537 ] Writing partition VER_b with qspi_bootblob_ver.txt
15:59:52 INFO: Flash Jetson Nano - flash: [ 163.3820 ] [...] 100%
15:59:53 INFO: Flash Jetson Nano - flash: [ 163.3969 ] Writing partition VER with qspi_bootblob_ver.txt
15:59:53 INFO: Flash Jetson Nano - flash: [ 163.6605 ] [...] 100%
15:59:53 INFO: Flash Jetson Nano - flash: [ 163.6712 ] Writing partition APP with system.img
100%
16:09:26 INFO: Flash Jetson Nano - flash: [ 736.6352 ] tegradevflash --write BCT P3448_A00_lpddr4_204Mhz_P987.bct
16:09:26 INFO: Flash Jetson Nano - flash: [ 736.6451 ] [...] 100%
16:09:32 INFO: Flash Jetson Nano - flash: [ 743.3077 ] tegradevflash --reboot coldboot
16:09:32 INFO: Flash Jetson Nano - flash: [ 743.3115 ] Cboot version 00.01.0000
16:09:34 INFO: Flash Jetson Nano - flash: [ 743.3184 ]
16:09:54 INFO: Flash Jetson Nano - flash: [ Component Install Finished Successfully ]
16:09:54 INFO: Flash Jetson Nano - flash: command finished successfully
16:09:54 SUMMARY: Flash Jetson Nano - flash: Install completed successfully.
16:12:05 INFO: Exactly one NVIDIA device detected, as expected.
16:12:05 INFO: Start to check if if default ip avaliable in ip addr
16:12:05 DEBUG: running command < true >
16:12:05 INFO: exec_command: true
16:12:05 INFO: command finished successfully
16:12:05 DEBUG: running command < ip addr | grep 192.168.55.1 >
16:12:05 INFO: exec_command: ip addr | grep 192.168.55.1
16:12:05 INFO: command finished successfully
16:12:05 INFO: Validated default ip avaliable.
16:12:05 INFO: Start to check if ip and ssh up with customize ip...
16:12:05 DEBUG: running command < true >
16:12:05 INFO: command finished successfully
16:12:05 DEBUG: running command < nc -z -vv -w 5 192.168.55.1 22 >
16:12:05 DEBUG: command terminated with error

 

+

~/nvidia/nvidia_sdk/JetPack_4.6.1_Linux_JETSON_NANO_TARGETS/Linux_for_Tegra/bootloader$ ls -al tegra*
-rw-r--r-- 1 root     root      229792  4월  6 15:57 tegra210-p3448-0000-p3449-0000-a02.dtb
-rwxrwxr-x 1 minimonk minimonk  959237  2월 20 02:04 tegrabct
-rwxrwxr-x 1 minimonk minimonk  922636  2월 20 02:04 tegradevflash
-rwxrwxr-x 1 minimonk minimonk   50627  2월 20 02:04 tegraflash.py
-rwxrwxr-x 1 minimonk minimonk  163998  2월 20 02:04 tegraflash_internal.py
-rwxrwxr-x 1 minimonk minimonk  912826  2월 20 02:04 tegrahost
-rwxrwxr-x 1 minimonk minimonk 2539744  2월 20 02:04 tegraopenssl
-rwxrwxr-x 1 minimonk minimonk  917258  2월 20 02:04 tegraparser
-rwxrwxr-x 1 minimonk minimonk  931472  2월 20 02:04 tegrarcm
-rwxrwxr-x 1 minimonk minimonk 1092265  2월 20 02:04 tegrasign
-rwxrwxr-x 1 minimonk minimonk   10349  2월 20 02:04 tegrasign_v3.py
-rwxrwxr-x 1 minimonk minimonk   33127  2월 20 02:04 tegrasign_v3_internal.py
-rwxrwxr-x 1 minimonk minimonk    7830  2월 20 02:04 tegrasign_v3_util.py

 

 

+ 22.04.07

 

Have you made any changes to device tree? If not, then likely you will need to reflash the unit (NvTboot is a very early stage in boot, long before Linux is ever reached). If you have an SD card for rootfs you could save a copy of that. Note that where some of this content is found differs between QSPI memory and the SD card depending on release. I can’t say much more which is useful, but if you do need more help, then knowing the release used on this unit and any flash history would help whoever answers.

[링크 : https://forums.developer.nvidia.com/t/jetson-nano-4gb-is-not-booting-suddenly/198991]

[링크 : https://forums.developer.nvidia.com/t/flashing-just-dtb-on-28-2-and-tx1/62007/3]

'embeded > jetson' 카테고리의 다른 글

jetson / armv8 EL  (0) 2022.04.07
nvidia jetson partition table  (0) 2022.04.06
deepstream triton server  (0) 2022.03.30
deepstream part.3  (0) 2022.03.29
deepstream onnx part.2  (0) 2022.03.29
Posted by 구차니
embeded/jetson2022. 3. 30. 21:49

이 한줄이 참.. 많은 문제를 낳는구만.. -_-

$ gst-launch-1.0 nvinferserver
ERROR: pipeline could not be constructed: no element "nvinferserver".

 

왜 안되나 했는데 native하게 까는건 없는지 멀 하려고 하면 다 막히고, 걍 docker로 ㄱㄱ -_-

Can Gst-nvinfereserver (DeepSream Triton plugin) run on Nano platform?
Yes. But due to Nano’s memory limitation, performance of certain models is slow and even run into OOM (out of memory) issues, specifically on heavy Tensorflow models. There is an option to run CPU instance for certain models on Nano. For more details, see samples/configs/deepstream-app-triton/README

[링크 : https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_FAQ.html]

[링크 : https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_plugin_gst-nvinferserver.html]

 

어느쪽 말이 맞는거냐 -_-

[링크 : https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_plugin_gst-nvinferserver.html]

 

일단 실행은 이렇게 하면 되려나.. ds l4t 인지 ds triton으로 해야할지 모르겠네

$ sudo docker run --rm -it nvcr.io/nvidia/deepstream-l4t:6.0-samples /bin/bash

[링크 : https://medium.com/@Smartcow_ai/building-arm64-based-docker-containers-for-nvidia-jetson-devices-on-an-x86-based-host-d72cfa535786]

 

하나가 되면 하나가 안되고 아놔 ㅋㅋㅋ

$ sudo docker image ls
REPOSITORY                      TAG            IMAGE ID       CREATED       SIZE
nvcr.io/nvidia/deepstream       6.0.1-triton   ac5f4c456b5b   5 weeks ago   17.5GB
nvcr.io/nvidia/deepstream-l4t   6.0.1-triton   d3984db2b6b1   6 weeks ago   3.98GB

$ sudo docker run --rm -it d3984db2b6b1 /bin/bash
root@d30ca855a6ce:/opt/nvidia/deepstream/deepstream-6.0# gst-inspect-1.0 nvinferserver

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.344: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_osd.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.378: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_inferaudio.so': libcufft.so.10: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.454: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_deepstream_bins.so': libnvdsbufferpool.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.456: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libgstnvvideoconvert.so': libnvdsbufferpool.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.477: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_preprocess.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.479: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_infer.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.490: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_multistream.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.499: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_ofvisual.so': libnvdsbufferpool.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.508: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_dsexample.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.511: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_tracker.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.543: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_segvisual.so': libnvdsbufferpool.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.619: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_inferserver.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.638: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libcustom2d_preprocess.so': libnvbufsurftransform.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.660: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_udp.so': librivermax.so.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.664: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_dewarper.so': libnvdsbufferpool.so.1.0.0: cannot open shared object file: No such file or directory

(gst-plugin-scanner:12): GStreamer-WARNING **: 01:40:47.667: Failed to load plugin '/usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_multistreamtiler.so': libnvbufsurface.so.1.0.0: cannot open shared object file: No such file or directory
No such element or plugin 'nvinferserver'

 

에라이 -_-

$ sudo docker image list
REPOSITORY                      TAG            IMAGE ID       CREATED       SIZE
nvcr.io/nvidia/deepstream       6.0.1-triton   ac5f4c456b5b   5 weeks ago   17.5GB
nvcr.io/nvidia/deepstream-l4t   6.0.1-triton   d3984db2b6b1   6 weeks ago   3.98GB

$ sudo docker run --rm -it ac5f4c456b5b /bin/bash
WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested
standard_init_linux.go:228: exec user process caused: exec format error

 

Building Jetson Containers on an x86 workstation (using qemu)

[링크 : https://github.com/NVIDIA/nvidia-docker/wiki/NVIDIA-Container-Runtime-on-Jetson]

 

+

[링크 : https://blog.ml6.eu/nvidia-deepstream-quickstart-9147dd49a15d]

 

+

deb로 깔아도 install을 해주어야 하는것인가!!!

$ cd /opt/nvidia/deepstream/deepstream-6.0
$ sudo ./install.sh
$ sudo ldconfig

[링크 : https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Quickstart.html#jetson-setup]

 

 

$ gst-inspect-1.0 nvinferserver
Factory Details:
  Rank                     primary (256)
  Long-name                NvInferServer plugin
  Klass                    NvInferServer Plugin
  Description              Nvidia DeepStreamSDK TensorRT plugin
  Author                   NVIDIA Corporation. Deepstream for Tesla forum: https://devtalk.nvidia.com/default/board/209

Plugin Details:
  Name                     nvdsgst_inferserver
  Description              NVIDIA DeepStreamSDK TensorRT Inference Server plugin
  Filename                 /usr/lib/aarch64-linux-gnu/gstreamer-1.0/deepstream/libnvdsgst_inferserver.so
  Version                  6.0.0
  License                  Proprietary
  Source module            nvinferserver
  Binary package           NVIDIA DeepStreamSDK TensorRT Inference Server plugin
  Origin URL               http://nvidia.com/

GObject
 +----GInitiallyUnowned
       +----GstObject
             +----GstElement
                   +----GstBaseTransform
                         +----GstNvInferServer

Pad Templates:
  SRC template: 'src'
    Availability: Always
    Capabilities:
      video/x-raw(memory:NVMM)
                 format: { (string)NV12, (string)RGBA }
                  width: [ 1, 2147483647 ]
                 height: [ 1, 2147483647 ]
              framerate: [ 0/1, 2147483647/1 ]

  SINK template: 'sink'
    Availability: Always
    Capabilities:
      video/x-raw(memory:NVMM)
                 format: { (string)NV12, (string)RGBA }
                  width: [ 1, 2147483647 ]
                 height: [ 1, 2147483647 ]
              framerate: [ 0/1, 2147483647/1 ]

Element has no clocking capabilities.
Element has no URI handling capabilities.

Pads:
  SINK: 'sink'
    Pad Template: 'sink'
  SRC: 'src'
    Pad Template: 'src'

Element Properties:
  name                : The name of the object
                        flags: readable, writable
                        String. Default: "nvinferserver0"
  parent              : The parent of the object
                        flags: readable, writable
                        Object of type "GstObject"
  qos                 : Handle Quality-of-Service events
                        flags: readable, writable
                        Boolean. Default: false
  unique-id           : Unique ID for the element. Can be used to identify output of the element
                        flags: readable, writable, changeable only in NULL or READY state
                        Unsigned Integer. Range: 0 - 4294967295 Default: 0
  process-mode        : Inferserver processing mode, (0):None, (1)FullFrame, (2)ClipObject
                        flags: readable, writable, changeable only in NULL or READY state
                        Unsigned Integer. Range: 0 - 2 Default: 0
  config-file-path    : Path to the configuration file for this instance of nvinferserver
                        flags: readable, writable, changeable in NULL, READY, PAUSED or PLAYING state
                        String. Default: ""
  batch-size          : Maximum batch size for inference
                        flags: readable, writable, changeable only in NULL or READY state
                        Unsigned Integer. Range: 0 - 1024 Default: 0
  infer-on-gie-id     : Infer on metadata generated by GIE with this unique ID.
                        Set to -1 to infer on all metadata.
                        flags: readable, writable, changeable only in NULL or READY state
                        Integer. Range: -1 - 2147483647 Default: -1
  infer-on-class-ids  : Operate on objects with specified class ids
                        Use string with values of class ids in ClassID (int) to set the property.
                         e.g. 0:2:3
                        flags: readable, writable, changeable only in NULL or READY state
                        String. Default: ""
  interval            : Specifies number of consecutive batches to be skipped for inference
                        flags: readable, writable, changeable only in NULL or READY state
                        Unsigned Integer. Range: 0 - 2147483647 Default: 0
  raw-output-generated-callback: Pointer to the raw output generated callback funtion
                        (type: gst_nvinfer_server_raw_output_generated_callback in 'gstnvdsinfer.h')
                        flags: readable, writable, changeable only in NULL or READY state
                        Pointer.
  raw-output-generated-userdata: Pointer to the userdata to be supplied with raw output generated callback
                        flags: readable, writable, changeable only in NULL or READY state
                        Pointer.

'embeded > jetson' 카테고리의 다른 글

nvidia jetson partition table  (0) 2022.04.06
jetson nano 부팅이 안됨  (0) 2022.04.06
deepstream part.3  (0) 2022.03.29
deepstream onnx part.2  (0) 2022.03.29
jetson nano python numpy Illegal instruction (core dumped)  (0) 2022.03.29
Posted by 구차니
embeded/jetson2022. 3. 29. 16:02

 

[링크 : https://github.com/NVIDIA-AI-IOT/deepstream_python_apps]

[링크 : https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/tree/master/apps/deepstream-ssd-parser]

 

 

----

tritonserver for jetson (build)

[링크 : https://github.com/triton-inference-server/server/blob/main/docs/jetson.md]

 

+

[ 50%] Building CXX object CMakeFiles/triton-core.dir/backend_model_instance.cc.o
In file included from /home/jetson/work/server/build/_deps/repo-core-src/src/backend_model_instance.cc:37:0:
/home/jetson/work/server/build/_deps/repo-core-src/src/metrics.h:40:10: fatal error: dcgm_agent.h: No such file or directory
 #include <dcgm_agent.h>
          ^~~~~~~~~~~~~~
compilation terminated.

[링크 : https://github.com/NVIDIA/gpu-monitoring-tools/tree/master/bindings/go/dcgm]

  [링크 : https://github.com/NVIDIA/gpu-monitoring-tools]

 

pytorch 다운로드 경로

[링크 : https://jstar0525.tistory.com/171]

 

Known Issues
Triton PIP wheels for ARM SBSA are not available from PyPI and pip will install an incorrect Jetson version of Triton for ARM SBSA. The correct wheel file can be pulled directly from the ARM SBSA SDK image and manually installed.

[링크 : https://github.com/triton-inference-server/server/releases]

 

$ sudo docker pull nvcr.io/nvidia/tritonserver:21.11-py3-sdk

[링크 : https://zhuanlan.zhihu.com/p/471291236]

'embeded > jetson' 카테고리의 다른 글

jetson nano 부팅이 안됨  (0) 2022.04.06
deepstream triton server  (0) 2022.03.30
deepstream onnx part.2  (0) 2022.03.29
jetson nano python numpy Illegal instruction (core dumped)  (0) 2022.03.29
deepstream onnx  (0) 2022.03.28
Posted by 구차니
embeded/jetson2022. 3. 29. 11:32

생각해보니 deepstream onnx github 프로젝트의 경우

tiny_yolov2를 기반으로 작동하도록 libnvdsinfer_custom_bbox_tiny_yolo.so 를 생성했으니

ssd 와는 구조가 달라 당연히(?) 맞지 않으니 에러가 발생하고 죽는 듯.

[링크 : https://github.com/thatbrguy/Deep-Stream-ONNX]

 

ERROR: [TRT]: 2: [pluginV2DynamicExtRunner.cpp::execute::115] Error Code 2: Internal Error (Assertion status == kSTATUS_SUCCESS failed.)
ERROR: Build engine failed from config file
ERROR: failed to build trt engine.
0:08:17.537206102  9070     0x3f617730 ERROR                nvinfer gstnvinfer.cpp:632:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Error in NvDsInferContextImpl::buildModel() <nvdsinfer_context_impl.cpp:1934> [UID = 1]: build engine file failed
0:08:17.545680634  9070     0x3f617730 ERROR                nvinfer gstnvinfer.cpp:632:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Error in NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2020> [UID = 1]: build backend context failed
0:08:17.545766053  9070     0x3f617730 ERROR                nvinfer gstnvinfer.cpp:632:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Error in NvDsInferContextImpl::initialize() <nvdsinfer_context_impl.cpp:1257> [UID = 1]: generate backend failed, check config file settings
0:08:17.546456543  9070     0x3f617730 WARN                 nvinfer gstnvinfer.cpp:841:gst_nvinfer_start:<primary_gie> error: Failed to create NvDsInferContext instance
0:08:17.546521285  9070     0x3f617730 WARN                 nvinfer gstnvinfer.cpp:841:gst_nvinfer_start:<primary_gie> error: Config file path: /home/jetson/work/Deep-Stream-ONNX/config/config_infer_custom_yolo.txt, NvDsInfer Error: NVDSINFER_CONFIG_FAILED
** ERROR: <main:658>: Failed to set pipeline to PAUSED

 

azure의 custom vision 의 README에 기재된 링크를 가보았는데

[링크 : https://github.com/Azure-Samples/customvision-export-samples]

 

onnx 포맷으로는 python과 c#만 제공하고

해당 사이트에서 python을 받아서 실행해보니 하나의 사진에 대해서 처리가 가능한 예제를 제공한다.

[링크 : https://github.com/Azure-Samples/customvision-export-samples/tree/main/samples/python/onnx]

[링크 : https://github.com/Azure-Samples/customvision-export-samples/tree/main/samples/csharp/onnx]

 

 

+

ssd deepstream 예제가 있는데

python 스크립트에 h264 elementary stream을 넣어주어야 한댄다

[링크 : https://github.com/NVIDIA-AI-IOT/deepstream_python_apps/tree/master/apps/deepstream-ssd-parser]

 

-f h264가 포인트인 듯.

$ ffmpeg -f video4linux2 -s 320x240 -i /dev/video0 -vcodec libx264 -f h264 test.264

[링크 : https://stackoverflow.com/questions/27090114/what-does-elementary-stream-mean-in-terms-of-h264]

 

JVT NAL sequence, H.264 라는 타입으로 변경된 듯.

sample_0.h264: JVT NAL sequence, H.264 video @ L 31
sample_0.mp4:  ISO Media, MP4 v2 [ISO 14496-14]

 

Joint Video Team (JVT)
NAL: Network Abstraction Layer

[링크 : http://iphome.hhi.de/suehring/tml/JM%20Reference%20Software%20Manual%20(JVT-AE010).pdf]

 

+

sample_ssd_relu6.uff 파일은 ssd inception v2 기반 모델인가?

[링크 :  https://eva-support.adlinktech.com/docs/ssdnbspinception-v2-nbsp-nbsp-nbsp-nbspnbsp]

'embeded > jetson' 카테고리의 다른 글

deepstream triton server  (0) 2022.03.30
deepstream part.3  (0) 2022.03.29
jetson nano python numpy Illegal instruction (core dumped)  (0) 2022.03.29
deepstream onnx  (0) 2022.03.28
azure custom vision - precision, recall  (0) 2022.03.28
Posted by 구차니
embeded/jetson2022. 3. 29. 11:27

'embeded > jetson' 카테고리의 다른 글

deepstream part.3  (0) 2022.03.29
deepstream onnx part.2  (0) 2022.03.29
deepstream onnx  (0) 2022.03.28
azure custom vision - precision, recall  (0) 2022.03.28
flud nvidia cuda  (0) 2022.03.28
Posted by 구차니