Version tracking

 

python 3.7

keras 2.8.0

numpy 1.21.6

[링크 : https://github.com/saunack/MobileNetv2-SSD/blob/master/model.ipynb]

 

Versions installed:

$ pip install tensorflow==2.8.0
$ pip install numpy==1.21.6
$ pip install keras==2.8.0
$ pip install protobuf==3.19.0
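For convenience, the same pins can also go into a requirements.txt and be installed in one step (my own note, not from the original repo):

# requirements.txt
tensorflow==2.8.0
keras==2.8.0
numpy==1.21.6
protobuf==3.19.0

$ pip install -r requirements.txt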

 

------

Since it says keras 2.8 is required, tracking it down by keras release date

v2.8.0 (tagged Jan 7, 2022, commit d8fcb9d)

[링크 : https://github.com/keras-team/keras/tags?after=v2.9.0-rc1]

 

tensorflow version tracking

v2.8.0 (tagged Feb 1, 2022, commit 3f878cf)

[링크 : https://github.com/tensorflow/tensorflow/tags?after=v2.7.2]

 

protobuf 3.19.0

numpy 1.24.4 (below 1.25)

$ pip install numpy==1.34
Defaulting to user installation because normal site-packages is not writeable
ERROR: Could not find a version that satisfies the requirement numpy==1.34 (from versions: 1.3.0, 1.4.1, 1.5.0, 1.5.1, 1.6.0, 1.6.1, 1.6.2, 1.7.0, 1.7.1, 1.7.2, 1.8.0, 1.8.1, 1.8.2, 1.9.0, 1.9.1, 1.9.2, 1.9.3, 1.10.0.post2, 1.10.1, 1.10.2, 1.10.4, 1.11.0, 1.11.1, 1.11.2, 1.11.3, 1.12.0, 1.12.1, 1.13.0, 1.13.1, 1.13.3, 1.14.0, 1.14.1, 1.14.2, 1.14.3, 1.14.4, 1.14.5, 1.14.6, 1.15.0, 1.15.1, 1.15.2, 1.15.3, 1.15.4, 1.16.0, 1.16.1, 1.16.2, 1.16.3, 1.16.4, 1.16.5, 1.16.6, 1.17.0, 1.17.1, 1.17.2, 1.17.3, 1.17.4, 1.17.5, 1.18.0, 1.18.1, 1.18.2, 1.18.3, 1.18.4, 1.18.5, 1.19.0, 1.19.1, 1.19.2, 1.19.3, 1.19.4, 1.19.5, 1.20.0, 1.20.1, 1.20.2, 1.20.3, 1.21.0, 1.21.1, 1.21.2, 1.21.3, 1.21.4, 1.21.5, 1.21.6, 1.22.0, 1.22.1, 1.22.2, 1.22.3, 1.22.4, 1.23.0rc1, 1.23.0rc2, 1.23.0rc3, 1.23.0, 1.23.1, 1.23.2, 1.23.3, 1.23.4, 1.23.5, 1.24.0rc1, 1.24.0rc2, 1.24.0, 1.24.1, 1.24.2, 1.24.3, 1.24.4, 1.25.0rc1, 1.25.0, 1.25.1, 1.25.2, 1.26.0b1, 1.26.0rc1, 1.26.0, 1.26.1, 1.26.2, 1.26.3)
ERROR: No matching distribution found for numpy==1.34

------

 

Arghhh, is tensorflow the problem or is keras the problem ㅠㅠ

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

With v2 behavior turned off, it comes out like this:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
Cell In[27], line 1
----> 1 history = model.fit(train_dataset,
      2                     epochs=25,
      3                     validation_data = test_dataset,
      4                     validation_steps=1)

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:773, in Model.fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    771 if kwargs:
    772   raise TypeError('Unrecognized keyword arguments: ' + str(kwargs))
--> 773 self._assert_compile_was_called()
    774 self._check_call_args('fit')
    776 func = self._select_training_loop(x)

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:2788, in Model._assert_compile_was_called(self)
   2782 def _assert_compile_was_called(self):
   2783   # Checks whether `compile` has been called. If it has been called,
   2784   # then the optimizer is set. This is different from whether the
   2785   # model is compiled
   2786   # (i.e. whether the model is built and its inputs/outputs are set).
   2787   if not self._compile_was_called:
-> 2788     raise RuntimeError('You must compile your model before '
   2789                        'training/testing. '
   2790                        'Use `model.compile(optimizer, loss)`.')

RuntimeError: You must compile your model before training/testing. Use `model.compile(optimizer, loss)`.

 

If I let it use tf v2 instead,

import tensorflow.compat.v1 as tf
#tf.disable_v2_behavior()

it pushes the problem into the user-side code; what could be wrong here..

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
Cell In[28], line 1
----> 1 history = model.fit(train_dataset,
      2                     epochs=25,
      3                     validation_data = test_dataset,
      4                     validation_steps=1)

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:777, in Model.fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
    774 self._check_call_args('fit')
    776 func = self._select_training_loop(x)
--> 777 return func.fit(
    778     self,
    779     x=x,
    780     y=y,
    781     batch_size=batch_size,
    782     epochs=epochs,
    783     verbose=verbose,
    784     callbacks=callbacks,
    785     validation_split=validation_split,
    786     validation_data=validation_data,
    787     shuffle=shuffle,
    788     class_weight=class_weight,
    789     sample_weight=sample_weight,
    790     initial_epoch=initial_epoch,
    791     steps_per_epoch=steps_per_epoch,
    792     validation_steps=validation_steps,
    793     validation_freq=validation_freq,
    794     max_queue_size=max_queue_size,
    795     workers=workers,
    796     use_multiprocessing=use_multiprocessing)

File ~/.local/lib/python3.10/site-packages/keras/engine/training_arrays_v1.py:616, in ArrayLikeTrainingLoop.fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs)
    595 def fit(self,
    596         model,
    597         x=None,
   (...)
    611         validation_freq=1,
    612         **kwargs):
    613   batch_size = model._validate_or_infer_batch_size(batch_size,
    614                                                    steps_per_epoch, x)
--> 616   x, y, sample_weights = model._standardize_user_data(
    617       x,
    618       y,
    619       sample_weight=sample_weight,
    620       class_weight=class_weight,
    621       batch_size=batch_size,
    622       check_steps=True,
    623       steps_name='steps_per_epoch',
    624       steps=steps_per_epoch,
    625       validation_split=validation_split,
    626       shuffle=shuffle)
    628   if validation_data:
    629     val_x, val_y, val_sample_weights = model._prepare_validation_data(
    630         validation_data, batch_size, validation_steps)

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:2318, in Model._standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset)
   2316 is_compile_called = False
   2317 if not self._is_compiled and self.optimizer:
-> 2318   self._compile_from_inputs(all_inputs, y_input, x, y)
   2319   is_compile_called = True
   2321 # In graph mode, if we had just set inputs and targets as symbolic tensors
   2322 # by invoking build and compile on the model respectively, we do not have to
   2323 # feed anything to the model. Model already has input and target data as
   (...)
   2327 
   2328 # self.run_eagerly is not free to compute, so we want to reuse the value.

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:2568, in Model._compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target)
   2565   else:
   2566     target_tensors = None
-> 2568 self.compile(
   2569     optimizer=self.optimizer,
   2570     loss=self.loss,
   2571     metrics=self._compile_metrics,
   2572     weighted_metrics=self._compile_weighted_metrics,
   2573     loss_weights=self.loss_weights,
   2574     target_tensors=target_tensors,
   2575     sample_weight_mode=self.sample_weight_mode,
   2576     run_eagerly=self.run_eagerly,
   2577     experimental_run_tf_function=self._experimental_run_tf_function)

File ~/.local/lib/python3.10/site-packages/tensorflow/python/training/tracking/base.py:629, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
    627 self._self_setattr_tracking = False  # pylint: disable=protected-access
    628 try:
--> 629   result = method(self, *args, **kwargs)
    630 finally:
    631   self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:443, in Model.compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, distribute, **kwargs)
    439 training_utils_v1.prepare_sample_weight_modes(
    440     self._training_endpoints, sample_weight_mode)
    442 # Creates the model loss and weighted metrics sub-graphs.
--> 443 self._compile_weights_loss_and_weighted_metrics()
    445 # Functions for train, test and predict will
    446 # be compiled lazily when required.
    447 # This saves time when the user is not using all functions.
    448 self.train_function = None

File ~/.local/lib/python3.10/site-packages/tensorflow/python/training/tracking/base.py:629, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs)
    627 self._self_setattr_tracking = False  # pylint: disable=protected-access
    628 try:
--> 629   result = method(self, *args, **kwargs)
    630 finally:
    631   self._self_setattr_tracking = previous_value  # pylint: disable=protected-access

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:1537, in Model._compile_weights_loss_and_weighted_metrics(self, sample_weights)
   1524 self._handle_metrics(
   1525     self.outputs,
   1526     targets=self._targets,
   (...)
   1529     masks=masks,
   1530     return_weighted_metrics=True)
   1532 # Compute total loss.
   1533 # Used to keep track of the total loss value (stateless).
   1534 # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) +
   1535 #                   loss_weight_2 * output_2_loss_fn(...) +
   1536 #                   layer losses.
-> 1537 self.total_loss = self._prepare_total_loss(masks)

File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:1597, in Model._prepare_total_loss(self, masks)
   1594     sample_weight *= mask
   1596 if hasattr(loss_fn, 'reduction'):
-> 1597   per_sample_losses = loss_fn.call(y_true, y_pred)
   1598   weighted_losses = losses_utils.compute_weighted_loss(
   1599       per_sample_losses,
   1600       sample_weight=sample_weight,
   1601       reduction=losses_utils.ReductionV2.NONE)
   1602   loss_reduction = loss_fn.reduction

File ~/.local/lib/python3.10/site-packages/keras/losses.py:245, in LossFunctionWrapper.call(self, y_true, y_pred)
    242   y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
    244 ag_fn = tf.__internal__.autograph.tf_convert(self.fn, tf.__internal__.autograph.control_status_ctx())
--> 245 return ag_fn(y_true, y_pred, **self._fn_kwargs)

File ~/.local/lib/python3.10/site-packages/tensorflow/python/autograph/impl/api.py:692, in convert.<locals>.decorator.<locals>.wrapper(*args, **kwargs)
    690 except Exception as e:  # pylint:disable=broad-except
    691   if hasattr(e, 'ag_error_metadata'):
--> 692     raise e.ag_error_metadata.to_exception(e)
    693   else:
    694     raise

ValueError: in user code:

    File "/tmp/ipykernel_49162/810674056.py", line 8, in Loss  *
        loss += confidenceLoss(y[:,:,:-4],tf.cast(gt[:,:,0],tf.int32))
    File "/tmp/ipykernel_49162/2037607510.py", line 2, in confidenceLoss  *
        unweighted_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(label, y)

    ValueError: Only call sparse_softmax_cross_entropy_with_logits with named arguments (labels=..., logits=..., ...). Received unnamed argument: Tensor("loss/output_1_loss/Cast:0", shape=(None, None), dtype=int32)
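Incidentally, the traceback itself points at an immediate workaround: under the compat.v1 path the function refuses positional arguments, so inside confidenceLoss the call would just need keywords (a sketch, assuming the notebook's y = logits and label = class indices):

# instead of tf.nn.sparse_softmax_cross_entropy_with_logits(label, y)
unweighted_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=y)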

 

The version was the problem after all; tensorflow just needed to be used as plain v2 -_-

import tensorflow as tf
#import tensorflow.compat.v1 as tf
#tf.disable_v2_behavior()

 

The first commit was Jul 22, 2020 and the most recent Jul 21, 2022 (two years apart!)

saunack committed on Jul 21, 2022
saunack committed on Jul 22, 2020 

[링크 : https://github.com/saunack/MobileNetv2-SSD/commits/master/model.ipynb]

[링크 : https://github.com/saunack/MobileNetv2-SSD/blob/master/model.ipynb]

 

 

Saving the model (failed)

from keras.models import load_model
model.save('mnist_mlp_model.h5')

 

The error comes out like this:

NotImplementedError: Saving the model to HDF5 format requires the model to be a Functional model or a Sequential model. It does not work for subclassed models, because such models are defined via the body of a Python method, which isn't safely serializable. Consider saving to the Tensorflow SavedModel format (by setting save_format="tf") or using `save_weights`.

 

TensorFlow 2.0
TL;DR:

do not use model.save() for custom subclass keras model;
use save_weights() and load_weights() instead.

[링크 : https://stackoverflow.com/questions/51806852/cant-save-custom-subclassed-model]

 

sequential_model.save_weights("ckpt")

[링크 : https://www.tensorflow.org/guide/keras/save_and_serialize?hl=ko]
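For reference, the round trip for a subclassed model is save_weights() plus load_weights() on a freshly constructed instance; a rough sketch (the SSD constructor arguments and the 300x300x3 input shape here are my assumptions about the notebook):

model.save_weights('ssd_weights', save_format='tf')   # writes a TF checkpoint, not HDF5

restored = SSD(numBoxes=numBoxes, layerWidth=layerWidths, k=outputChannels)
restored(tf.zeros((1, 300, 300, 3)))                   # one dummy call so the variables get created
restored.load_weights('ssd_weights')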

 

model.save_weights('model_weights', save_format='tf')

 

AttributeError: in user code:

    File "/home/user/.local/lib/python3.10/site-packages/keras/saving/saving_utils.py", line 138, in _wrapped_model  *
        outputs = model(*args, **kwargs)
    File "/tmp/ipykernel_53483/1508227539.py", line 46, in call  *
        x = self.MobileNet(x)
    File "/tmp/ipykernel_53483/3997091176.py", line 70, in call  *
        x = self.B2_2(x)
    File "/tmp/ipykernel_53483/1796771022.py", line 69, in call  *
        x = self.residual([inputs,x])
    File "/home/user/.local/lib/python3.10/site-packages/keras/utils/traceback_utils.py", line 67, in error_handler  **
        raise e.with_traceback(filtered_tb) from None
    File "/home/user/.local/lib/python3.10/site-packages/keras/engine/base_layer.py", line 1102, in __call__
        if self._saved_model_inputs_spec is None:

    AttributeError: 'Add' object has no attribute '_saved_model_inputs_spec'

[링크 : https://github.com/tensorflow/tensorflow/issues/29545]

 

Argh, I just can't get it to save!

[링크 : https://www.tensorflow.org/lite/convert?hl=ko]
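For reference, the converter entry point that link describes looks roughly like this (whether it accepts this particular subclassed model is another question):

converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('ssd_mobilenetv2.tflite', 'wb') as f:        # output filename is arbitrary
    f.write(tflite_model)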

 

It feels like it's blowing up in an odd(?) place.

Do I need to bump the tensorflow version up to 2.14.0, rather than 2.8.0?

[링크 : https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add]

 

+

24.01.11

Switching to 2.14.0 changes nothing.. what on earth is this Add object anyway?

 

+

If I wrap the model created by def SSD() in keras.Sequential, training does proceed.. so why does saving still fail? ㅠㅠ

model = SSD(numBoxes=numBoxes, layerWidth=layerWidths, k = outputChannels)
model = tf.keras.Sequential(model)
# model.model().summary()

[링크 : https://www.tensorflow.org/tutorials/keras/save_and_load?hl=ko]

[링크 : https://www.tensorflow.org/hub/exporting_tf2_saved_model?hl=ko]

Posted by 구차니

Supposedly the script below can do the conversion, but after converting, it still fails when I actually try to run it (it doesn't touch the imports).

$ tf_upgrade_v2 --infile tensorfoo.py --outfile tensorfoo-upgraded.py

[링크 : https://www.tensorflow.org/guide/upgrade?hl=ko]

 

Instead, if you change import tensorflow as tf to compat.v1 as below and turn off v2 behavior, the old code runs.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

[링크 : https://www.tensorflow.org/guide/migrate?hl=ko]

 

 

--------------------------------

Running it, something does come out:

INFO line 7:16: Renamed 'tf.random_uniform' to 'tf.random.uniform'
INFO line 8:16: Renamed 'tf.random_uniform' to 'tf.random.uniform'
INFO line 11:4: Renamed 'tf.placeholder' to 'tf.compat.v1.placeholder'
INFO line 12:4: Renamed 'tf.placeholder' to 'tf.compat.v1.placeholder'
INFO line 25:12: Renamed 'tf.train.GradientDescentOptimizer' to 'tf.compat.v1.train.GradientDescentOptimizer'
INFO line 30:5: Renamed 'tf.Session' to 'tf.compat.v1.Session'
INFO line 31:13: Renamed 'tf.global_variables_initializer' to 'tf.compat.v1.global_variables_initializer'
TensorFlow 2.0 Upgrade Script
-----------------------------
Converted 1 files

 

No guarantee it actually runs well, though -_-

 

If an error like this shows up:

RuntimeError: tf.placeholder() is not compatible with eager execution.

 

adding the line below takes care of it,

tf.compat.v1.disable_eager_execution()

[링크 : https://luvstudy.tistory.com/122]
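The only catch is that the call has to run before any placeholders or sessions are created; a minimal sketch:

import tensorflow as tf

tf.compat.v1.disable_eager_execution()                 # must come before any graph-mode ops
x = tf.compat.v1.placeholder(tf.float32, name="x")
y = x * 2.0
with tf.compat.v1.Session() as sess:
    print(sess.run(y, feed_dict={x: 3.0}))             # 6.0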

 

but then an error pops up the moment tensors are actually multiplied.

RuntimeError: resource: Attempting to capture an EagerTensor without building a function.

 

This one has me stuck -_-

If the error means it tried to capture an EagerTensor without building a function..

then does building a function fix it?
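If that reading is right, the eager tensors would have to be used inside a traced function, i.e. tf.function; a rough sketch of the idea (not tried against the converted script):

import tensorflow as tf

w = tf.Variable(tf.random.uniform([1], -1.0, 1.0))

@tf.function          # "building a function": traces a graph that can capture eager tensors
def multiply(x):
    return w * x

print(multiply(tf.constant([1.0, 2.0, 3.0])))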

Posted by 구차니

Too lazy to type it all in(!), so:

[링크 : https://github.com/golbin/TensorFlow-Tutorials]

 

+

As of today, the source below does run after touching just two lines (it's tf v2, but run through the tf v1 backward-compatibility layer).

# Build and run a basic linear regression model that analyzes the correlation between X and Y.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x_data = [1, 2, 3]
y_data = [1, 2, 3]

W = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
b = tf.Variable(tf.random_uniform([1], -1.0, 1.0))

# name: attach names so the values are easier to track later, e.g. in TensorBoard.
X = tf.placeholder(tf.float32, name="X")
Y = tf.placeholder(tf.float32, name="Y")
print(X)
print(Y)

# Write the hypothesis used to analyze the correlation between X and Y.
# y = W * x + b
# W and X are not matrices, so plain multiplication is used instead of tf.matmul.
hypothesis = W * X + b

# Define the loss function.
# mean(h - Y)^2 : the distance between prediction and ground truth is used as the cost (loss).
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Run gradient descent optimization using the optimizer built into TensorFlow.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# The final goal is to minimize the cost.
train_op = optimizer.minimize(cost)

# Create a session and initialize the variables.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Run the optimization 100 times.
    for step in range(100):
        # Evaluate train_op and cost via sess.run.
        # The actual values to plug into the hypothesis are passed through feed_dict.
        _, cost_val = sess.run([train_op, cost], feed_dict={X: x_data, Y: y_data})

        print(step, cost_val, sess.run(W), sess.run(b))

    # Feed test values into the optimized model and check that the results look right.
    print("\n=== Test ===")
    print("X: 5, Y:", sess.run(hypothesis, feed_dict={X: 5}))
    print("X: 2.5, Y:", sess.run(hypothesis, feed_dict={X: 2.5}))

 

$ python lr.py
2024-01-10 11:39:49.775206: E external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:9261] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered
2024-01-10 11:39:49.775245: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:607] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered
2024-01-10 11:39:49.776215: E external/local_xla/xla/stream_executor/cuda/cuda_blas.cc:1515] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered
2024-01-10 11:39:49.781682: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2024-01-10 11:39:50.440334: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
/usr/lib/python3/dist-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.17.3 and <1.25.0 is required for this version of SciPy (detected version 1.26.3
  warnings.warn(f"A NumPy version >={np_minversion} and <{np_maxversion}"
WARNING:tensorflow:From /home/falinux/.local/lib/python3.10/site-packages/tensorflow/python/compat/v2_compat.py:108: disable_resource_variables (from tensorflow.python.ops.variable_scope) is deprecated and will be removed in a future version.
Instructions for updating:
non-resource variables are not supported in the long term
Tensor("X:0", dtype=float32)
Tensor("Y:0", dtype=float32)
2024-01-10 11:39:51.327415: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:388] MLIR V1 optimization pass is not enabled
0 6.4782066 [1.2373642] [-0.24653786]
1 0.089632 [1.1144395] [-0.29217595]
2 0.012737438 [1.1244997] [-0.27951655]
3 0.011264746 [1.1201066] [-0.2734131]
4 0.01071928 [1.1173724] [-0.2667731]
5 0.010209985 [1.1145341] [-0.26036742]
6 0.009725014 [1.1117826] [-0.2541076]
7 0.009263077 [1.1090952] [-0.2479991]
8 0.008823066 [1.1064726] [-0.24203737]
9 0.008403975 [1.1039131] [-0.23621896]
10 0.008004769 [1.1014152] [-0.2305404]
11 0.007624544 [1.0989771] [-0.22499838]
12 0.007262358 [1.0965978] [-0.21958955]
13 0.0069174054 [1.0942756] [-0.21431077]
14 0.0065888255 [1.0920093] [-0.20915887]
15 0.0062758424 [1.0897975] [-0.20413081]
16 0.0059777307 [1.0876389] [-0.19922365]
17 0.0056937817 [1.0855321] [-0.19443446]
18 0.0054233256 [1.083476] [-0.1897604]
19 0.0051657106 [1.0814693] [-0.1851987]
20 0.0049203373 [1.0795108] [-0.18074667]
21 0.004686633 [1.0775993] [-0.17640167]
22 0.0044640056 [1.0757339] [-0.17216106]
23 0.0042519583 [1.0739133] [-0.1680224]
24 0.004049988 [1.0721365] [-0.16398326]
25 0.0038576098 [1.0704024] [-0.16004121]
26 0.0036743751 [1.06871] [-0.15619393]
27 0.0034998383 [1.0670582] [-0.15243913]
28 0.003333594 [1.0654461] [-0.1487746]
29 0.003175243 [1.0638729] [-0.14519812]
30 0.0030244188 [1.0623374] [-0.14170769]
31 0.0028807523 [1.0608389] [-0.13830112]
32 0.0027439168 [1.0593764] [-0.13497646]
33 0.0026135833 [1.057949] [-0.13173172]
34 0.002489428 [1.056556] [-0.12856494]
35 0.0023711843 [1.0551964] [-0.12547435]
36 0.0022585478 [1.0538695] [-0.12245804]
37 0.0021512664 [1.0525745] [-0.11951423]
38 0.0020490757 [1.0513107] [-0.11664119]
39 0.0019517452 [1.0500772] [-0.11383722]
40 0.0018590376 [1.0488734] [-0.11110065]
41 0.0017707323 [1.0476985] [-0.10842989]
42 0.0016866213 [1.0465518] [-0.1058233]
43 0.0016065066 [1.0454327] [-0.10327938]
44 0.0015301956 [1.0443406] [-0.1007966]
45 0.0014575059 [1.0432746] [-0.09837352]
46 0.0013882784 [1.0422344] [-0.09600867]
47 0.0013223292 [1.0412191] [-0.0937007]
48 0.0012595187 [1.0402282] [-0.0914482]
49 0.0011996872 [1.0392612] [-0.08924985]
50 0.0011427039 [1.0383173] [-0.08710436]
51 0.0010884297 [1.0373962] [-0.08501042]
52 0.0010367227 [1.0364972] [-0.08296681]
53 0.0009874817 [1.0356199] [-0.08097235]
54 0.0009405748 [1.0347636] [-0.07902583]
55 0.00089589664 [1.0339279] [-0.07712609]
56 0.00085334125 [1.0331123] [-0.07527205]
57 0.0008128048 [1.0323163] [-0.07346255]
58 0.0007741994 [1.0315394] [-0.07169659]
59 0.00073742354 [1.0307813] [-0.06997304]
60 0.00070239493 [1.0300413] [-0.06829095]
61 0.00066903216 [1.0293192] [-0.0666493]
62 0.0006372516 [1.0286143] [-0.06504711]
63 0.0006069818 [1.0279264] [-0.0634834]
64 0.00057814806 [1.0272552] [-0.0619573]
65 0.00055068725 [1.0265999] [-0.06046791]
66 0.0005245278 [1.0259604] [-0.05901428]
67 0.0004996119 [1.0253364] [-0.0575956]
68 0.00047588357 [1.0247273] [-0.05621104]
69 0.0004532766 [1.0241328] [-0.05485978]
70 0.00043174453 [1.0235528] [-0.05354097]
71 0.00041123512 [1.0229865] [-0.05225388]
72 0.0003917031 [1.022434] [-0.05099772]
73 0.00037309653 [1.0218947] [-0.04977177]
74 0.00035537416 [1.0213684] [-0.04857529]
75 0.00033849102 [1.0208547] [-0.04740757]
76 0.00032241447 [1.0203533] [-0.04626793]
77 0.00030709928 [1.0198641] [-0.04515567]
78 0.00029251093 [1.0193865] [-0.04407016]
79 0.0002786171 [1.0189205] [-0.04301074]
80 0.00026538406 [1.0184656] [-0.04197682]
81 0.00025277727 [1.0180218] [-0.04096771]
82 0.0002407704 [1.0175885] [-0.0399829]
83 0.0002293337 [1.0171658] [-0.03902172]
84 0.00021844136 [1.0167531] [-0.03808369]
85 0.00020806213 [1.0163504] [-0.03716817]
86 0.00019818085 [1.0159572] [-0.0362747]
87 0.000188766 [1.0155737] [-0.03540265]
88 0.00017980166 [1.0151993] [-0.03455162]
89 0.00017126095 [1.0148339] [-0.03372103]
90 0.00016312544 [1.0144774] [-0.0329104]
91 0.0001553779 [1.0141293] [-0.03211929]
92 0.00014799698 [1.0137897] [-0.03134715]
93 0.00014096718 [1.0134581] [-0.03059359]
94 0.00013426914 [1.0131347] [-0.02985811]
95 0.00012789248 [1.0128189] [-0.02914038]
96 0.00012181744 [1.0125108] [-0.02843988]
97 0.00011603059 [1.01221] [-0.02775621]
98 0.00011052046 [1.0119165] [-0.02708898]
99 0.00010527024 [1.01163] [-0.02643778]

=== Test ===
X: 5, Y: [5.0317125]
X: 2.5, Y: [2.5026374]

[링크 : https://github.com/golbin/TensorFlow-Tutorials/blob/master/03%20-%20TensorFlow%20Basic/03%20-%20Linear%20Regression.py]

Posted by 구차니

ReLU is a kind of threshold function (an activation function, not to be confused with loss/cost functions and the like).

It clamps anything below 0 down to 0.
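In numpy terms it is just a clamp (a trivial sketch):

import numpy as np

def relu(x):
    return np.maximum(0, x)                            # anything below 0 becomes 0

print(relu(np.array([-2.0, -0.5, 0.0, 1.5, 3.0])))     # [0.  0.  0.  1.5 3. ]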

 

Incidentally, looking up "Rectified", the dictionary meanings that come up are "corrected", "rectified" and so on,

so maybe it translates to something like "corrected linear unit"?

 

[링크 : https://ko.wikipedia.org/wiki/ReLU]

 

In electronic circuits, a "rectifier" refers to converting AC into DC, through diodes and the like.

[링크 : https://ko.wikipedia.org/wiki/정류기]

 

Come to think of it, a diode's voltage curve looks kind of like ReLU, and then again kind of not (...?!)

Anyway.. so it goes.

[링크 : http://www.ktechno.co.kr/ls_parts/parts04.html]

Posted by 구차니

Looking at models in netron, something called softmax keeps showing up.

I used to just shrug and move on, but seeing it explained a bit more theoretically made me curious.

[링크 : https://m.hanbit.co.kr/store/books/book_view.html?p_code=B7257101308]

 

Anyway, the formula alone doesn't quite click for me.

[링크 : https://syj9700.tistory.com/38]

 

Roughly speaking, it exponentiates the values and normalizes them so they sum to 1..

It turns (1,2,8) into roughly (0.001, 0.002, 0.997).

Raise e to each element of (1,2,8):

(e^1, e^2, e^8)

The denominator is e^1 + e^2 + e^8, so compute

(e^1 / (e^1 + e^2 + e^8), e^2 / (e^1 + e^2 + e^8), e^8 / (e^1 + e^2 + e^8)):

 

(2.71828182845904, 7.38905609893065, 2980.95798704173)

2.71828182845904 + 7.38905609893065 + 2980.95798704173 = 2991.06532496912

(2.71828182845904 / 2991.06532496912, 7.38905609893065 / 2991.06532496912, 2980.95798704173 / 2991.06532496912)

= (0.000908800555363033, 0.00247037603533682, 0.9966208234093)
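The same calculation in numpy, just as a sanity check:

>>> import numpy as np
>>> a = [1.0, 2.0, 8.0]
>>> np.exp(a) / np.sum(np.exp(a))
array([0.0009088 , 0.00247038, 0.99662082])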

 

Contrary to its name, it is not a smoothed version of the maximum (max) function; it is a smooth approximation of the arg max function, whose output is one-hot. It is computed by raising e (the base of the natural logarithm) to each input and dividing by the sum of those exponentials.

[링크 : https://ko.wikipedia.org/wiki/소프트맥스_함수]

 

For example, the standard softmax of (1,2,8) is approximately (0.001,0.002,0.997), which amounts to assigning almost all of the total unit weight in the result to the position of the vector's maximal element (of 8).

>>> import numpy as np
>>> a = [1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0]
>>> np.exp(a) / np.sum(np.exp(a)) 
array([0.02364054, 0.06426166, 0.1746813, 0.474833, 0.02364054,
       0.06426166, 0.1746813])

[링크 : https://en.wikipedia.org/wiki/Softmax_function]

 

Anyway, since the computed result isn't something you can judge as true/false, but is converted into numbers that are just easier for a human to read,

I think I half understand (and half don't) why they say not to use softmax for prediction..

[링크 : https://velog.io/@francomoon7/예측에-Softmax를-사용하면-안되는-이유]

Posted by 구차니

These are old posts, so even now it's not easy because of pip package version issues and the like.

I used to think training was some deep mystery.. but the point is just multiplying tensors (multi-dimensional matrices) with python and tensorflow,

and how you organize and feed the data (training data, validation data, labels) seems to be pretty much all there is to it.

 

[링크 : https://github.com/abhimanyu1990/SSD-Mobilenet-Custom-Object-Detector-Model-using-Tensorflow-2]

 

[링크 : https://seoftware.tistory.com/108]

[링크 : https://seoftware.tistory.com/109]

[링크 : https://towardsdatascience.com/custom-object-detection-using-tensorflow-from-scratch-e61da2e10087]

    [링크 : https://github.com/bourdakos1/Custom-Object-Detection]

    [링크 : https://github.com/cloud-annotations/cloud-annotations]

 

[링크 : https://blog.roboflow.com/how-to-train-mobilenetv2-on-a-custom-dataset/]

    [링크 : https://colab.research.google.com/drive/1bOzVaDQo8h6Ngstb7AcfzC35OihpHspt]

Posted by 구차니

Not sure whether this is about plain SSD or the SSD + mobilenet v2 side of things.

[링크 : https://stackoverflow.com/questions/67868644/post-process-of-tf2-ssd-detection-models]

Posted by 구차니

Obvious in hindsight, maybe.. it's written in python, so pip install is all it takes.

 

$ pip install notebook
$ jupyter notebook

[링크 : https://jupyter.org/install]

 

Running jupyter notebook brings it up in the browser, and double-clicking an ipynb opens it, done.

 

Anyway, Run > Run All Cells runs everything in order.

 

When I actually try to run it, it's two years old and the packages have changed, so it doesn't seem to work.. sigh

 

+

Is it a numpy version problem.. it needs to be below 1.25.0 but 1.26.2 is installed, hence the error..

Anything from 1.17.3 to 1.24.x should do, so I'll switch to something in that range and try again.

pip show numpy 
pip uninstall numpy
pip install numpy==1.16.4

[링크 : https://reyrei.tistory.com/m/28]
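Since the SciPy warning only states the allowed range, letting pip pick the newest version inside that range is another option (not tested here):

$ pip install "numpy>=1.17.3,<1.25"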

 

+

24.01.03

After sorting out numpy, it now magically gets stuck at tensorflow.keras instead.. -_-

Posted by 구차니

Spotted the ipynb extension, so looking into it.

[링크 : https://github.com/saunack/MobileNetv2-SSD]

 

Install Anaconda,

and then install jupyter notebook from there, apparently.

[링크 : https://mananacho.tistory.com/31]

[링크 : https://blog.naver.com/tamiel/221956194782]

 

From the command line it's somewhat convoluted, and since it still doesn't run without jupyter, I'm not sure what the point would be.

$ jupyter nbconvert --execute --to notebook lda2.ipynb

[링크 : https://data-scient2st.tistory.com/234]

Posted by 구차니

Got tired of digging through docs, so I just ran it and looked at the arguments..

root        3019     925 72 06:38 ?        00:00:25 /usr/bin/python3 /home/root/.nxp-demo-experience/scripts/machine_learning/MLDemoLauncher.py detect

 

root@imx8mpevk:~/.nxp-demo-experience/scripts/machine_learning# cat MLDemoLauncher.py 
#!/usr/bin/env python3

"""
Copyright 2021-2023 NXP

SPDX-License-Identifier: BSD-2-Clause

This script launches the NNStreamer ML Demos using a UI to pick settings.
"""

import gi
import os
import sys
import glob
from gi.repository import Gtk, GLib, Gio

gi.require_version("Gtk", "3.0")

sys.path.append("/home/root/.nxp-demo-experience/scripts/")
import utils


class MLLaunch(Gtk.Window):
    """The GUI window for the ML demo launcher"""

    def __init__(self, demo):
        """Creates the UI window"""
        # Initialization
        self.demo = demo
        super().__init__(title=demo)
        self.set_default_size(450, 200)
        self.set_resizable(False)

        # Get platform
        self.platform = os.uname().nodename
     
        # OpenVX graph caching is not available on i.MX 8QuadMax platform.
        if self.platform != "imx8qmmek" :
            os.environ["VIV_VX_CACHE_BINARY_GRAPH_DIR"] = "/home/root/.cache/gopoint"
            os.environ["VIV_VX_ENABLE_CACHE_GRAPH_BINARY"] = "1"

        # Get widget properties
        devices = []
        if self.demo != "brand" and self.demo != "selfie_nn":
            if self.platform != "imx93evk":
                devices.append("Example Video")

        for device in glob.glob("/dev/video*"):
            devices.append(device)

        backends_available = ["CPU"]
        if (
            os.path.exists("/usr/lib/libvx_delegate.so")
            and self.demo != "pose"
            and self.demo != "selfie_nn"
        ):
            backends_available.insert(1, "GPU")
        if os.path.exists("/usr/lib/libneuralnetworks.so") and self.demo != "brand" and self.platform != "imx8qmmek":
            backends_available.insert(0, "NPU")
        if os.path.exists("/usr/lib/libethosu_delegate.so"):
            backends_available.insert(0, "NPU")
            backends_available.pop()

        displays_available = ["Weston"]

        colors_available = ["Red", "Green", "Blue", "Black", "White"]

        demo_modes_available = ["Background Substitution", "Segmentation Mask"]

        # Create widgets
        main_grid = Gtk.Grid.new()
        device_label = Gtk.Label.new("Source")
        self.device_combo = Gtk.ComboBoxText()
        backend_label = Gtk.Label.new("Backend")
        self.backend_combo = Gtk.ComboBoxText()
        self.display_combo = Gtk.ComboBoxText()
        self.launch_button = Gtk.Button.new_with_label("Run")
        self.status_bar = Gtk.Label.new()
        header = Gtk.HeaderBar()
        quit_button = Gtk.Button()
        quit_icon = Gio.ThemedIcon(name="process-stop-symbolic")
        quit_image = Gtk.Image.new_from_gicon(quit_icon, Gtk.IconSize.BUTTON)
        separator = Gtk.Separator.new(0)
        time_title_label = Gtk.Label.new("Video Refresh")
        self.time_label = Gtk.Label.new("--.-- ms")
        self.fps_label = Gtk.Label.new("-- FPS")
        inference_title_label = Gtk.Label.new("Inference Time")
        self.inference_label = Gtk.Label.new("--.-- ms")
        self.ips_label = Gtk.Label.new("-- IPS")
        if self.demo != "selfie_nn":
            self.width_entry = self.r_scale = Gtk.Scale.new_with_range(
                Gtk.Orientation.HORIZONTAL, 300, 1920, 2
            )
            self.height_entry = self.r_scale = Gtk.Scale.new_with_range(
                Gtk.Orientation.HORIZONTAL, 300, 1080, 2
            )
            self.width_label = Gtk.Label.new("Height")
            self.height_label = Gtk.Label.new("Width")
            self.color_label = Gtk.Label.new("Label Color")
        else:
            self.color_label = Gtk.Label.new("Text Color")
            self.demo_mode = Gtk.Label.new("Demo Mode")
            self.mode_combo = Gtk.ComboBoxText()
        self.color_combo = Gtk.ComboBoxText()

        # Organize widgets
        self.add(main_grid)
        self.set_titlebar(header)

        quit_button.add(quit_image)
        header.pack_end(quit_button)

        main_grid.set_row_spacing(10)
        main_grid.set_border_width(10)

        main_grid.attach(device_label, 0, 1, 2, 1)
        device_label.set_hexpand(True)
        main_grid.attach(backend_label, 0, 2, 2, 1)
        # main_grid.attach(display_label, 0, 3, 2, 1)
        if self.demo != "selfie_nn":
            main_grid.attach(self.width_label, 0, 4, 2, 1)
            main_grid.attach(self.height_label, 0, 5, 2, 1)
            main_grid.attach(self.color_label, 0, 6, 2, 1)
        else:
            main_grid.attach(self.demo_mode, 0, 4, 2, 1)
            main_grid.attach(self.color_label, 0, 5, 2, 1)

        main_grid.attach(self.device_combo, 2, 1, 2, 1)
        self.device_combo.set_hexpand(True)
        main_grid.attach(self.backend_combo, 2, 2, 2, 1)
        # main_grid.attach(self.display_combo, 2, 3, 2, 1)
        if self.demo != "selfie_nn":
            main_grid.attach(self.width_entry, 2, 4, 2, 1)
            main_grid.attach(self.height_entry, 2, 5, 2, 1)
            main_grid.attach(self.color_combo, 2, 6, 2, 1)
        else:
            main_grid.attach(self.mode_combo, 2, 4, 2, 1)
            main_grid.attach(self.color_combo, 2, 5, 2, 1)

        main_grid.attach(self.launch_button, 0, 7, 4, 1)
        main_grid.attach(self.status_bar, 0, 8, 4, 1)

        main_grid.attach(separator, 0, 9, 4, 1)

        main_grid.attach(time_title_label, 0, 10, 2, 1)
        main_grid.attach(self.time_label, 0, 11, 1, 1)
        main_grid.attach(self.fps_label, 1, 11, 1, 1)
        main_grid.attach(inference_title_label, 2, 10, 2, 1)
        main_grid.attach(self.inference_label, 2, 11, 1, 1)
        main_grid.attach(self.ips_label, 3, 11, 1, 1)

        # Configure widgets
        for device in devices:
            self.device_combo.append_text(device)
        for backend in backends_available:
            self.backend_combo.append_text(backend)
        for display in displays_available:
            self.display_combo.append_text(display)
        for color in colors_available:
            self.color_combo.append_text(color)
        if self.demo == "selfie_nn":
            for mode in demo_modes_available:
                self.mode_combo.append_text(mode)

        self.device_combo.set_active(0)
        self.backend_combo.set_active(0)
        self.display_combo.set_active(0)
        self.color_combo.set_active(0)
        if self.demo != "selfie_nn":
            self.width_entry.set_value(1920)
            self.height_entry.set_value(1080)
            self.width_entry.set_sensitive(False)
            self.height_entry.set_sensitive(False)
        else:
            self.mode_combo.set_active(0)
        self.device_combo.connect("changed", self.on_source_change)
        self.launch_button.connect("clicked", self.start)
        quit_button.connect("clicked", exit)
        if self.demo == "detect":
            header.set_title("Detection Demo")
        elif self.demo == "id":
            header.set_title("Classification Demo")
        elif self.demo == "pose":
            header.set_title("Pose Demo")
        elif self.demo == "brand":
            header.set_title("Brand Demo")
        elif self.demo == "selfie_nn":
            header.set_title("Selfie Segmenter Demo")
        else:
            header.set_title("NNStreamer Demo")
        header.set_subtitle("NNStreamer Examples")

    def start(self, button):
        """Starts the ML Demo with selected settings"""
        self.update_time = GLib.get_monotonic_time()
        self.launch_button.set_sensitive(False)
        if self.color_combo.get_active_text() == "Red":
            r = 1
            g = 0
            b = 0
        elif self.color_combo.get_active_text() == "Blue":
            r = 0
            g = 0
            b = 1
        elif self.color_combo.get_active_text() == "Green":
            r = 0
            g = 1
            b = 0
        elif self.color_combo.get_active_text() == "Black":
            r = 0
            g = 0
            b = 0
        elif self.color_combo.get_active_text() == "White":
            r = 1
            g = 1
            b = 1
        else:
            r = 1
            g = 0
            b = 0
        if self.demo == "detect":
            if self.platform == "imx93evk":
                model = utils.download_file("mobilenet_ssd_v2_coco_quant_postprocess_vela.tflite")
            else:
                model = utils.download_file("mobilenet_ssd_v2_coco_quant_postprocess.tflite")
            labels = utils.download_file("coco_labels.txt")
            if self.device_combo.get_active_text() == "Example Video":
                device = utils.download_file("detect_example.mov")
            else:
                device = self.device_combo.get_active_text()
            if model == -1 or model == -2 or model == -3:
                if self.platform == "imx93evk":
                    error = "mobilenet_ssd_v2_coco_quant_postprocess_vela.tflite"
                else:
                    error = "mobilenet_ssd_v2_coco_quant_postprocess.tflite"
            elif labels == -1 or labels == -2 or labels == -3:
                error = "coco_labels.txt"
            elif device == -1 or device == -2 or device == -3:
                error = "detect_example.mov"
        if self.demo == "id":
            if self.platform == "imx93evk":
                model = utils.download_file("mobilenet_v1_1.0_224_quant_vela.tflite")
            else:
                model = utils.download_file("mobilenet_v1_1.0_224_quant.tflite")
            labels = utils.download_file("1_1.0_224_labels.txt")
            if self.device_combo.get_active_text() == "Example Video":
                device = utils.download_file("id_example.mov")
            else:
                device = self.device_combo.get_active_text()
            if model == -1 or model == -2 or model == -3:
                if self.platform == "imx93evk":
                    error = "mobilenet_v1_1.0_224_quant_vela.tflite"
                else:
                    error = "mobilenet_v1_1.0_224_quant.tflite"
            elif labels == -1 or labels == -2 or labels == -3:
                error = "1_1.0_224_labels.txt"
            elif device == -1 or device == -2 or device == -3:
                error = "id_example.mov"
        if self.demo == "pose":
            model = utils.download_file("posenet_resnet50_uint8_float32_quant.tflite")
            labels = utils.download_file("key_point_labels.txt")
            if self.device_combo.get_active_text() == "Example Video":
                device = utils.download_file("pose_example.mov")
            else:
                device = self.device_combo.get_active_text()
            if model == -1 or model == -2 or model == -3:
                error = "posenet_resnet50_uint8_float32_quant.tflite"
            elif labels == -1 or labels == -2 or labels == -3:
                error = "key_point_labels.txt"
            elif device == -1 or device == -2 or device == -3:
                error = "pose_example.mov"
        if self.demo == "brand":
            model = utils.download_file("brand_model.tflite")
            labels = utils.download_file("brand_labels.txt")
            if self.device_combo.get_active_text() == "Example Video":
                device = utils.download_file("brand_example.mov")
            else:
                device = self.device_combo.get_active_text()
            if model == -1 or model == -2 or model == -3:
                error = "brand_model.tflite"
            elif labels == -1 or labels == -2 or labels == -3:
                error = "brand_labels.txt"
            elif device == -1 or device == -2 or device == -3:
                error = "brand_example.mov"
        if self.demo == "selfie_nn":
            if self.platform == "imx93evk":
                model = utils.download_file(
                    "selfie_segmenter_landscape_int8_vela.tflite"
                )
            else:
                model = utils.download_file("selfie_segmenter_int8.tflite")
            # Labels refer to background img
            if self.platform == "imx93evk":
                labels = utils.download_file("bg_image_landscape.jpg")
            else:
                labels = utils.download_file("bg_image.jpg")
            if self.device_combo.get_active_text() == "Example Video":
                device = utils.download_file("selfie_example.mov")
            else:
                device = self.device_combo.get_active_text()
            if model == -1 or model == -2 or model == -3:
                if self.platform == "imx93evk":
                    error = "selfie_segmenter_landscape_int8_vela.tflite"
                else:
                    error = "selfie_segmenter_int8.tflite"
            elif labels == -1 or labels == -2 or labels == -3:
                if self.platform == "imx93evk":
                    error = "bg_image_landscape.jpg"
                else:
                    error = "bg_image.jpg"
            elif device == -1 or device == -2 or device == -3:
                error = "selfie_example.mov"
            if self.mode_combo.get_active_text() == "Background Substitution":
                set_mode = 0
            else:
                set_mode = 1

        if model == -1 or labels == -1 or device == -1:
            """
            dialog = Gtk.MessageDialog(
                transient_for=self,
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="Cannot find files! The file that you requested" +
                " does not have any metadata that is related to it. " +
                "Please see /home/root/.nxp-demo-experience/downloads.txt" +
                " to see if the requested file exists! \n \n Cannot find:" +
                error)
            dialog.run()
            dialog.destroy()
            """
            self.status_bar.set_text("Cannot find files!")
            self.launch_button.set_sensitive(True)
            return
        if model == -2 or labels == -2 or device == -2:
            """
            dialog = Gtk.MessageDialog(
                transient_for=self,
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="Cannot download files! The URL used to download the" +
                " file cannot be reached. If you are connected to the " +
                "internet, please check the /home/root/.nxp-demo-experience" +
                "/downloads.txt for the URL. For some regions, " +
                "these sites may be blocked. To install these manually," +
                " please go to the file listed above and provide the " +
                "path to the file in \"PATH\" \n \n Cannot download " + error)
            dialog.run()
            dialog.destroy()
            """
            self.status_bar.set_text("Download failed!")
            self.launch_button.set_sensitive(True)
            return
        if model == -3 or labels == -3 or device == -4:
            """
            dialog = Gtk.MessageDialog(
                transient_for=self,
                flags=0,
                message_type=Gtk.MessageType.ERROR,
                buttons=Gtk.ButtonsType.CANCEL,
                text="Invalid files! The files where not what we expected." +
                "If you are SURE that the files are correct, delete " +
                "the \"SHA\" value in /home/root/.nxp-demo-experience" +
                "/downloads.txt to bypass the SHA check. \n \n Bad SHA for " +
                error)
            dialog.run()
            dialog.destroy()
            """
            self.status_bar.set_text("Downloaded bad file!")
            self.launch_button.set_sensitive(True)
            return
        if self.demo == "detect":
            import nndetection

            example = nndetection.ObjectDetection(
                self.platform,
                device,
                self.backend_combo.get_active_text(),
                model,
                labels,
                self.display_combo.get_active_text(),
                self.update_stats,
                self.width_entry.get_value(),
                self.height_entry.get_value(),
                r,
                g,
                b,
            )
            example.run()
        if self.demo == "id":
            import nnclassification

            example = nnclassification.NNStreamerExample(
                self.platform,
                device,
                self.backend_combo.get_active_text(),
                model,
                labels,
                self.display_combo.get_active_text(),
                self.update_stats,
                self.width_entry.get_value(),
                self.height_entry.get_value(),
                r,
                g,
                b,
            )
            example.run_example()
        if self.demo == "pose":
            import nnpose

            example = nnpose.NNStreamerExample(
                self.platform,
                device,
                self.backend_combo.get_active_text(),
                model,
                labels,
                self.display_combo.get_active_text(),
                self.update_stats,
                self.width_entry.get_value(),
                self.height_entry.get_value(),
                r,
                g,
                b,
            )
            example.run_example()
        if self.demo == "brand":
            import nnbrand

            example = nnbrand.NNStreamerExample(
                self.platform,
                device,
                self.backend_combo.get_active_text(),
                model,
                labels,
                self.display_combo.get_active_text(),
                self.update_stats,
                self.width_entry.get_value(),
                self.height_entry.get_value(),
                r,
                g,
                b,
            )
            example.run_example()
        if self.demo == "selfie_nn":
            import selfie_segmenter

            example = selfie_segmenter.SelfieSegmenter(
                self.platform,
                device,
                self.backend_combo.get_active_text(),
                model,
                labels,
                self.update_stats,
                set_mode,
                r,
                g,
                b,
            )
            example.run()

        self.launch_button.set_sensitive(True)

    def update_stats(self, time):
        """Callback used the update stats in GUI"""
        interval_time = (GLib.get_monotonic_time() - self.update_time) / 1000000
        if interval_time > 1:
            refresh_time = time.interval_time
            inference_time = time.tensor_filter.get_property("latency")

            if refresh_time != 0 and inference_time != 0:
                # Print pipeline information
                if self.demo == "selfie_nn" or self.demo == "id" or self.demo == "detect":
                    self.time_label.set_text(
                        "{:12.2f} ms".format(1.0 / time.current_framerate * 1000.0)
                    )
                    self.fps_label.set_text(
                        "{:12.2f} FPS".format(time.current_framerate)
                    )
                else:
                    self.time_label.set_text("{:12.2f} ms".format(refresh_time / 1000))
                    self.fps_label.set_text(
                        "{:12.2f} FPS".format(1 / (refresh_time / 1000000))
                    )
                # Print inference information
                self.inference_label.set_text(
                    "{:12.2f} ms".format(inference_time / 1000)
                )
                self.ips_label.set_text(
                    "{:12.2f} FPS".format(1 / (inference_time / 1000000))
                )
            self.update_time = GLib.get_monotonic_time()
        return True

    def on_source_change(self, widget):
        """Callback to lock sliders"""
        if self.demo != "selfie_nn":
            if self.device_combo.get_active_text() == "Example Video":
                self.width_entry.set_value(1920)
                self.height_entry.set_value(1080)
                self.width_entry.set_sensitive(False)
                self.height_entry.set_sensitive(False)
            else:
                self.width_entry.set_sensitive(True)
                self.height_entry.set_sensitive(True)


if __name__ == "__main__":
    if (
        len(sys.argv) != 2
        and sys.argv[1] != "detect"
        and sys.argv[1] != "id"
        and sys.argv[1] != "pose"
        and sys.argv[1] != "selfie_nn"
    ):
        print("Demos available: detect, id, pose, selfie_nn")
    else:
        win = MLLaunch(sys.argv[1])
        win.connect("destroy", Gtk.main_quit)
        win.show_all()
        Gtk.main()

 

Below is the part that actually does the work; since the launcher imports nndetection, I'm chasing that down to take a look.

Come to think of it, it's LGPL, so it should be fine to just post it like this, right?

root@imx8mpevk:~/.nxp-demo-experience/scripts/machine_learning# find / -name nndetection.py
/run/media/root-mmcblk2p2/home/root/.nxp-demo-experience/scripts/machine_learning/nndetection.py
/home/root/.nxp-demo-experience/scripts/machine_learning/nndetection.py

root@imx8mpevk:~/.nxp-demo-experience/scripts/machine_learning# cat /home/root/.nxp-demo-experience/scripts/machine_learning/nndetection.py
#!/usr/bin/env python3

"""
Copyright SSAFY Team 1 <jangjongha.sw@gmail.com>
Copyright 2021-2023 NXP

SPDX-License-Identifier: LGPL-2.1-only
Original Source: https://github.com/nnstreamer/nnstreamer-example

This demo shows how you can use the NNStreamer to detect objects.

From the original source, this was modified to better work with the a
GUI and to get better performance on the i.MX 8M Plus and i.MX93.
"""

import os
import sys
import gi
import re
import logging
import numpy as np
import cairo

gi.require_version("Gst", "1.0")
gi.require_foreign("cairo")
from gi.repository import Gst, GObject, GLib

DEBUG = False


class ObjectDetection:
    """The class that manages the demo"""

    def __init__(
        self,
        platform,
        device,
        backend,
        model,
        labels,
        display="Weston",
        callback=None,
        width=1920,
        height=1080,
        r=1,
        g=0,
        b=0,
    ):
        """Creates an instance of the demo

        Arguments:
        device -- What camera or video file to use
        backend -- Whether to use NPU or CPU
        model -- the path to the model
        labels -- the path to the labels
        display -- Whether to use X11 or Weston
        callback -- Callback to pass stats to
        width -- Width of output
        height -- Height of output
        r -- Red value for labels
        g -- Green value for labels
        b -- Blue value for labels
        """
        self.loop = None
        self.pipeline = None
        self.running = False
        self.video_caps = None
        self.first_frame = True

        self.BOX_SIZE = 4
        self.LABEL_SIZE = 91
        self.DETECTION_MAX = 20
        self.MAX_OBJECT_DETECTION = 20

        self.Y_SCALE = 10.0
        self.X_SCALE = 10.0
        self.H_SCALE = 5.0
        self.W_SCALE = 5.0

        self.VIDEO_WIDTH = width
        self.VIDEO_HEIGHT = height
        self.MODEL_WIDTH = 300
        self.MODEL_HEIGHT = 300

        self.tflite_model = model
        self.label_path = labels
        self.device = device
        self.backend = backend
        self.display = display
        self.tflite_labels = []
        self.detected_objects = []
        self.callback = callback
        self.r = r
        self.b = b
        self.g = g
        self.platform = platform
        self.current_framerate = 1000

        # Define PXP or GPU2D converter
        if self.platform == "imx93evk":
            self.nxp_converter = "imxvideoconvert_pxp "
        else:
            self.nxp_converter = "imxvideoconvert_g2d "

        if not self.tflite_init():
            raise Exception

        Gst.init(None)

    def run(self):
        """Starts pipeline and run demo"""

        if self.backend == "CPU":
            if self.platform == "imx93evk":
                backend = "true:cpu custom=NumThreads:2"
            else:
                backend = "true:cpu custom=NumThreads:4"
        elif self.backend == "GPU":
            os.environ["USE_GPU_INFERENCE"] = "1"
            backend = (
                "true:gpu custom=Delegate:External," "ExtDelegateLib:libvx_delegate.so"
            )
        else:
            if self.platform == "imx93evk":
                backend = (
                    "true:npu custom=Delegate:External,"
                    "ExtDelegateLib:libethosu_delegate.so"
                )
            else:
                os.environ["USE_GPU_INFERENCE"] = "0"
                backend = (
                    "true:npu custom=Delegate:External,"
                    "ExtDelegateLib:libvx_delegate.so"
                )

        if self.display == "X11":
            display = "ximagesink name=img_tensor "
        elif self.display == "None":
            self.print_time = GLib.get_monotonic_time()
            display = "fakesink "
        else:
            display = "fpsdisplaysink name=img_tensor text-overlay=false video-sink=waylandsink sync=false"

        # main loop
        self.loop = GLib.MainLoop()
        self.old_time = GLib.get_monotonic_time()
        self.update_time = GLib.get_monotonic_time()
        self.reload_time = -1
        self.interval_time = 999999

        # Create decoder for video file
        if self.platform == "imx8qmmek":
            decoder = "h264parse ! v4l2h264dec "
        else:
            decoder = "vpudec "

        if "/dev/video" in self.device:
            gst_launch_cmdline = "v4l2src name=cam_src device=" + self.device
            gst_launch_cmdline += " ! " + self.nxp_converter + "! video/x-raw,width="
            gst_launch_cmdline += str(int(self.VIDEO_WIDTH)) + ",height="
            gst_launch_cmdline += str(int(self.VIDEO_HEIGHT))
            gst_launch_cmdline += ",framerate=30/1,format=BGRx ! tee name=t"
        else:
            gst_launch_cmdline = "filesrc location=" + self.device
            gst_launch_cmdline += " ! qtdemux ! " + decoder + "! tee name=t"

        gst_launch_cmdline += " t. ! " + self.nxp_converter + "!  video/x-raw,"
        gst_launch_cmdline += "width={:d},".format(self.MODEL_WIDTH)
        gst_launch_cmdline += "height={:d},".format(self.MODEL_HEIGHT)
        gst_launch_cmdline += " ! queue max-size-buffers=2 leaky=2 ! "
        gst_launch_cmdline += "videoconvert ! video/x-raw,format=RGB !"
        gst_launch_cmdline += " tensor_converter ! tensor_filter"
        gst_launch_cmdline += " framework=tensorflow-lite model="
        gst_launch_cmdline += self.tflite_model + " accelerator=" + backend
        gst_launch_cmdline += " silent=FALSE name=tensor_filter latency=1 ! "
        gst_launch_cmdline += "tensor_sink name=tensor_sink t. ! "
        gst_launch_cmdline += self.nxp_converter + "! "
        gst_launch_cmdline += "cairooverlay name=tensor_res ! "
        gst_launch_cmdline += "queue max-size-buffers=2 leaky=2 ! "
        gst_launch_cmdline += display

        self.pipeline = Gst.parse_launch(gst_launch_cmdline)

        # bus and message callback
        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect("message", self.on_bus_message)

        self.tensor_filter = self.pipeline.get_by_name("tensor_filter")
        self.wayland_sink = self.pipeline.get_by_name("img_tensor")

        # tensor sink signal : new data callback
        tensor_sink = self.pipeline.get_by_name("tensor_sink")
        tensor_sink.connect("new-data", self.new_data_cb)

        tensor_res = self.pipeline.get_by_name("tensor_res")
        tensor_res.connect("draw", self.draw_overlay_cb)
        tensor_res.connect("caps-changed", self.prepare_overlay_cb)
        if self.callback is not None:
            GObject.timeout_add(500, self.callback, self)

        # start pipeline
        self.pipeline.set_state(Gst.State.PLAYING)
        self.running = True

        self.set_window_title("img_tensor", "NNStreamer Object Detection Example")

        # run main loop
        self.loop.run()

        # quit when received eos or error message
        self.running = False
        self.pipeline.set_state(Gst.State.NULL)

        bus.remove_signal_watch()

    def tflite_init(self):
        """
        :return: True if successfully initialized
        """

        if not os.path.exists(self.tflite_model):
            logging.error("cannot find tflite model [%s]", self.tflite_model)
            return False

        label_path = self.label_path
        try:
            with open(label_path, "r") as label_file:
                for line in label_file.readlines():
                    if line[0].isdigit():
                        # index-prefixed label files (e.g. COCO) can skip ids;
                        # pad the gaps so the list index matches the class id
                        while str(len(self.tflite_labels)) not in line:
                            self.tflite_labels.append("Invalid")
                        self.tflite_labels.append(line[line.find(" ") + 1 :])
                    else:
                        self.tflite_labels.append(line)
        except FileNotFoundError:
            logging.error("cannot find tflite label [%s]", label_path)
            return False

        logging.info("finished to load labels, total [%d]", len(self.tflite_labels))
        return True

    # @brief Callback for tensor sink signal.
    def new_data_cb(self, sink, buffer):
        """Callback for tensor sink signal.

        :param sink: tensor sink element
        :param buffer: buffer from element
        :return: None
        """
        if self.running:
            new_time = GLib.get_monotonic_time()
            self.interval_time = new_time - self.old_time
            self.old_time = new_time
            if buffer.n_memory() != 4:
                return False

            #  tensor type is float32.
            # LOCATIONS_IDX:CLASSES_IDX:SCORES_IDX:NUM_DETECTION_IDX
            # 4:20:1:1\,20:1:1:1\,20:1:1:1\,1:1:1:1
            # [0] detection_boxes (default 4th tensor). BOX_SIZE :
            # #MaxDetection, ANY-TYPE
            # [1] detection_classes (default 2nd tensor).
            # #MaxDetection, ANY-TYPE
            # [2] detection_scores (default 3rd tensor)
            # #MaxDetection, ANY-TYPE
            # [3] num_detection (default 1st tensor). 1, ANY-TYPE

            # bytestrings that are based on float32 must be
            # decoded into float list.

            # boxes
            mem_boxes = buffer.peek_memory(0)
            ret, info_boxes = mem_boxes.map(Gst.MapFlags.READ)
            if ret:
                assert info_boxes.size == (
                    self.BOX_SIZE * self.DETECTION_MAX * 4
                ), "Invalid info_box size"
                decoded_boxes = list(
                    np.frombuffer(info_boxes.data, dtype=np.float32)
                )  # decode bytestrings to float list

            # detections
            mem_detections = buffer.peek_memory(1)
            ret, info_detections = mem_detections.map(Gst.MapFlags.READ)
            if ret:
                assert info_detections.size == (
                    self.DETECTION_MAX * 4
                ), "Invalid info_detection size"
                decoded_detections = list(
                    np.frombuffer(info_detections.data, dtype=np.float32)
                )  # decode bytestrings to float list

            # scores
            mem_scores = buffer.peek_memory(2)
            ret, info_scores = mem_scores.map(Gst.MapFlags.READ)
            if ret:
                assert info_scores.size == (
                    self.DETECTION_MAX * 4
                ), "Invalid info_score size"
                decoded_scores = list(
                    np.frombuffer(info_scores.data, dtype=np.float32)
                )  # decode bytestrings to float list

            # num detection
            mem_num = buffer.peek_memory(3)
            ret, info_num = mem_num.map(Gst.MapFlags.READ)
            if ret:
                assert info_num.size == 4, "Invalid info_num size"
                decoded_num = list(
                    np.frombuffer(info_num.data, dtype=np.float32)
                )  # decode bytestrings to float list

            self.get_detected_objects(
                decoded_boxes, decoded_detections, decoded_scores, int(decoded_num[0])
            )

            mem_boxes.unmap(info_boxes)
            mem_detections.unmap(info_detections)
            mem_scores.unmap(info_scores)
            mem_num.unmap(info_num)

            if self.display == "None":
                if (GLib.get_monotonic_time() - self.print_time) > 1000000:
                    inference = self.tensor_filter.get_property("latency")
                    print(
                        "Inference time: "
                        + str(inference / 1000)
                        + " ms ("
                        + "{:5.2f}".format(1 / (inference / 1000000))
                        + " IPS)"
                    )
                    self.print_time = GLib.get_monotonic_time()

    def get_detected_objects(self, boxes, detections, scores, num):
        """Pairs boxes with dectected objects"""
        threshold_score = 0.5
        detected = list()

        for i in range(num):
            score = scores[i]
            if score < threshold_score:
                continue

            c = detections[i]

            box_offset = self.BOX_SIZE * i
            ymin = boxes[box_offset + 0]
            xmin = boxes[box_offset + 1]
            ymax = boxes[box_offset + 2]
            xmax = boxes[box_offset + 3]

            x = xmin * self.MODEL_WIDTH
            y = ymin * self.MODEL_HEIGHT
            width = (xmax - xmin) * self.MODEL_WIDTH
            height = (ymax - ymin) * self.MODEL_HEIGHT

            obj = {
                "class_id": int(c),
                "x": x,
                "y": y,
                "width": width,
                "height": height,
                "prob": score,
            }

            detected.append(obj)

        # update result
        self.detected_objects.clear()

        for d in detected:
            self.detected_objects.append(d)
            if DEBUG:
                print("==============================")
                print("LABEL           : {}".format(self.tflite_labels[d["class_id"]]))
                print("x               : {}".format(d["x"]))
                print("y               : {}".format(d["y"]))
                print("width           : {}".format(d["width"]))
                print("height          : {}".format(d["height"]))
                print("Confidence Score: {}".format(d["prob"]))

    def prepare_overlay_cb(self, overlay, caps):
        """Store the information from the caps that we are interested in."""
        self.video_caps = caps

    def draw_overlay_cb(self, overlay, context, timestamp, duration):
        """Callback to draw the overlay."""
        if self.video_caps is None or not self.running:
            return
        scale_height = self.VIDEO_HEIGHT / 1080
        scale_width = self.VIDEO_WIDTH / 1920
        scale_text = max(scale_height, scale_width)

        # mutex_lock alternative required
        detected = self.detected_objects
        # mutex_unlock alternative needed

        drawed = 0
        context.select_font_face(
            "Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD
        )
        context.set_font_size(int(50.0 * scale_text))
        context.set_source_rgb(self.r, self.g, self.b)

        for obj in detected:
            label = self.tflite_labels[obj["class_id"]][:-1]
            x = obj["x"] * self.VIDEO_WIDTH // self.MODEL_WIDTH
            y = obj["y"] * self.VIDEO_HEIGHT // self.MODEL_HEIGHT
            width = obj["width"] * self.VIDEO_WIDTH // self.MODEL_WIDTH
            height = obj["height"] * self.VIDEO_HEIGHT // self.MODEL_HEIGHT

            # draw rectangle
            context.rectangle(x, y, width, height)
            context.set_line_width(3)
            context.stroke()

            # draw title
            context.move_to(x + 5, y + int(50.0 * scale_text))
            context.show_text(label)

            drawed += 1
            if drawed >= self.MAX_OBJECT_DETECTION:
                break

        inference = self.tensor_filter.get_property("latency")
        # Get current framerate and avg. framerate
        output_wayland = self.wayland_sink.get_property("last-message")
        if output_wayland:
            current_text = re.findall(r"current:\s[\d]+[.\d]*", output_wayland)[0]
            self.current_framerate = float(re.findall(r"[\d]+[.\d]*", current_text)[0])

        context.set_font_size(int(25.0 * scale_text))
        context.move_to(
            int(50 * scale_width), int(self.VIDEO_HEIGHT - (100 * scale_height))
        )
        context.show_text("i.MX NNStreamer Detection Demo")
        if inference == 0:
            context.move_to(
                int(50 * scale_width), int(self.VIDEO_HEIGHT - (75 * scale_height))
            )
            context.show_text("FPS: ")
            context.move_to(
                int(50 * scale_width), int(self.VIDEO_HEIGHT - (50 * scale_height))
            )
            context.show_text("IPS: ")
        elif (
            GLib.get_monotonic_time() - self.reload_time
        ) < 100000 and self.refresh_time != -1:
            context.move_to(
                int(50 * scale_width), int(self.VIDEO_HEIGHT - (75 * scale_height))
            )
            context.show_text(
                "FPS: {:6.2f} ({:6.2f} ms)".format(
                    self.current_framerate, 1.0 / self.current_framerate * 1000
                )
            )
            context.move_to(
                int(50 * scale_width), int(self.VIDEO_HEIGHT - (50 * scale_height))
            )
            context.show_text(
                "IPS: {:6.2f} ({:6.2f} ms)".format(
                    1 / (inference / 1000000), inference / 1000
                )
            )
        else:
            self.reload_time = GLib.get_monotonic_time()
            self.refresh_time = self.interval_time
            self.inference = self.tensor_filter.get_property("latency")
            context.move_to(
                int(50 * scale_width), int(self.VIDEO_HEIGHT - (75 * scale_height))
            )
            context.show_text(
                "FPS: {:6.2f} ({:6.2f} ms)".format(
                    self.current_framerate, 1.0 / self.current_framerate * 1000
                )
            )
            context.move_to(
                int(50 * scale_width), int(self.VIDEO_HEIGHT - (50 * scale_height))
            )
            context.show_text(
                "IPS: {:6.2f} ({:6.2f} ms)".format(
                    1 / (inference / 1000000), inference / 1000
                )
            )
        if self.first_frame:
            context.move_to(int(400 * scale_width), int(600 * scale_height))
            context.set_font_size(int(200.0 * min(scale_width, scale_height)))
            context.show_text("Loading...")
            self.first_frame = False
        context.fill()

    def on_bus_message(self, bus, message):
        """Callback for message.

        :param bus: pipeline bus
        :param message: message from pipeline
        :return: None
        """
        if message.type == Gst.MessageType.EOS:
            logging.info("received eos message")
            self.loop.quit()
        elif message.type == Gst.MessageType.ERROR:
            error, debug = message.parse_error()
            logging.warning("[error] %s : %s", error.message, debug)
            self.loop.quit()
        elif message.type == Gst.MessageType.WARNING:
            error, debug = message.parse_warning()
            logging.warning("[warning] %s : %s", error.message, debug)
        elif message.type == Gst.MessageType.STREAM_START:
            logging.info("received start message")
        elif message.type == Gst.MessageType.QOS:
            data_format, processed, dropped = message.parse_qos_stats()
            format_str = Gst.Format.get_name(data_format)
            logging.debug(
                "[qos] format[%s] processed[%d] dropped[%d]",
                format_str,
                processed,
                dropped,
            )

    def set_window_title(self, name, title):
        """Set window title for X11.

        :param name: GstXImageSink element name
        :param title: window title
        :return: None
        """
        element = self.pipeline.get_by_name(name)
        if element is not None:
            pad = element.get_static_pad("sink")
            if pad is not None:
                tags = Gst.TagList.new_empty()
                tags.add_value(Gst.TagMergeMode.APPEND, "title", title)
                pad.send_event(Gst.Event.new_tag(tags))


if __name__ == "__main__":
    if (
        len(sys.argv) != 7
        and len(sys.argv) != 5
        and len(sys.argv) != 9
        and len(sys.argv) != 12
        and len(sys.argv) != 6
    ):
        print(
            "Usage: python3 nndetection.py <dev/video*/video file>"
            + " <NPU/CPU> <model file> <label file>"
        )
        exit()
    # Get platform
    platform = os.uname().nodename
    if len(sys.argv) == 7:
        example = ObjectDetection(
            platform,
            sys.argv[1],
            sys.argv[2],
            sys.argv[3],
            sys.argv[4],
            sys.argv[5],
            sys.argv[6],
        )
    if len(sys.argv) == 5:
        example = ObjectDetection(
            platform, sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]
        )
    if len(sys.argv) == 6:
        example = ObjectDetection(
            platform, sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], sys.argv[5]
        )
    if len(sys.argv) == 9:
        example = ObjectDetection(
            platform,
            sys.argv[1],
            sys.argv[2],
            sys.argv[3],
            sys.argv[4],
            sys.argv[5],
            sys.argv[6],
            int(sys.argv[7]),
            int(sys.argv[8]),
        )
    if len(sys.argv) == 12:
        example = ObjectDetection(
            platform,
            sys.argv[1],
            sys.argv[2],
            sys.argv[3],
            sys.argv[4],
            sys.argv[5],
            sys.argv[6],
            int(sys.argv[7]),
            int(sys.argv[8]),
            int(sys.argv[9]),
            int(sys.argv[10]),
            int(sys.argv[11]),
        )
    example.run()

 

For comparison, the upstream nnstreamer-example builds its pipeline like this, letting tensor_decoder mode=bounding_boxes handle the boxes instead of decoding the output tensors by hand:

        self.pipeline = Gst.parse_launch(
            'v4l2src name=cam_src ! videoconvert ! videoscale ! '
            'video/x-raw,width=640,height=480,format=RGB ! tee name=t_raw '
            't_raw. ! queue leaky=2 max-size-buffers=2 ! videoscale ! video/x-raw,width=300,height=300 ! tensor_converter ! '
            'tensor_transform mode=arithmetic option=typecast:float32,add:-127.5,div:127.5 ! '
            'tensor_filter framework=tensorflow-lite model=' + self.tflite_model + ' ! '
            'tensor_decoder mode=bounding_boxes option1=mobilenet-ssd option2='
            + self.tflite_label + ' option3=' + self.tflite_box_prior + ' option4=640:480 option5=300:300 ! '
            'compositor name=mix sink_0::zorder=2 sink_1::zorder=1 ! videoconvert ! ximagesink '
            't_raw. ! queue leaky=2 max-size-buffers=10 ! mix. '
        )

[링크 : https://github.com/nnstreamer/nnstreamer-example/blob/main/native/example_object_detection_tensorflow_lite/nnstreamer_example_object_detection_tflite.py]

 

Printing gst_launch_cmdline gives the GStreamer pipeline shown below.
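The dump can be produced with a throwaway debug print just before the launch call in run() (a hypothetical addition, not part of the stock script):

        print(gst_launch_cmdline)  # debug: dump the assembled pipeline string
        self.pipeline = Gst.parse_launch(gst_launch_cmdline)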

v4l2src name=cam_src device=/dev/video3 ! imxvideoconvert_g2d ! video/x-raw,width=1920,height=1080,framerate=30/1,format=BGRx ! tee name=t t. ! imxvideoconvert_g2d !  video/x-raw,width=300,height=300, ! queue max-size-buffers=2 leaky=2 ! videoconvert ! video/x-raw,format=RGB ! tensor_converter ! tensor_filter framework=tensorflow-lite model=/home/root/.cache/gopoint/mobilenet_ssd_v2_coco_quant_postprocess.tflite accelerator=true:npu custom=Delegate:External,ExtDelegateLib:libvx_delegate.so silent=FALSE name=tensor_filter latency=1 ! tensor_sink name=tensor_sink t. ! imxvideoconvert_g2d ! cairooverlay name=tensor_res ! queue max-size-buffers=2 leaky=2 ! fpsdisplaysink name=img_tensor text-overlay=false video-sink=waylandsink sync=false

 

It is hard to read as one line, so here it is split at each element (one quick way to do the split is shown below).
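The same debug print can emit roughly one element per line by breaking the string at each " ! " link (again a throwaway line, assuming the gst_launch_cmdline variable from run() above):

        print(gst_launch_cmdline.replace(" ! ", " !\n"))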

v4l2src name=cam_src device=/dev/video3 !
imxvideoconvert_g2d !
video/x-raw,width=1920,height=1080,framerate=30/1,format=BGRx !
tee name=t t. !
imxvideoconvert_g2d !
video/x-raw,width=300,height=300, !
queue max-size-buffers=2 leaky=2 !
videoconvert !
video/x-raw,format=RGB !
tensor_converter !
tensor_filter framework=tensorflow-lite model=/home/root/.cache/gopoint/mobilenet_ssd_v2_coco_quant_postprocess.tflite accelerator=true:npu custom=Delegate:External,ExtDelegateLib:libvx_delegate.so silent=FALSE name=tensor_filter latency=1 !
tensor_sink name=tensor_sink t. !
imxvideoconvert_g2d !
cairooverlay name=tensor_res !
queue max-size-buffers=2 leaky=2 !
fpsdisplaysink name=img_tensor text-overlay=false video-sink=waylandsink sync=false

 

+

2024.01.03

# cd /home/root/.nxp-demo-experience/scripts/machine_learning
# python3 nndetection.py /dev/video3 NPU /home/root/.cache/gopoint/mobilenet_ssd_v2_coco_quant_postprocess.tflite /home/root/.cache/gopoint/coco_labels.txt

 

It can also be run with gst-launch, but since the Python callbacks are never attached, the cairooverlay is not drawn, so it does not show the same screen as the script (see the sketch after the command below).

gst-launch-1.0 v4l2src name=cam_src device=/dev/video3 ! imxvideoconvert_g2d ! video/x-raw,width=1920,height=1080,framerate=30/1,format=BGRx ! tee name=t t. ! imxvideoconvert_g2d ! video/x-raw,width=300,height=300, ! queue max-size-buffers=2 leaky=2 ! videoconvert ! video/x-raw,format=RGB ! tensor_converter ! tensor_filter framework=tensorflow-lite model=/home/root/.cache/gopoint/mobilenet_ssd_v2_coco_quant_postprocess.tflite accelerator=true:npu custom=Delegate:External,ExtDelegateLib:libvx_delegate.so silent=FALSE name=tensor_filter latency=1 !  tensor_sink name=tensor_sink t. ! imxvideoconvert_g2d ! cairooverlay name=tensor_res ! queue max-size-buffers=2 leaky=2 ! fpsdisplaysink name=img_tensor text-overlay=false video-sink=waylandsink sync=false
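A minimal sketch of how the overlay could be restored: wrap essentially the same pipeline in a few lines of Python and connect the tensor_sink / cairooverlay signals that gst-launch cannot attach. The device, model path, delegate library, and element names below are taken from the pipeline above; the script structure and the on_new_data / on_draw names are hypothetical placeholders, and waylandsink is used directly instead of fpsdisplaysink as a simplification.

#!/usr/bin/env python3
# Minimal sketch: same pipeline as the gst-launch line above, wrapped in Python
# so the tensor_sink / cairooverlay signals can actually be connected.
# on_new_data / on_draw are placeholders for the decoding and drawing logic
# from new_data_cb() / draw_overlay_cb() in the demo script.
import gi

gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib

Gst.init(None)

PIPELINE = (
    "v4l2src device=/dev/video3 ! imxvideoconvert_g2d ! "
    "video/x-raw,width=1920,height=1080,framerate=30/1,format=BGRx ! tee name=t "
    "t. ! imxvideoconvert_g2d ! video/x-raw,width=300,height=300 ! "
    "queue max-size-buffers=2 leaky=2 ! videoconvert ! video/x-raw,format=RGB ! "
    "tensor_converter ! tensor_filter framework=tensorflow-lite "
    "model=/home/root/.cache/gopoint/mobilenet_ssd_v2_coco_quant_postprocess.tflite "
    "accelerator=true:npu custom=Delegate:External,ExtDelegateLib:libvx_delegate.so "
    "name=tensor_filter ! tensor_sink name=tensor_sink "
    "t. ! imxvideoconvert_g2d ! cairooverlay name=tensor_res ! "
    "queue max-size-buffers=2 leaky=2 ! waylandsink sync=false"
)

def on_new_data(sink, buffer):
    # per-frame tensor decoding (boxes / classes / scores / num) would go here
    pass

def on_draw(overlay, context, timestamp, duration):
    # cairo rectangle / label drawing would go here
    pass

pipeline = Gst.parse_launch(PIPELINE)
pipeline.get_by_name("tensor_sink").connect("new-data", on_new_data)
pipeline.get_by_name("tensor_res").connect("draw", on_draw)

pipeline.set_state(Gst.State.PLAYING)
loop = GLib.MainLoop()
try:
    loop.run()  # Ctrl+C to stop
finally:
    pipeline.set_state(Gst.State.NULL)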