--------------------------------------------------------------------------- ValueError Traceback (most recent call last) Cell In[28], line 1 ----> 1 history = model.fit(train_dataset, 2 epochs=25, 3 validation_data = test_dataset, 4 validation_steps=1)
File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:777, in Model.fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs) 774 self._check_call_args('fit') 776 func = self._select_training_loop(x) --> 777 return func.fit( 778 self, 779 x=x, 780 y=y, 781 batch_size=batch_size, 782 epochs=epochs, 783 verbose=verbose, 784 callbacks=callbacks, 785 validation_split=validation_split, 786 validation_data=validation_data, 787 shuffle=shuffle, 788 class_weight=class_weight, 789 sample_weight=sample_weight, 790 initial_epoch=initial_epoch, 791 steps_per_epoch=steps_per_epoch, 792 validation_steps=validation_steps, 793 validation_freq=validation_freq, 794 max_queue_size=max_queue_size, 795 workers=workers, 796 use_multiprocessing=use_multiprocessing)
File ~/.local/lib/python3.10/site-packages/keras/engine/training_arrays_v1.py:616, in ArrayLikeTrainingLoop.fit(self, model, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, **kwargs) 595 def fit(self, 596 model, 597 x=None, (...) 611 validation_freq=1, 612 **kwargs): 613 batch_size = model._validate_or_infer_batch_size(batch_size, 614 steps_per_epoch, x) --> 616 x, y, sample_weights = model._standardize_user_data( 617 x, 618 y, 619 sample_weight=sample_weight, 620 class_weight=class_weight, 621 batch_size=batch_size, 622 check_steps=True, 623 steps_name='steps_per_epoch', 624 steps=steps_per_epoch, 625 validation_split=validation_split, 626 shuffle=shuffle) 628 if validation_data: 629 val_x, val_y, val_sample_weights = model._prepare_validation_data( 630 validation_data, batch_size, validation_steps)
File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:2318, in Model._standardize_user_data(self, x, y, sample_weight, class_weight, batch_size, check_steps, steps_name, steps, validation_split, shuffle, extract_tensors_from_dataset) 2316 is_compile_called = False 2317 if not self._is_compiled and self.optimizer: -> 2318 self._compile_from_inputs(all_inputs, y_input, x, y) 2319 is_compile_called = True 2321 # In graph mode, if we had just set inputs and targets as symbolic tensors 2322 # by invoking build and compile on the model respectively, we do not have to 2323 # feed anything to the model. Model already has input and target data as (...) 2327 2328 # self.run_eagerly is not free to compute, so we want to reuse the value.
File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:2568, in Model._compile_from_inputs(self, all_inputs, target, orig_inputs, orig_target) 2565 else: 2566 target_tensors = None -> 2568 self.compile( 2569 optimizer=self.optimizer, 2570 loss=self.loss, 2571 metrics=self._compile_metrics, 2572 weighted_metrics=self._compile_weighted_metrics, 2573 loss_weights=self.loss_weights, 2574 target_tensors=target_tensors, 2575 sample_weight_mode=self.sample_weight_mode, 2576 run_eagerly=self.run_eagerly, 2577 experimental_run_tf_function=self._experimental_run_tf_function)
File ~/.local/lib/python3.10/site-packages/tensorflow/python/training/tracking/base.py:629, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs) 627 self._self_setattr_tracking = False # pylint: disable=protected-access 628 try: --> 629 result = method(self, *args, **kwargs) 630 finally: 631 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:443, in Model.compile(self, optimizer, loss, metrics, loss_weights, sample_weight_mode, weighted_metrics, target_tensors, distribute, **kwargs) 439 training_utils_v1.prepare_sample_weight_modes( 440 self._training_endpoints, sample_weight_mode) 442 # Creates the model loss and weighted metrics sub-graphs. --> 443 self._compile_weights_loss_and_weighted_metrics() 445 # Functions for train, test and predict will 446 # be compiled lazily when required. 447 # This saves time when the user is not using all functions. 448 self.train_function = None
File ~/.local/lib/python3.10/site-packages/tensorflow/python/training/tracking/base.py:629, in no_automatic_dependency_tracking.<locals>._method_wrapper(self, *args, **kwargs) 627 self._self_setattr_tracking = False # pylint: disable=protected-access 628 try: --> 629 result = method(self, *args, **kwargs) 630 finally: 631 self._self_setattr_tracking = previous_value # pylint: disable=protected-access
File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:1537, in Model._compile_weights_loss_and_weighted_metrics(self, sample_weights) 1524 self._handle_metrics( 1525 self.outputs, 1526 targets=self._targets, (...) 1529 masks=masks, 1530 return_weighted_metrics=True) 1532 # Compute total loss. 1533 # Used to keep track of the total loss value (stateless). 1534 # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) + 1535 # loss_weight_2 * output_2_loss_fn(...) + 1536 # layer losses. -> 1537 self.total_loss = self._prepare_total_loss(masks)
File ~/.local/lib/python3.10/site-packages/keras/engine/training_v1.py:1597, in Model._prepare_total_loss(self, masks) 1594 sample_weight *= mask 1596 if hasattr(loss_fn, 'reduction'): -> 1597 per_sample_losses = loss_fn.call(y_true, y_pred) 1598 weighted_losses = losses_utils.compute_weighted_loss( 1599 per_sample_losses, 1600 sample_weight=sample_weight, 1601 reduction=losses_utils.ReductionV2.NONE) 1602 loss_reduction = loss_fn.reduction
File ~/.local/lib/python3.10/site-packages/keras/losses.py:245, in LossFunctionWrapper.call(self, y_true, y_pred) 242 y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true) 244 ag_fn = tf.__internal__.autograph.tf_convert(self.fn, tf.__internal__.autograph.control_status_ctx()) --> 245 return ag_fn(y_true, y_pred, **self._fn_kwargs)
File ~/.local/lib/python3.10/site-packages/tensorflow/python/autograph/impl/api.py:692, in convert.<locals>.decorator.<locals>.wrapper(*args, **kwargs) 690 except Exception as e: # pylint:disable=broad-except 691 if hasattr(e, 'ag_error_metadata'): --> 692 raise e.ag_error_metadata.to_exception(e) 693 else: 694 raise
ValueError: in user code:
File "/tmp/ipykernel_49162/810674056.py", line 8, in Loss * loss += confidenceLoss(y[:,:,:-4],tf.cast(gt[:,:,0],tf.int32)) File "/tmp/ipykernel_49162/2037607510.py", line 2, in confidenceLoss * unweighted_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(label, y)
ValueError: Only call sparse_softmax_cross_entropy_with_logits with named arguments (labels=..., logits=..., ...). Received unnamed argument: Tensor("loss/output_1_loss/Cast:0", shape=(None, None), dtype=int32)