Hello! I'm excited to start using this tool, and I'm in the middle of setting up an environment for it on a cloud-based platform. To make sure everything is working as intended, I'm working through the leaf counting tutorial here.
I would say the biggest difference between my code and the tutorial is this line:
I'm wondering if the dataset here is somehow different from when the tutorial was first written?
Everything executes fine, but once I reach model.begin_training() I get a ValueError. Here's my code:
# Make the locally-cloned PlantCV and Deep Plant Phenomics repos importable.
import sys
sys.path.append('/repos/plantcv')
sys.path.append('/repos/deepplantphenomics')
from plantcv import plantcv as pcv
import cv2
import deepplantphenomics as dpp
# NOTE(review): tensorboard_dir contains spaces ('Setup and Troubleshooting') —
# presumably fine for TensorBoard, but worth confirming the logs actually land there.
model = dpp.DPPModel(debug=True, save_checkpoints=False, tensorboard_dir='/mnt/Setup and Troubleshooting/tensorlogs', report_rate=20)
# 3 channels for colour, 1 channel for greyscale
channels = 3
# Setup and hyperparameters
model.set_batch_size(4)
model.set_number_of_threads(8)
model.set_image_dimensions(128, 128, channels)
model.set_resize_images(True)
# Leaf counting is framed as single-output regression (predicted leaf count).
model.set_problem_type('regression')
model.set_num_regression_outputs(1)
# 80% of the loaded samples go to training; the split mask is built from the
# number of labelled samples the loader finds (see NOTE below).
model.set_train_test_split(0.8)
model.set_learning_rate(0.0001)
model.set_weight_initializer('xavier')
model.set_maximum_training_epochs(500)
# Augmentation options
model.set_augmentation_brightness_and_contrast(True)
model.set_augmentation_flip_horizontal(True)
model.set_augmentation_flip_vertical(True)
model.set_augmentation_crop(True)
# Load all data for IPPN leaf counting dataset
# NOTE(review): the later failure ("DynamicPartition ... input shapes: [8], [0]")
# means the partition mask built in loaders.split_raw_data has length 0 while
# 8 images were found — i.e. this loader appears to have loaded images but no
# labels. Check that the expected label file (e.g. Leaf_counts.csv) exists in
# this directory and matches the filenames the loader expects — TODO confirm
# against the deepplantphenomics loader source for this dataset.
model.load_ippn_leaf_count_dataset_from_directory('/repos/deepplantphenomics/deepplantphenomics/test_data/test_Ara2013_Canon')
# Define a model architecture: four conv+pool stages (tanh activations)
# shrinking the 128x128 input before the final regression output layer.
model.add_input_layer()
model.add_convolutional_layer(filter_dimension=[5, 5, channels, 32], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_convolutional_layer(filter_dimension=[5, 5, 32, 64], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_convolutional_layer(filter_dimension=[3, 3, 64, 64], stride_length=1, activation_function='tanh')
model.add_pooling_layer(kernel_size=3, stride_length=2)
model.add_output_layer()
# Begin training the regression model
model.begin_training()
I'm executing this in a Jupyter Notebook. Everything runs fine until that final line, which throws the following error:
InvalidArgumentError Traceback (most recent call last)
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs)
1575 try:
-> 1576 c_op = c_api.TF_FinishOperation(op_desc)
1577 except errors.InvalidArgumentError as e:
InvalidArgumentError: Dimensions must be equal, but are 8 and 0 for 'DynamicPartition' (op: 'DynamicPartition') with input shapes: [8], [0].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-9-7fb97535fd52> in <module>()
1 # Begin training the regression model
----> 2 model.begin_training()
/repos/deepplantphenomics/deepplantphenomics/deepplantpheno.py in begin_training(self, return_test_loss)
896
897 with self.__graph.as_default():
--> 898 self.__assemble_graph()
899
900 # Either load the network parameters from a checkpoint file or start training
/repos/deepplantphenomics/deepplantphenomics/deepplantpheno.py in __assemble_graph(self)
557 self.__validation_split, self.__all_moderation_features,
558 self.__training_augmentation_images, self.__training_augmentation_labels,
--> 559 self.__split_labels)
560
561 # parse the images and set the appropriate environment variables
/repos/deepplantphenomics/deepplantphenomics/loaders.py in split_raw_data(images, labels, test_ratio, validation_ratio, moderation_features, augmentation_images, augmentation_labels, split_labels)
41 # create partitions, we set train/validation to None if they're not being used
42 if test_ratio != 0 and validation_ratio != 0:
---> 43 train_images, test_images, val_images = tf.dynamic_partition(images, mask, 3)
44 train_labels, test_labels, val_labels = tf.dynamic_partition(labels, mask, 3)
45 elif test_ratio != 0 and validation_ratio == 0:
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/ops/gen_data_flow_ops.py in dynamic_partition(data, partitions, num_partitions, name)
607 _, _, _op = _op_def_lib._apply_op_helper(
608 "DynamicPartition", data=data, partitions=partitions,
--> 609 num_partitions=num_partitions, name=name)
610 _result = _op.outputs[:]
611 _inputs_flat = _op.inputs
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
785 op = g.create_op(op_type_name, inputs, output_types, name=scope,
786 input_types=input_types, attrs=attr_protos,
--> 787 op_def=op_def)
788 return output_structure, op_def.is_stateful, op
789
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/util/deprecation.py in new_func(*args, **kwargs)
452 'in a future version' if date is None else ('after %s' % date),
453 instructions)
--> 454 return func(*args, **kwargs)
455 return tf_decorator.make_decorator(func, new_func, 'deprecated',
456 _add_deprecated_arg_notice_to_docstring(
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in create_op(***failed resolving arguments***)
3153 input_types=input_types,
3154 original_op=self._default_original_op,
-> 3155 op_def=op_def)
3156 self._create_op_helper(ret, compute_device=compute_device)
3157 return ret
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in __init__(self, node_def, g, inputs, output_types, control_inputs, input_types, original_op, op_def)
1729 op_def, inputs, node_def.attr)
1730 self._c_op = _create_c_op(self._graph, node_def, grouped_inputs,
-> 1731 control_input_ops)
1732
1733 # Initialize self._outputs.
/usr/local/anaconda3/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _create_c_op(graph, node_def, inputs, control_inputs)
1577 except errors.InvalidArgumentError as e:
1578 # Convert to ValueError for backwards compatibility.
-> 1579 raise ValueError(str(e))
1580
1581 return c_op
ValueError: Dimensions must be equal, but are 8 and 0 for 'DynamicPartition' (op: 'DynamicPartition') with input shapes: [8], [0].
Any help would be appreciated. Thanks for putting all this together!