I have built a CNN which I'm trying to train on my own data, but when I run the code I get the following error:
File "C:/linear regression/new_net.py", line 92, in <module>
ValueError: Cannot feed value of shape (500, 99999) for Tensor 'X:0', which has shape '(500, 4392)'
and I don't know which value I need to fix. My data consists of 100,000 rows of 4392 pixel columns each; I reshape each row to 61x72 pixels, which gives me 100,000 images. I take 80,000 for training and 20,000 for testing, and I use a batch size of 500. I don't know what is wrong with the code; if someone can help, please!
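To check the layout I also print the shape right after loading (just a sanity-check sketch; it assumes the label sits in column 0, i.e. 4393 columns in total, since my code below slices column 0 off as the label):

import numpy as np

# Sanity check of the raw CSV layout (assumption: one image per row,
# label in column 0, so 1 + 61*72 = 4393 columns in total).
data_in = np.loadtxt(open("images.csv"), delimiter=",")
print(data_in.shape)  # I expect (100000, 4393); transposing would swap this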
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
# Input images - check if I need the transpose command:
# np.loadtxt returns one row per CSV line, and np.transpose swaps rows and columns.
data_in = np.transpose(np.loadtxt(open("images.csv"), delimiter=","))
train_X = data_in[0:80000, :]      # first 80,000 rows for training
test_X = data_in[80000:100000, :]  # remaining 20,000 rows for testing
test_images = test_X[:, 1:]        # all pixel columns
test_labels = test_X[:, 0]         # column 0 holds the label
n_epochs = 10
batch_size = 500
n_classes = 50   # number of output classes
height = 61
width = 72
channels = 1
n_inputs = height * width  # 61 * 72 = 4392
X = tf.placeholder(tf.float32, shape=[batch_size, n_inputs], name="X")
X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])
y = tf.placeholder(tf.int32, shape=[None], name="y")
conv1 = tf.layers.conv2d(inputs=X_reshaped, filters=32, kernel_size=[5, 5],
                         padding="same", activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
conv2 = tf.layers.conv2d(inputs=pool1, filters=64, kernel_size=[5, 5],
                         padding="same", activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# After two 2x2/stride-2 poolings: 61 -> 30 -> 15 and 72 -> 36 -> 18,
# so pool2 is [batch_size, 15, 18, 64] and flattens to 15 * 18 * 64 = 17280.
pool2_flat = tf.reshape(pool2, [-1, 15 * 18 * 64])
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; rate=0.4 means a 0.6 probability that each element is kept
dropout = tf.layers.dropout(inputs=dense, rate=0.4)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, n_classes]
logits = tf.layers.dense(inputs=dropout, units=n_classes)
predictions = {
    # Generate predictions (for PREDICT and EVAL mode)
    "classes": tf.argmax(input=logits, axis=1),
    # Per-class probabilities via sigmoid (for PREDICT mode)
    "probabilities": tf.nn.sigmoid(logits, name="sigmoid_tensor")}
# One-hot encode the integer labels fed through `y` (depth = n_classes)
onehot_labels = tf.one_hot(indices=tf.cast(y, tf.int32), depth=n_classes)
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=onehot_labels, logits=logits)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
# Accuracy: fraction of examples whose predicted class matches the label
correct = tf.equal(tf.cast(predictions["classes"], tf.int32), y)
accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
# Initializing the variables
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        # Feed the training data to the network batch by batch
        for iteration in range(len(train_X) // batch_size):
            X_batch = train_X[iteration * batch_size:(iteration + 1) * batch_size, 1:]
            y_batch = train_X[iteration * batch_size:(iteration + 1) * batch_size, 0]
            sess.run(train_op, feed_dict={X: X_batch, y: y_batch})
        acc_train = accuracy.eval(feed_dict={X: X_batch, y: y_batch})
        acc_test = accuracy.eval(feed_dict={X: test_images, y: test_labels})
        print("Epoch:", epoch + 1, "Train accuracy:", acc_train, "test accuracy:", acc_test)
    print("Optimization Finished!")
In addition, I don't understand why Python treats my data as 99,999 and not 100,000.
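Here is a small numpy experiment I ran to understand the shapes, scaled down to 10 rows and 5 columns in place of my real file (the 100,000 x 4393 layout, with 1 label column + 4392 pixel columns, is my assumption):

import numpy as np

fake = np.arange(50).reshape(10, 5)   # 10 "images", column 0 = label

data_in = np.transpose(fake)          # shape (5, 10): rows and columns swapped
X_batch = data_in[0:8, :][0:2, 1:]    # shape (2, 9): width = number of rows - 1
print(X_batch.shape)                  # with the real file this would be (500, 99999)

data_in = fake                        # without the transpose
X_batch = data_in[0:8, :][0:2, 1:]    # shape (2, 4): only the label column is dropped
print(X_batch.shape)                  # with the real file this would be (500, 4392)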