Comments (1)
I am now able to extract point clouds from the tfrecords, but the point clouds I have checked all have negative RGB values. Has anyone else faced this problem, or am I doing something wrong?
from RecordReader import *
from utils import *
import numpy as np
import argparse
from open3d import *
import cv2
import random
# Path(s) to the training tfrecord file(s); consumed by getDatasetTrain below.
tfrecord_path = ["./data/Tango_train.tfrecords"]
def compute3Dcentroid(points):
    """Return the centroid of a point set.

    Args:
        points: array-like of shape (N, D) — typically (N, 3) XYZ points.

    Returns:
        ndarray of shape (D,): the per-axis mean of the points.
    """
    # np.mean is the idiomatic (and equivalent) form of sum(points, axis=0)/len(points).
    return np.mean(points, axis=0)
def normalizePoints(points):
    """Min-max normalize an array into [0, 1).

    Args:
        points: array-like of numeric values (any shape).

    Returns:
        ndarray of the same shape, shifted/scaled so the global minimum maps
        to 0 and the global maximum maps to ~1. The 1e-8 epsilon guards
        against division by zero when all values are equal.
    """
    # A single np.max/np.min already reduces over ALL axes; the original
    # np.max(np.max(points)) double reduction was redundant.
    max_value = np.max(points)
    min_value = np.min(points)
    return (points - min_value) / (max_value - min_value + 1e-8)
def parse_args():
    """Build the Planenet command-line parser and parse sys.argv.

    Returns:
        argparse.Namespace with all training/testing options; every option
        has a default, so running with no flags yields a valid configuration.
    """
    parser = argparse.ArgumentParser(description='Planenet')
    add = parser.add_argument  # shorthand; keeps each option on one line
    add('--gpu', dest='gpu_id', help='GPU device id to use [0]', default='0', type=str)
    # task: [train, test, predict]
    add('--task', dest='task', help='task type: [train, test, predict]', default='train', type=str)
    add('--restore', dest='restore', help='how to restore the model', default=1, type=int)
    add('--batchSize', dest='batchSize', help='batch size', default=4, type=int)
    add('--dataset', dest='dataset', help='dataset name for test/predict', default='1', type=str)
    add('--slice', dest='slice', help='whether or not to use the slice version.', action='store_true')
    add('--numTestingImages', dest='numTestingImages', help='the number of images to test/predict', default=20, type=int)
    add('--fineTuningCheckpoint', dest='fineTuningCheckpoint', help='specify the model for fine-tuning', default='checkpoint/floornet_hybrid4_branch0123_wsf', type=str)
    add('--suffix', dest='suffix', help='add a suffix to keyname to distinguish experiments', default='', type=str)
    add('--l2Weight', dest='l2Weight', help='L2 regulation weight', default=5e-4, type=float)
    add('--LR', dest='LR', help='learning rate', default=3e-5, type=float)
    add('--hybrid', dest='hybrid', help='hybrid training', default='1', type=str)
    add('--branches', help='active branches of the network: 0: PointNet, 1: top-down, 2: bottom-up, 3: PointNet segmentation, 4: Image Features, 5: Image Features with Joint training, 6: Additional Layers Before Pred (0, 01, 012, 0123, 01234, 1, 02*, 013)', default='0123', type=str)
    # parser.add_argument('--batch_norm', help='add batch normalization to network', action='store_true')
    add('--cornerLossType', dest='cornerLossType', help='corner loss type', default='sigmoid', type=str)
    add('--loss', help='loss type needed. [wall corner loss, door corner loss, icon corner loss, icon segmentation, room segmentation]', default='01234', type=str)
    add('--cornerLossWeight', dest='cornerLossWeight', help='corner loss weight', default=10, type=float)
    add('--augmentation', dest='augmentation', help='augmentation (wsfd)', default='wsf', type=str)
    add('--numPoints', dest='numPoints', help='number of points', default=50000, type=int)
    add('--numInputChannels', dest='numInputChannels', help='number of input channels', default=3, type=int)
    add('--sumScale', dest='sumScale', help='avoid segment sum results to be too large', default=10, type=int)
    add('--visualizeReconstruction', dest='visualizeReconstruction', help='whether to visualize flooplan reconstruction or not', default=0, type=int)
    add('--numFinalChannels', dest='numFinalChannels', help='the number of final channels', default=256, type=int)
    add('--numIterations', dest='numIterations', help='the number of iterations', default=10000, type=int)
    add('--startIteration', dest='startIteration', help='the index of iteration to start', default=0, type=int)
    add('--useCache', dest='useCache', help='whether to cache or not', default=1, type=int)
    add('--debug', dest='debug', help='debug index', default=-1, type=int)
    add('--outputLayers', dest='outputLayers', help='output layers', default='two', type=str)
    add('--kernelSize', dest='kernelSize', help='corner kernel size', default=11, type=int)
    add('--iconLossWeight', dest='iconLossWeight', help='icon loss weight', default=1, type=float)
    add('--poolingTypes', dest='poolingTypes', help='pooling types', default='sssmm', type=str)
    # NOTE: store_false means --visualize DISABLES visualization (default True).
    add('--visualize', dest='visualize', help='visualize during training', action='store_false')
    add('--iconPositiveWeight', dest='iconPositiveWeight', help='icon positive weight', default=10, type=int)
    add('--prefix', dest='prefix', help='prefix', default='floornet', type=str)
    add('--drawFinal', dest='drawFinal', help='draw final', action='store_false')
    add('--separateIconLoss', dest='separateIconLoss', help='separate loss for icon', action='store_false')
    add('--evaluateImage', dest='evaluateImage', help='evaluate image', action='store_true')
    args = parser.parse_args()
    # args.keyname = os.path.basename(__file__).rstrip('.py')
    # args.keyname = args.keyname.replace('train_', '')
    # layers where deep supervision happens
    # addArgs(args)
    return args
# --- Script body: pull one training batch from the tfrecords and visualize it ---
args = parse_args()
# Build the training dataset pipeline (augmentation string comes from the CLI).
dataset_train = getDatasetTrain(tfrecord_path, args.augmentation, False, args.batchSize)
# Feedable-iterator pattern (TF 1.x): a string handle placeholder lets one
# graph iterator be bound to different concrete dataset iterators at run time.
handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(handle, dataset_train.output_types, dataset_train.output_shapes)
input_dict, gt_dict = iterator.get_next()
iterator_train = dataset_train.make_one_shot_iterator()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True  # grab GPU memory on demand, not all up front
config.allow_soft_placement = True
with tf.Session(config=config) as sess:
    handle_train = sess.run(iterator_train.string_handle())
    sess.run(tf.global_variables_initializer())
    #tf.set_random_seed(1029)
    # Fetch one batch; rebinds the Python names from graph tensors to numpy values.
    input_dict, gt_dict = sess.run([input_dict, gt_dict], feed_dict={handle: handle_train})
    points = input_dict['points']
    #icon = gt_dict['icon']
    #room = gt_dict['room']
    # Third sample of the batch; first 3 channels taken as XYZ coordinates.
    scan_1 = points[2,:,:3]
    m = np.max(scan_1, axis=0)  # per-axis max of the scan (inspection only; unused below)
    #index_1 = point_indices[0]
    image = drawTopDownView(scan_1,256,256)
    #d = getDensity(scan_1,256,256)
    #i = getDensityFromIndices(index_1)
    #cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    #cv2.imshow('image',i)
    pcd = PointCloud()
    pcd.points = Vector3dVector(scan_1)
    # NOTE(review): channels 3:6 are assumed to be RGB in [0, 1]; the issue
    # reports negative values here — verify the tfrecord preprocessing
    # (e.g. whether colors were mean-centered during record creation).
    pcd.colors = Vector3dVector(points[2,:,3:6])
    draw_geometries([pcd])
If you look at indices 3 to 5, which hold the RGB values, they are all negative.
If anyone has faced this problem, or if @art-programmer can give some insight into it, that would be really appreciated.
Thanks.
from floornet.
Related Issues (20)
- Disable Gurobi HOT 1
- Question about metadata HOT 4
- Question about metadatata
- Question about checkpoint HOT 2
- How can I got a 3D model of the indoor space by a tango phone? HOT 1
- What is the 'point_indices' in the example data? HOT 1
- Questions about floorplan.txt HOT 1
- I want to get reconstructFloorplan pred data, but when i debug code , the floornetplan (result_pred) is {} HOT 2
- Usage of image branch and possible scaling bug HOT 1
- Question about 'reconstructFloorplan'. HOT 3
- How to do inferencing for a given 3d point cloud of room ? HOT 7
- Alternatives to Gurobi HOT 10
- Generate custom ground truth floor plan for a point cloud HOT 1
- question about getCoarseIndicesMapsBatch and getCoarseIndicesMaps
- Why don't you use python3?
- If there is new point cloud data, how to use the trained model to get the predicted floorplan? HOT 1
- Image Data From tfrecord File
- test自己的数据时,需要多大内存?
- How to use scene_list.txt (association between raw point cloud and annotations) ?
Recommend Projects
-
React
A declarative, efficient, and flexible JavaScript library for building user interfaces.
-
Vue.js
🖖 Vue.js is a progressive, incrementally-adoptable JavaScript framework for building UI on the web.
-
Typescript
TypeScript is a superset of JavaScript that compiles to clean JavaScript output.
-
TensorFlow
An Open Source Machine Learning Framework for Everyone
-
Django
The Web framework for perfectionists with deadlines.
-
Laravel
A PHP framework for web artisans
-
D3
Bring data to life with SVG, Canvas and HTML. 📊📈🎉
-
Recommend Topics
-
javascript
JavaScript (JS) is a lightweight interpreted programming language with first-class functions.
-
web
Some thing interesting about web. New door for the world.
-
server
A server is a program made to process requests and deliver data to clients.
-
Machine learning
Machine learning is a way of modeling and interpreting data that allows a piece of software to respond intelligently.
-
Visualization
Some thing interesting about visualization, use data art
-
Game
Some thing interesting about game, make everyone happy.
Recommend Org
-
Facebook
We are working to build community through open source technology. NB: members must have two-factor auth.
-
Microsoft
Open source projects and samples from Microsoft.
-
Google
Google ❤️ Open Source for everyone.
-
Alibaba
Alibaba Open Source for everyone
-
D3
Data-Driven Documents codes.
-
Tencent
China tencent open source team.
from floornet.