Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Pruned #22

Open
wants to merge 12 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
.idea
38 changes: 38 additions & 0 deletions pointpillar_inference_README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
# 本地inference操作步骤
## 1.远程连接服务器
```
ssh -p 8851 [email protected]
```

## 2. 进入工程路径
```
cd Projects/pointpillars2/second/pytorch/
```

## 3. 激活虚拟环境
```
source activate pointpillar
```

## 4. 添加PYTHONPATH路径
```
export PYTHONPATH=~/Projects/pointpillars2:$PYTHONPATH
```

## 5. 运行evaluate程序
```
python train.py evaluate --config_path=../configs/pointpillars/xyres_16_4cls.proto --model_dir=/nfs/nas/model/songhongli/ppbaidusecond_pretrained_3cls/
```


## 6. 运行的点云文件修改方法
```
vim /home/songhongli/Projects/pointpillars2/second/data/preprocess.py
```

line 329:

```
points = np.fromfile("/home/songhongli/000000.bin", dtype=np.float32, count=-1).reshape([-1, num_point_features])
```
将这里的fromfile打开的文件路径修改为要运行的一帧点云文件
Binary file removed second/__pycache__/__init__.cpython-36.pyc
Binary file not shown.
47 changes: 43 additions & 4 deletions second/builder/dataset_builder.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

from second.protos import input_reader_pb2
from second.data.dataset import KittiDataset
from second.data.preprocess import prep_pointcloud
from second.data.preprocess import prep_pointcloud, prep_pointcloud_inference
import numpy as np
from second.builder import dbsampler_builder
from functools import partial
Expand All @@ -34,7 +34,10 @@ def build(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner=None):
target_assigner=None,
multi_gpu=False,
inference=False,
points=None):
"""Builds a tensor dictionary based on the InputReader config.

Args:
Expand Down Expand Up @@ -99,13 +102,49 @@ def build(input_reader_config,
remove_points_after_sample=cfg.remove_points_after_sample,
remove_environment=cfg.remove_environment,
use_group_id=cfg.use_group_id,
out_size_factor=out_size_factor)
out_size_factor=out_size_factor,
multi_gpu=multi_gpu)
prep_func_inference = partial(
prep_pointcloud_inference,
root_path=cfg.kitti_root_path,
class_names=list(cfg.class_names),
voxel_generator=voxel_generator,
target_assigner=target_assigner,
training=training,
max_voxels=cfg.max_number_of_voxels,
remove_outside_points=False,
remove_unknown=cfg.remove_unknown_examples,
create_targets=training,
shuffle_points=cfg.shuffle_points,
gt_rotation_noise=list(cfg.groundtruth_rotation_uniform_noise),
gt_loc_noise_std=list(cfg.groundtruth_localization_noise_std),
global_rotation_noise=list(cfg.global_rotation_uniform_noise),
global_scaling_noise=list(cfg.global_scaling_uniform_noise),
global_loc_noise_std=(0.2, 0.2, 0.2),
global_random_rot_range=list(
cfg.global_random_rotation_range_per_object),
db_sampler=db_sampler,
unlabeled_db_sampler=u_db_sampler,
generate_bev=generate_bev,
without_reflectivity=without_reflectivity,
num_point_features=num_point_features,
anchor_area_threshold=cfg.anchor_area_threshold,
gt_points_drop=cfg.groundtruth_points_drop_percentage,
gt_drop_max_keep=cfg.groundtruth_drop_max_keep_points,
remove_points_after_sample=cfg.remove_points_after_sample,
remove_environment=cfg.remove_environment,
use_group_id=cfg.use_group_id,
out_size_factor=out_size_factor,
multi_gpu=multi_gpu)
dataset = KittiDataset(
info_path=cfg.kitti_info_path,
root_path=cfg.kitti_root_path,
num_point_features=num_point_features,
target_assigner=target_assigner,
feature_map_size=feature_map_size,
prep_func=prep_func)
prep_func=prep_func,
inference=inference,
points=points,
prep_func_inference=prep_func_inference)

return dataset
Empty file.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
44 changes: 44 additions & 0 deletions second/builder_pruned/anchor_generator_builder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import numpy as np

from second.protos import box_coder_pb2
from second.core.anchor_generator import (
AnchorGeneratorStride, AnchorGeneratorRange)


def build(anchor_config):
    """Create an anchor generator based on config.

    Args:
        anchor_config: An AnchorGenerator proto message holding exactly one
            of the `anchor_generator_stride` / `anchor_generator_range`
            oneof fields.

    Returns:
        An AnchorGeneratorStride or AnchorGeneratorRange instance.

    Raises:
        ValueError: when the oneof holds an unsupported generator type.
    """
    ag_type = anchor_config.WhichOneof('anchor_generator')

    if ag_type == 'anchor_generator_stride':
        config = anchor_config.anchor_generator_stride
        ag = AnchorGeneratorStride(
            sizes=list(config.sizes),
            anchor_strides=list(config.strides),
            anchor_offsets=list(config.offsets),
            rotations=list(config.rotations),
            match_threshold=config.matched_threshold,
            unmatch_threshold=config.unmatched_threshold,
            class_id=config.class_name)
        return ag
    elif ag_type == 'anchor_generator_range':
        config = anchor_config.anchor_generator_range
        ag = AnchorGeneratorRange(
            sizes=list(config.sizes),
            anchor_ranges=list(config.anchor_ranges),
            rotations=list(config.rotations),
            match_threshold=config.matched_threshold,
            unmatch_threshold=config.unmatched_threshold,
            class_id=config.class_name)
        return ag
    else:
        raise ValueError("unknown anchor generator type")
150 changes: 150 additions & 0 deletions second/builder_pruned/dataset_builder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,150 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input reader builder.

Creates data sources for DetectionModels from an InputReader config. See
input_reader.proto for options.

Note: If users wishes to also use their own InputReaders with the Object
Detection configuration framework, they should define their own builder function
that wraps the build function.
"""

from second.protos import input_reader_pb2
from second.data.dataset import KittiDataset
from second.data.preprocess import prep_pointcloud, prep_pointcloud_inference
import numpy as np
from second.builder import dbsampler_builder
from functools import partial


def build(input_reader_config,
          model_config,
          training,
          voxel_generator,
          target_assigner=None,
          multi_gpu=False,
          inference=False,
          points=None):
    """Builds a KittiDataset based on the InputReader config.

    Args:
        input_reader_config: A input_reader_pb2.InputReader object.
        model_config: Model proto providing bev/reflectivity/point-feature
            and RPN stride settings.
        training: bool, whether the dataset is used for training (enables
            target creation and augmentation).
        voxel_generator: Voxel generator providing `grid_size`.
        target_assigner: Optional target assigner passed through to the
            preprocessing functions and the dataset.
        multi_gpu: bool, forwarded to the preprocessing functions.
        inference: bool, run the dataset in single-frame inference mode.
        points: Optional point cloud array used when `inference` is True.

    Returns:
        A KittiDataset configured from `input_reader_config`.

    Raises:
        ValueError: On invalid input reader proto.
        ValueError: If no input paths are specified.
    """
    if not isinstance(input_reader_config, input_reader_pb2.InputReader):
        raise ValueError('input_reader_config not of type '
                         'input_reader_pb2.InputReader.')
    generate_bev = model_config.use_bev
    without_reflectivity = model_config.without_reflectivity
    num_point_features = model_config.num_point_features
    out_size_factor = model_config.rpn.layer_strides[0] // model_config.rpn.upsample_strides[0]

    cfg = input_reader_config
    db_sampler_cfg = input_reader_config.database_sampler
    db_sampler = None
    if len(db_sampler_cfg.sample_groups) > 0:  # enable sample
        db_sampler = dbsampler_builder.build(db_sampler_cfg)
    u_db_sampler_cfg = input_reader_config.unlabeled_database_sampler
    u_db_sampler = None
    if len(u_db_sampler_cfg.sample_groups) > 0:  # enable sample
        u_db_sampler = dbsampler_builder.build(u_db_sampler_cfg)
    grid_size = voxel_generator.grid_size
    # [352, 400]
    feature_map_size = grid_size[:2] // out_size_factor
    feature_map_size = [*feature_map_size, 1][::-1]

    # Both preprocessing pipelines take the exact same configuration; build
    # the shared kwargs once instead of duplicating ~30 keyword arguments.
    prep_kwargs = dict(
        root_path=cfg.kitti_root_path,
        class_names=list(cfg.class_names),
        voxel_generator=voxel_generator,
        target_assigner=target_assigner,
        training=training,
        max_voxels=cfg.max_number_of_voxels,
        remove_outside_points=False,
        remove_unknown=cfg.remove_unknown_examples,
        create_targets=training,
        shuffle_points=cfg.shuffle_points,
        gt_rotation_noise=list(cfg.groundtruth_rotation_uniform_noise),
        gt_loc_noise_std=list(cfg.groundtruth_localization_noise_std),
        global_rotation_noise=list(cfg.global_rotation_uniform_noise),
        global_scaling_noise=list(cfg.global_scaling_uniform_noise),
        global_loc_noise_std=(0.2, 0.2, 0.2),
        global_random_rot_range=list(
            cfg.global_random_rotation_range_per_object),
        db_sampler=db_sampler,
        unlabeled_db_sampler=u_db_sampler,
        generate_bev=generate_bev,
        without_reflectivity=without_reflectivity,
        num_point_features=num_point_features,
        anchor_area_threshold=cfg.anchor_area_threshold,
        gt_points_drop=cfg.groundtruth_points_drop_percentage,
        gt_drop_max_keep=cfg.groundtruth_drop_max_keep_points,
        remove_points_after_sample=cfg.remove_points_after_sample,
        remove_environment=cfg.remove_environment,
        use_group_id=cfg.use_group_id,
        out_size_factor=out_size_factor,
        multi_gpu=multi_gpu)
    prep_func = partial(prep_pointcloud, **prep_kwargs)
    prep_func_inference = partial(prep_pointcloud_inference, **prep_kwargs)
    dataset = KittiDataset(
        info_path=cfg.kitti_info_path,
        root_path=cfg.kitti_root_path,
        num_point_features=num_point_features,
        target_assigner=target_assigner,
        feature_map_size=feature_map_size,
        prep_func=prep_func,
        inference=inference,
        points=points,
        prep_func_inference=prep_func_inference)

    return dataset
27 changes: 27 additions & 0 deletions second/builder_pruned/dbsampler_builder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
import pickle

import second.core.preprocess as prep
from second.builder import preprocess_builder
from second.core.preprocess import DataBasePreprocessor
from second.core.sample_ops import DataBaseSamplerV2


def build(sampler_config):
    """Construct a DataBaseSamplerV2 from a sampler proto config.

    Builds the database preprocessing chain, loads the pickled database
    infos from disk, and wires everything into the sampler.
    """
    cfg = sampler_config
    # One preprocessor per configured database prep step.
    preprocessors = DataBasePreprocessor([
        preprocess_builder.build_db_preprocess(step)
        for step in cfg.database_prep_steps
    ])
    # Per-group sampling limits: class name -> max number of samples.
    group_limits = [dict(g.name_to_max_num) for g in cfg.sample_groups]
    with open(cfg.database_info_path, 'rb') as f:
        db_infos = pickle.load(f)
    rotation_range = list(cfg.global_random_rotation_range_per_object)
    if not rotation_range:
        # An empty range disables per-object global rotation.
        rotation_range = None
    return DataBaseSamplerV2(db_infos, group_limits, preprocessors,
                             cfg.rate, rotation_range)
14 changes: 14 additions & 0 deletions second/builder_pruned/preprocess_builder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
import second.core.preprocess as prep

def build_db_preprocess(db_prep_config):
    """Build one database preprocessing step from its proto config.

    Dispatches on the `database_preprocessing_step` oneof and returns the
    matching preprocessor object.

    Raises:
        ValueError: if the oneof holds an unknown step type.
    """
    step = db_prep_config.WhichOneof('database_preprocessing_step')

    if step == 'filter_by_difficulty':
        sub = db_prep_config.filter_by_difficulty
        return prep.DBFilterByDifficulty(list(sub.removed_difficulties))
    if step == 'filter_by_min_num_points':
        sub = db_prep_config.filter_by_min_num_points
        return prep.DBFilterByMinNumPoint(dict(sub.min_num_point_pairs))
    raise ValueError("unknown database prep type")

31 changes: 31 additions & 0 deletions second/builder_pruned/similarity_calculator_builder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
import numpy as np

from second.core import region_similarity
from second.protos import similarity_pb2


def build(similarity_config):
    """Create a region similarity calculator based on config.

    Args:
        similarity_config: A RegionSimilarityCalculator proto message
            holding exactly one of the `region_similarity` oneof fields.

    Returns:
        A region_similarity calculator instance (RotateIouSimilarity,
        NearestIouSimilarity, or DistanceSimilarity).

    Raises:
        ValueError: when the oneof holds an unsupported similarity type.
    """
    similarity_type = similarity_config.WhichOneof('region_similarity')
    if similarity_type == 'rotate_iou_similarity':
        return region_similarity.RotateIouSimilarity()
    elif similarity_type == 'nearest_iou_similarity':
        return region_similarity.NearestIouSimilarity()
    elif similarity_type == 'distance_similarity':
        cfg = similarity_config.distance_similarity
        return region_similarity.DistanceSimilarity(
            distance_norm=cfg.distance_norm,
            with_rotation=cfg.with_rotation,
            rotation_alpha=cfg.rotation_alpha)
    else:
        raise ValueError("unknown similarity type")
Loading