#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@File : export_scn.py
@Ver : 1.0
@Desc : Export a sparse-conv (SCN) 3D backbone from an OpenPCDet/det3d
        checkpoint to an ONNX file.
@Author : Claude,refined by YangTianxi
@Time : 2025/08/09 05:22:13
@Dev Software: Vscode
'''
import sys
import os
import torch
import pickle
import argparse
import numpy as np
from pathlib import Path

# NOTE(fix): the unconditional `from pcdet.config import cfg, cfg_from_yaml_file`
# that used to sit here executed BEFORE the sys.path setup below, so it crashed
# whenever pcdet was only importable through those extra paths and it made the
# guarded try/except import strategy further down unreachable. The same import
# now happens only inside the guarded try-block.

# for kitti dataset
# os.environ['SPCONV_ALGO'] = 'native'
# os.environ['SPCONV_DEBUG'] = '1'

# Add paths for different module locations
sys.path.insert(0, ".")
sys.path.insert(0, "..")
sys.path.insert(0, "../..")

# Add OpenPCDet and det3d paths
current_dir = os.path.dirname(os.path.abspath(__file__))
openpcdet_path = os.path.join(current_dir, "..", "..")
det3d_path = os.path.join(openpcdet_path, "det3d")
if os.path.exists(openpcdet_path):
    sys.path.insert(0, openpcdet_path)
if os.path.exists(det3d_path):
    sys.path.insert(0, det3d_path)

# OpenPCDet imports - try different import strategies
OPENPCDET_AVAILABLE = False
DET3D_AVAILABLE = False
try:
    from pcdet.models.backbones_3d import VoxelBackBone8x
    from pcdet.models.backbones_3d.spconv_backbone import VoxelResBackBone8x
    from pcdet.config import cfg, cfg_from_yaml_file
    OPENPCDET_AVAILABLE = True
    print("Successfully imported OpenPCDet modules")
except ImportError as e:
    print(f"Warning: Could not import OpenPCDet modules: {e}")

# Try to import det3d from different locations
if not OPENPCDET_AVAILABLE:
    try:
        # Try importing from OpenPCDet/det3d
        from det3d.models.backbones.scn import SpMiddleResNetFHD
        DET3D_AVAILABLE = True
        print("Successfully imported det3d modules from OpenPCDet/det3d")
    except ImportError:
        try:
            # Try importing det3d directly
            import det3d
            from det3d.models.backbones.scn import SpMiddleResNetFHD
            DET3D_AVAILABLE = True
            print("Successfully imported det3d modules directly")
        except ImportError as e:
            print(f"Warning: Could not import det3d modules: {e}")

if not OPENPCDET_AVAILABLE and not DET3D_AVAILABLE:
    print("Error: Neither OpenPCDet nor det3d modules are available.")
    sys.exit(1)

# custom functional package
import funcs
import exptool


def get_dataset_defaults(dataset_type):
    """Return per-dataset voxelization defaults.

    Args:
        dataset_type: one of "nuscenes", "kitti" or "custom"; any other value
            falls through to the "custom" defaults.

    Returns:
        dict with keys 'input_channels' (int), 'grid_size' (np.ndarray of 3),
        'point_cloud_range' (list of 6 floats) and 'voxel_size' (list of 3).
    """
    if dataset_type == "nuscenes":
        return {
            'input_channels': 5,  # x, y, z, intensity, elongation
            'grid_size': np.array([1440, 1440, 40]),  # Typical nuScenes grid size
            'point_cloud_range': [-54.0, -54.0, -5.0, 54.0, 54.0, 3.0],
            'voxel_size': [0.075, 0.075, 0.2]
        }
    elif dataset_type == "kitti":
        return {
            'input_channels': 4,  # x, y, z, intensity
            'grid_size': np.array([1408, 1600, 40]),  # Typical KITTI grid size
            'point_cloud_range': [0, -40.0, -3.0, 70.4, 40.0, 1.0],
            'voxel_size': [0.05, 0.05, 0.1]
        }
    else:  # custom
        return {
            'input_channels': 4,  # Default to 4 channels
            'grid_size': np.array([1440, 1440, 40]),
            'point_cloud_range': [-54.0, -54.0, -5.0, 54.0, 54.0, 3.0],
            'voxel_size': [0.075, 0.075, 0.2],
        }


def generate_oneVoxel_dummy_data(args):
    """Derive the sparse spatial shape from args.cfg_file and build one dummy voxel.

    (Translated from the original Chinese docstring: "automatically compute
    spatial_shape from cfg_file and generate single-voxel data".)

    Args:
        args: parsed CLI namespace; reads args.cfg_file, args.in_channel and
            args.half.

    Returns:
        (voxel, coor, spatial_shape): an all-zero CUDA feature tensor of shape
        (1, in_channel) in fp16/fp32 depending on args.half, its (1, 4) int32
        coordinate tensor, and the [nx, ny, nz] grid computed from
        POINT_CLOUD_RANGE / VOXEL_SIZE.

    Raises:
        RuntimeError: if pcdet could not be imported (cfg parsing unavailable).
        ValueError: if no cfg_file was supplied.
        FileNotFoundError: if cfg_file does not exist.
        AttributeError: if VOXEL_SIZE is missing from DATA_PROCESSOR.
    """
    if not OPENPCDET_AVAILABLE:
        # cfg / cfg_from_yaml_file only exist when the pcdet import succeeded.
        raise RuntimeError("pcdet is required to parse --cfg_file")
    if not args.cfg_file:
        # Path(None) would raise a confusing TypeError; fail explicitly instead.
        raise ValueError("--cfg_file is required to derive the spatial shape")
    if not Path(args.cfg_file).exists():
        raise FileNotFoundError(f"Config file {args.cfg_file} not found")
    cfg_from_yaml_file(args.cfg_file, cfg)
    pcr = cfg.DATA_CONFIG.POINT_CLOUD_RANGE
    vs = None
    for item in cfg.DATA_CONFIG.DATA_PROCESSOR:
        # EasyDict entries are dict subclasses, so the isinstance check holds.
        if isinstance(item, dict) and "VOXEL_SIZE" in item:
            vs = item["VOXEL_SIZE"]
            break
    if vs is None:
        raise AttributeError("VOXEL_SIZE not found in DATA_PROCESSOR")
    # Number of voxels per axis: range extent divided by voxel size.
    nx = int(round((pcr[3] - pcr[0]) / vs[0]))
    ny = int(round((pcr[4] - pcr[1]) / vs[1]))
    nz = int(round((pcr[5] - pcr[2]) / vs[2]))
    spatial_shape = [nx, ny, nz]
    dtype = torch.float16 if args.half else torch.float32
    voxel = torch.zeros(1, args.in_channel, dtype=dtype, device="cuda")
    coor = torch.zeros(1, 4, dtype=torch.int32, device="cuda")
    print(f"[INFO] spatial_shape={spatial_shape}, dtype={dtype}")
    return voxel, coor, spatial_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Export scn to onnx file")
parser.add_argument("--in_channel", type=int, default=None,help="nuscenes is 5, kitti is 4, auto-detect if not specified") parser.add_argument("--dataset_type", type=str, default="nuscenes", choices=["nuscenes", "kitti", "custom"],help="specify dataset") parser.add_argument("--ckpt", type=str, required=True, help="OpenPCDet model checkpoint path") parser.add_argument("--cfg_file", type=str, default=None, help="OpenPCDet config file path") parser.add_argument("--input", type=str, default=None, help="input pickle data, random if there have no input") parser.add_argument("--save_onnx", type=str, default="centerpoint.scn.onnx", help="output onnx") parser.add_argument("--save_tensor", type=str, default=None, help="Save input/output tensor to file. The purpose of this operation is to verify the inference result of c++") parser.add_argument("--backbone_type", type=str, default="VoxelResBackBone8x", choices=["VoxelBackBone8x", "VoxelResBackBone8x", "SpMiddleResNetFHD"], help="Model backbone type") parser.add_argument("--grid_size", type=int, nargs=3, default=None,help="Grid size for voxelization (x, y, z), auto-detect from dataset if not specified") parser.add_argument('--half', type=bool, default=True, help='True:export FP16 onnx model, else, FP32 model') args = parser.parse_args() # Get dataset defaults dataset_defaults = get_dataset_defaults(args.dataset_type) # Determine input channels if args.in_channel is None: args.in_channel = dataset_defaults['input_channels'] else: # Warn if mismatch with dataset if args.in_channel != dataset_defaults['input_channels']: print(f"Warning: You specified {args.in_channel} channels but {args.dataset_type} typically uses {dataset_defaults['input_channels']} channels") # Determine grid size if args.grid_size is None: args.grid_size = dataset_defaults['grid_size'] # Load config if using OpenPCDet and config file is provided model_cfg = None if args.cfg_file: cfg_from_yaml_file(args.cfg_file, cfg) # Get model config from yaml if 
hasattr(cfg, 'MODEL') and hasattr(cfg.MODEL, 'BACKBONE_3D'): model_cfg = cfg.MODEL.BACKBONE_3D # Override with our parameters model_cfg.INPUT_CHANNELS = args.in_channel model_cfg.GRID_SIZE = args.grid_size # Initialize model based on type and availability if args.backbone_type == "VoxelBackBone8x": # Ensure model_cfg has required attributes if not hasattr(model_cfg, 'INPUT_CHANNELS'): model_cfg.INPUT_CHANNELS = args.in_channel if not hasattr(model_cfg, 'GRID_SIZE'): model_cfg.GRID_SIZE = args.grid_size model = VoxelBackBone8x( model_cfg=model_cfg, input_channels=model_cfg.INPUT_CHANNELS, grid_size=model_cfg.GRID_SIZE ) print('model of VoxelBackBone8x:') print(model) total_params = sum(p.numel() for p in model.parameters() if p.requires_grad) print(f"Total trainable parameters: {total_params}") #Total trainable parameters: 711872 elif args.backbone_type == "VoxelResBackBone8x": # Ensure model_cfg has required attributes if not hasattr(model_cfg, 'INPUT_CHANNELS'): model_cfg.INPUT_CHANNELS = args.in_channel if not hasattr(model_cfg, 'GRID_SIZE'): model_cfg.GRID_SIZE = args.grid_size model = VoxelResBackBone8x( model_cfg=model_cfg, input_channels=model_cfg.INPUT_CHANNELS, grid_size=model_cfg.GRID_SIZE ) elif args.backbone_type == "SpMiddleResNetFHD": # det3d model type, nvidia use this type model = SpMiddleResNetFHD(args.in_channel) else: if DET3D_AVAILABLE: print(f"Model type {args.model_type} not available, falling back to SpMiddleResNetFHD") model = SpMiddleResNetFHD(args.in_channel) else: raise RuntimeError("No compatible model backends available!") if args.half: model = model.cuda().eval().half() else: model = model.cuda().eval() # Load checkpoint if args.ckpt: #model = funcs.load_scn_checkpoint(model, args.ckpt, args.backbone_type) #Successfully loaded 78 tensors from checkpoint, total parameters: 860877 model = funcs.load_scn_checkpointV2(model, args.ckpt, args.backbone_type) #Trainable parameters in 'backbone_3d': 711,872 ckpt_dir = os.path.dirname(args.ckpt) 
#torch.save(model, os.path.join(ckpt_dir, 'scn.pth')) # Apply layer fusion model = funcs.layer_fusion(model) print("Fusion model:") print(model) # 保存完整的融合模型(新增这一行) #torch.save(model, os.path.join(ckpt_dir, 'scn_fusion.pth')) #torch.save(model.state_dict(), os.path.join(ckpt_dir, 'scn_fusion_state_dict.pth')) funcs.validate_fusion(model) # Prepare input data voxel, coor, spatial_shape = generate_oneVoxel_dummy_data(args) batch_size = 1 # Single batch for export # Export to ONNX exptool.export_onnx(model,voxel,coor,batch_size,spatial_shape,args) print("ONNX export completed successfully!")