model_config {
  arch: "cspdarknet"
  n_layers: 53
  use_batch_norm: True
  use_bias: False
  all_projections: False
  use_pooling: True
  retain_head: True
  resize_interpolation_method: BICUBIC
  input_image_size: "3,448,448"
}
train_config {
  train_dataset_path: "/workspace/tlt-experiments/data/train/"
  val_dataset_path: "/workspace/tlt-experiments/data/valid/"
  pretrained_model_path: "/workspace/tlt-experiments/pretrained/cspdarknet_53.hdf5"
  
  # Only ['sgd', 'adam'] are supported for optimizer
  optimizer {
    sgd {
      lr: 0.01
      decay: 0.0
      momentum: 0.9
      nesterov: False
    }
  }
  batch_size_per_gpu: 16
  n_epochs: 100
  # Number of CPU cores for loading data
  n_workers: 16
  # regularizer
  reg_config {
    # regularizer type can be "L1", "L2" or "None".
    type: "L2"
    # if the type is not "None",
    # scope can be either "Conv2D" or "Dense" or both.
    scope: "Conv2D,Dense"
    # 0 < weight decay < 1
    weight_decay: 0.000015
  }
  # learning_rate
  lr_config {
    cosine {
      learning_rate: 0.004
      soft_start: 0.0
    }
  }
  enable_random_crop: False
  enable_center_crop: False
  enable_color_augmentation: True
  mixup_alpha: 0.2
  label_smoothing: 0.1
  preprocess_mode: "caffe"
  image_mean {
    key: "b"
    value: 103.9
  }
  image_mean {
    key: "g"
    value: 116.8
  }
  image_mean {
    key: "r"
    value: 123.7
  }
  visualizer {
    enabled: true
  }
}
eval_config {
  eval_dataset_path: "/workspace/tlt-experiments/data/test"
  model_path: "/workspace/tlt-experiments/trainings/results/weights/cspdarknet_007.tlt"
  top_k: 3
  batch_size: 128
  n_workers: 8
  enable_center_crop: False
}
