An abort similar to: caffe *** Aborted at 1457897730 (unix time) try "date -d @1457897730" if you are using GNU date ***
1) Check the solver prototxt configuration
net: "RDbyDL/Models/segnet_train.prototxt" test_iter: 1 test_interval: 100 base_lr: 0.001 lr_policy: "step" #error1:此处设置为step,后面没有设置stepsize; #error2:此处设置为step,实际使用的是fixed; gamma: 1.0 stepsize: 1000 # display: 5 momentum: 0.9 max_iter: 10000 weight_decay: 0.0005 snapshot: 100 snapshot_prefix: "RDbyDL/Models/Training/" solver_mode: CPU
2) Check the ***_train.prototxt configuration
name: "RDbyDL"
layer {
  name: "data"
  type: "DenseImageData"
  top: "data"
  top: "label"
  dense_image_data_param {
    source: "RDbyDL/RDTD/train.txt"
    batch_size: 1
    shuffle: false
  }
}
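A bad path in the source list is one common cause of this kind of abort. Assuming the usual SegNet convention of one "image_path label_path" pair per line in train.txt (that format is an assumption, check your own list), a quick sanity check looks like this:

import os

# Verify that every image/label pair listed in train.txt actually exists on disk.
# Assumes each line holds "<image_path> <label_path>" separated by whitespace.
with open("RDbyDL/RDTD/train.txt") as f:
    for lineno, line in enumerate(f, 1):
        parts = line.split()
        if len(parts) != 2:
            print("line %d: expected 2 paths, got %d" % (lineno, len(parts)))
            continue
        for path in parts:
            if not os.path.isfile(path):
                print("line %d: missing file %s" % (lineno, path))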
.
.
.
layer {
  name: "upsample5"
  type: "Upsample"
  bottom: "pool5"
  bottom: "pool5_mask"
  top: "pool5_D"
  upsample_param {
    scale: 2
    upsample_w: 30  # error1: when upsampling, the width/height may not be evenly divisible by
    upsample_h: 23  #         scale, so these must be set explicitly. On one hand, this keeps the
                    #         final result the same size as the original image; on the other hand,
                    #         the upsample can only be carried out when the size is consistent with
                    #         the mask produced by the corresponding pooling layer (pool5_mask).
  }
}
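Where 30 and 23 come from: Caffe's pooling layers round the output size up (ceil), so after several 2x2, stride-2 poolings the dimensions are not simply the input divided by a power of two. A small sketch of that arithmetic, assuming a 480x360 input as in the CamVid SegNet example (the image size is my assumption, not stated above):

import math

# Trace the output size of successive 2x2, stride-2 pooling layers the way Caffe
# computes it: out = ceil((in - kernel) / stride) + 1 (no padding).
def pool_out(size, kernel=2, stride=2):
    return int(math.ceil((size - kernel) / float(stride))) + 1

h, w = 360, 480        # assumed CamVid-sized input
for n in range(1, 6):
    h, w = pool_out(h), pool_out(w)
    print("pool%d output: %dx%d" % (n, w, h))
# pool4 gives 30x23, so upsample5 (which inverts pool5) needs upsample_w: 30, upsample_h: 23.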
.
.
.
layer {
  bottom: "conv1_2_D"
  top: "conv1_2_D"
  name: "relu1_2_D"
  type: "ReLU"
}
layer {
  bottom: "conv1_2_D"
  top: "conv1_1_D"
  name: "conv1_1_D"
  type: "Convolution"
  param {
    lr_mult: 1
    decay_mult: 1
  }
  param {
    lr_mult: 2
    decay_mult: 0
  }
  convolution_param {
    weight_filler {
      type: "msra"
    }
    bias_filler {
      type: "constant"
    }
    num_output: 12  # error2: the label classes in the samples must correspond to this value.
    pad: 1
    kernel_size: 3
  }
}
layer {
  name: "loss"
  type: "SoftmaxWithLoss"
  bottom: "conv1_1_D"
  bottom: "label"
  top: "loss"
  softmax_param { engine: CAFFE }
  loss_param {
    ignore_label: 12  # error3: labels not used for training can be ignored here
                      #         (e.g. the label assigned to "unlabeled" pixels).
  }
}
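Errors 2 and 3 can be checked offline: every pixel value in the label images must either be a valid class index (less than num_output, 12 here) or equal to ignore_label. A quick scan, assuming the label files listed in train.txt are single-channel images readable with OpenCV (same file-layout assumption as the check above):

import cv2
import numpy as np

NUM_OUTPUT = 12     # must match num_output in conv1_1_D
IGNORE_LABEL = 12   # must match loss_param.ignore_label

# Report any pixel value that is neither a valid class index nor the ignored label.
with open("RDbyDL/RDTD/train.txt") as f:
    for line in f:
        parts = line.split()
        if len(parts) != 2:
            continue
        label = cv2.imread(parts[1], cv2.IMREAD_GRAYSCALE)
        if label is None:
            print("could not read %s" % parts[1])
            continue
        bad = [v for v in np.unique(label) if v >= NUM_OUTPUT and v != IGNORE_LABEL]
        if bad:
            print("%s contains unexpected labels: %s" % (parts[1], bad))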
.
.
.