Experiment file: train_both.py

batch_size: 32
dropout_probability: 0.5
eta: 0.0001
filter_size: 1
filter_type: median
initializer: Xavier
memory_size: 10
num_epochs: 75

activation_function: relu
dense_layer_units: 64
drop_probab: 0.5
input_size: (350, 350, 10)
kernel_initializer: Xavier
kernel_shape: (3, 3)
model:
num_kernels: 64
output_shape: 2
padding: same
regularization_l2: 0.0
use_bias: True

training_time: 1754.4534485340118
test_error_pitch_power: -36.30038216183645
test_error_power: -23.223233207919574
test_error_yaw_power: -23.442507129364294
val_error_pitch_power: -37.57347033800852
val_error_power: -21.984248946566467
val_error_yaw_power: -22.105847359555355
train_error_pitch_power: -31.602607738464286
train_error_power: -18.452034893088676
train_error_yaw_power: -18.6675400810823
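
The sketch below is not the actual train_both.py model; it is a minimal Keras-style example of a CNN consistent with the hyperparameters listed above. The layer arrangement, the Adam optimizer, and the MSE loss are assumptions; only the values taken from the listing (input size, kernel count and shape, padding, activation, dense units, dropout probability, Xavier/Glorot initializer, learning rate eta, batch size, and epoch count) come from the run itself.

```python
# Hypothetical sketch of a model matching the recorded hyperparameters.
# Architecture, optimizer (Adam), and loss (MSE) are assumptions, not
# taken from train_both.py.
import tensorflow as tf
from tensorflow.keras import layers, models


def build_model():
    model = models.Sequential([
        # input_size: (350, 350, 10)
        layers.Input(shape=(350, 350, 10)),
        # num_kernels: 64, kernel_shape: (3, 3), padding: same,
        # activation_function: relu, Xavier init (glorot_uniform in Keras),
        # use_bias: True
        layers.Conv2D(64, (3, 3), padding="same", activation="relu",
                      kernel_initializer="glorot_uniform", use_bias=True),
        layers.Flatten(),
        # dense_layer_units: 64 (same activation and initializer assumed)
        layers.Dense(64, activation="relu",
                     kernel_initializer="glorot_uniform"),
        # drop_probab / dropout_probability: 0.5
        layers.Dropout(0.5),
        # output_shape: 2 (assumed to be a 2-value regression head)
        layers.Dense(2),
    ])
    # eta: 0.0001 interpreted as the learning rate
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
                  loss="mse")
    return model


if __name__ == "__main__":
    model = build_model()
    model.summary()
    # Training would then use batch_size=32 and num_epochs=75 from the listing:
    # model.fit(x_train, y_train, batch_size=32, epochs=75,
    #           validation_data=(x_val, y_val))
```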