diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90
index fbdff9f73..0fef13830 100644
--- a/demo/app/train-cloud-microphysics.F90
+++ b/demo/app/train-cloud-microphysics.F90
@@ -19,6 +19,7 @@ program train_cloud_microphysics
   use phase_space_bin_m, only : phase_space_bin_t
   use NetCDF_file_m, only: NetCDF_file_t
   use NetCDF_variable_m, only: NetCDF_variable_t, tensors
+  use occupancy_m, only : occupancy_t
   implicit none
 
   character(len=*), parameter :: usage = new_line('a') // new_line('a') // &
@@ -125,7 +126,7 @@ function get_command_line_arguments() result(command_line_arguments)
     stride = default_integer_or_read(1, stride_string)
     start_step = default_integer_or_read(1, start_string)
     report_step = default_integer_or_read(1, report_string)
-    num_bins = default_integer_or_read(1, bins_string)
+    num_bins = default_integer_or_read(3, bins_string)
     cost_tolerance = default_real_or_read(5E-8, tolerance_string)
 
     if (len(end_string)/=0) then
@@ -149,27 +150,24 @@ subroutine read_train_write(training_configuration, args, plot_file)
     type(training_configuration_t), intent(in) :: training_configuration
     type(command_line_arguments_t), intent(in) :: args
     type(plot_file_t), intent(in), optional :: plot_file
-    type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:)
+    type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:), derivative(:)
     type(NetCDF_variable_t) input_time, output_time
 
     ! local variables:
-    integer t, b, t_end, v
+    type(trainable_network_t) trainable_network
+    type(mini_batch_t), allocatable :: mini_batches(:)
+    type(bin_t), allocatable :: bins(:)
+    type(input_output_pair_t), allocatable :: input_output_pairs(:)
+    type(tensor_t), allocatable, dimension(:) :: input_tensors, output_tensors
+    real, allocatable :: cost(:)
+    integer i, network_unit, io_status, epoch, end_step, t, b, t_end, v
+    integer(int64) start_training, finish_training
     logical stop_requested
 
-    enum, bind(C)
-      enumerator :: pressure=1, potential_temperature, temperature, qv, qc, qr, qs
-    end enum
-
-    enum, bind(C)
-      enumerator :: dpotential_temperature_t=1, dqv_dt, dqc_dt, dqr_dt, dqs_dt
-    end enum
-
-    !associate(input_names => &
-    !  [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), &
-    !   string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] &
-    !)
-    associate(input_names => [string_t("qv"), string_t("qc")])
-
+    associate(input_names => &
+      [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), &
+       string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] &
+    )
       allocate(input_variable(size(input_names)))
 
       associate(input_file_name => args%base_name // "_input.nc")
@@ -194,8 +192,7 @@ subroutine read_train_write(training_configuration, args, plot_file)
       end associate
     end associate
 
-    !associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")])
-    associate(output_names => [string_t("qv"), string_t("qc")])
+    associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")])
 
       allocate(output_variable(size(output_names)))
 
@@ -222,143 +219,139 @@ subroutine read_train_write(training_configuration, args, plot_file)
       end associate
     end associate
 
-    block
-      type(NetCDF_variable_t) derivative(size(output_variable))
-
-      print *,"Calculating desired neural-network model outputs"
+    print *,"Calculating desired neural-network model outputs"
-      associate(dt => NetCDF_variable_t(output_time - input_time, "dt"))
-        do v = 1, size(derivative)
-          associate(derivative_name => "d" // output_names(v)%string() // "/dt")
-            print *,"- " // derivative_name
-            derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name)
-            call assert(.not. derivative(v)%any_nan(), "train_cloud_microhphysics: non NaN's")
-          end associate
-        end do
-      end associate
-    end block
+    allocate(derivative, mold=output_variable)
+    associate(dt => NetCDF_variable_t(output_time - input_time, "dt"))
+      do v = 1, size(derivative)
+        associate(derivative_name => "d" // output_names(v)%string() // "/dt")
+          print *,"- " // derivative_name
+          ! Finite-difference tendency over the step: (end-of-step value - start-of-step value) / dt.
+          ! Note: this indexing assumes input_variable(v) and output_variable(v) hold the same physical variable.
+          derivative(v) = NetCDF_variable_t( (output_variable(v) - input_variable(v)) / dt, derivative_name)
+          call assert(.not. derivative(v)%any_nan(), "train_cloud_microphysics: .not. derivative(v)%any_nan()")
+        end associate
+      end do
+    end associate
     end associate
 
-    train_network: &
-    block
-      type(trainable_network_t) trainable_network
-      type(mini_batch_t), allocatable :: mini_batches(:)
-      type(bin_t), allocatable :: bins(:)
-      type(input_output_pair_t), allocatable :: input_output_pairs(:)
-      type(tensor_t), allocatable, dimension(:) :: inputs, outputs
-      real, allocatable :: cost(:)
-      integer i, network_unit, io_status, epoch, end_step
-      integer(int64) start_training, finish_training
-
+    if (allocated(args%end_step)) then
+      end_step = args%end_step
+    else
+      end_step = input_variable(1)%end_step()
+    end if
+
+    print *,"Defining input tensors for time steps", args%start_step, "through", end_step, "with a stride of", args%stride
+    input_tensors = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride)
+
+    print *,"Defining output tensors for time steps", args%start_step, "through", end_step, "with a stride of", args%stride
+    output_tensors = tensors(derivative, step_start = args%start_step, step_end = end_step, step_stride = args%stride)
+
+    associate( &
+      output_map => tensor_map_t( &
+         layer = "outputs" &
+        ,minima = [( derivative(v)%minimum(), v=1, size(derivative) )] &
+        ,maxima = [( derivative(v)%maximum(), v=1, size(derivative) )] &
+    ))
+    train_network: &
+    block
       associate( network_file => args%base_name // "_network.json")
-
-        open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read')
-
-        if (allocated(args%end_step)) then
-          end_step = args%end_step
-        else
-          end_step = input_variable(1)%end_step()
-        end if
-
-        print *,"Defining input tensors starting from time step", args%start_step, "through", end_step, "with strides of", args%stride
-        inputs = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride)
-
-        print *,"Defining output tensors starting from time step", args%start_step, "through", end_step, "with strides of", args%stride
-        outputs = tensors(output_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride)
-
-        print *,"Calculating output tensor component ranges."
-        tensor_extrema: &
-        associate( &
-           input_minima => [( input_variable(v)%minimum(), v=1,size( input_variable) )] &
-          ,input_maxima => [( input_variable(v)%maximum(), v=1,size( input_variable) )] &
-          ,output_minima => [( output_variable(v)%minimum(), v=1,size(output_variable) )] &
-          ,output_maxima => [( output_variable(v)%maximum(), v=1,size(output_variable) )] &
-        )
-          ! output_map: &
-          ! associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima))
-          !   read_or_initialize_network: &
-          !   if (io_status==0) then
-          !     print *,"Reading network from file " // network_file
-          !     trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file))))
-          !     close(network_unit)
-          !   else
-          !     close(network_unit)
-
-          !     initialize_network: &
-          !     block
-          !       character(len=len('YYYYMMDD')) date
-
-          !       call date_and_time(date)
-
-          !       print *,"Calculating input tensor component ranges."
-          !       associate( &
-          !         input_map => tensor_map_t( &
-          !           layer = "inputs", &
-          !           minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), &
-          !                     minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], &
-          !           maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), &
-          !                     maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] &
-          !       ) )
-          !       associate(activation => training_configuration%differentiable_activation())
-          !         associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections()))))
-          !           trainable_network = trainable_network_t( &
-          !             training_configuration, &
-          !             perturbation_magnitude = 0.05, &
-          !             metadata = [ &
-          !               string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), &
-          !               activation%function_name(), residual_network &
-          !             ], input_map = input_map, output_map = output_map &
-          !           )
-          !         end associate
-          !       end associate
-          !     end associate ! input_map, date_string
-          !   end block initialize_network
-          ! end if read_or_initialize_network
-
-          ! print *, "Conditionally sampling for a flat distribution of output values"
-          ! block
-          !   integer i
-          !   logical occupied(argsnum_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins)
-          !   logical keepers(size(outputs))
-          !   type(phase_space_bin_t), allocatable :: bin(:)
-          !   occupied = .false.
-          !   keepers = .false.
-
-          !   bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))]
-
-          !   do i = 1, size(outputs)
-          !     if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle
-          !     occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true.
-          !     keepers(i) = .true.
-          !   end do
-          !   input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers))
-          !   print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // &
-          !     " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins."
-          ! end block
-          ! end associate output_map
-        end associate tensor_extrema
-
-        ! print *,"Normalizing the remaining input and output tensors"
-        ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs)
-
-        ! associate( &
-        !   num_pairs => size(input_output_pairs), &
-        !   n_bins => training_configuration%mini_batches(), &
-        !   adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), &
-        !   learning_rate => training_configuration%learning_rate() &
-        ! )
-        !   bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)]
-
-        !   print *,"Training network"
-        !   print *, " Epoch Cost (avg)"
-
-        !   call system_clock(start_training)
-        !
-        !   train_write_and_maybe_exit: &
-        !   block
-        !     integer first_epoch
-        !     integer me
+
+        open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read')
+
+        read_or_initialize_network: &
+        if (io_status==0) then
+          print *,"Reading network from file " // network_file
+          trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file))))
+          close(network_unit)
+        else
+          close(network_unit)
+
+          initialize_network: &
+          block
+            character(len=len('YYYYMMDD')) date
+
+            call date_and_time(date)
+
+            print *,"Defining a new network from training_configuration_t and tensor_map_t objects"
+
+            associate(activation => training_configuration%activation())
+              trainable_network = trainable_network_t( &
+                 training_configuration &
+                ,perturbation_magnitude = 0.05 &
+                ,metadata = [ &
+                   string_t("ICAR microphysics" ) &
+                  ,string_t("max-entropy-filter") &
+                  ,string_t(date                ) &
+                  ,activation%function_name(    ) &
+                  ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) &
+                 ] &
+                ,input_map = tensor_map_t( &
+                   layer = "inputs" &
+                  ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] &
+                  ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] &
+                 ) &
+                ,output_map = output_map &
+              )
+            end associate
+          end block initialize_network
+
+        end if read_or_initialize_network
+
+        print *, "Conditionally sampling for a flat distribution of output values"
+
+        flatten_histogram: &
+        block
+          integer i
+          logical keepers(size(output_tensors))
+          type(phase_space_bin_t), allocatable :: bin(:)
+          type(occupancy_t) occupancy
+
+          ! Determine the phase-space bin that holds each output tensor
+          bin = [(phase_space_bin_t(output_tensors(i), output_map%minima(), output_map%maxima(), args%num_bins), i = 1, size(output_tensors))]
+          call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] )
+
+          keepers = .false.
+
+          do i = 1, size(output_tensors)
+            if (occupancy%occupied(bin(i)%loc)) cycle
+            call occupancy%occupy(bin(i)%loc)
+            keepers(i) = .true.
+          end do
+          input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers))
+          print '(*(a,i0))' &
+            ," Keeping "              , size(input_output_pairs, kind=int64) &
+            ," out of "               , size(output_tensors, kind=int64) &
+            ," input/output pairs in ", occupancy%num_occupied() &
+            ," out of "               , occupancy%num_bins() &
+            ," bins."
+
+        end block flatten_histogram
+
+        end associate
+
+        ! print *,"Normalizing the remaining input and output tensors"
+        ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs)
+
+        ! associate( &
+        !   num_pairs => size(input_output_pairs), &
+        !   n_bins => training_configuration%mini_batches(), &
+        !   adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), &
+        !   learning_rate => training_configuration%learning_rate() &
+        ! )
+        !   bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)]
+
+        !   print *,"Training network"
+        !   print *, " Epoch Cost (avg)"
+
+        !   call system_clock(start_training)
+        !
+        !   train_write_and_maybe_exit: &
+        !   block
+        !     integer first_epoch
+        !     integer me
 #if defined(MULTI_IMAGE_SUPPORT)
@@ -425,9 +418,11 @@ subroutine read_train_write(training_configuration, args, plot_file)
 !           print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", &
 !             args%num_epochs,"epochs"
 
-    end associate ! network_file
+    ! end associate
     end block train_network
+    end associate
+
     !close(plot_file%plot_unit)
 
   end subroutine read_train_write
diff --git a/demo/src/occupancy_m.f90 b/demo/src/occupancy_m.f90
new file mode 100644
index 000000000..15b2ef247
--- /dev/null
+++ b/demo/src/occupancy_m.f90
@@ -0,0 +1,83 @@
+module occupancy_m
+  use iso_fortran_env, only : int64
+  implicit none
+
+  private
+  public :: occupancy_t
+
+  ! Tracks which bins of a rank-1 through rank-15 histogram are occupied,
+  ! holding one allocatable logical array per supported rank.
+  !
+  ! Illustrative usage sketch (hypothetical driver code):
+  !   type(occupancy_t) occupancy
+  !   call occupancy%vacate(dims = [4, 4, 4])  ! start with an empty 4x4x4 histogram
+  !   if (.not. occupancy%occupied([1, 2, 3])) call occupancy%occupy([1, 2, 3])
+  !   print *, occupancy%num_occupied(), "of", occupancy%num_bins(), "bins occupied"
+  type occupancy_t
+    private
+    logical, allocatable :: occupied_1D_(:)
+    logical, allocatable :: occupied_2D_(:,:)
+    logical, allocatable :: occupied_3D_(:,:,:)
+    logical, allocatable :: occupied_4D_(:,:,:,:)
+    logical, allocatable :: occupied_5D_(:,:,:,:,:)
+    logical, allocatable :: occupied_6D_(:,:,:,:,:,:)
+    logical, allocatable :: occupied_7D_(:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_8D_(:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_9D_(:,:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_10D_(:,:,:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_11D_(:,:,:,:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_12D_(:,:,:,:,:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_13D_(:,:,:,:,:,:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_14D_(:,:,:,:,:,:,:,:,:,:,:,:,:,:)
+    logical, allocatable :: occupied_15D_(:,:,:,:,:,:,:,:,:,:,:,:,:,:,:)
+  contains
+    procedure, non_overridable :: vacate
+    procedure, non_overridable :: occupy
+    procedure, non_overridable :: occupied
+    procedure, non_overridable :: num_occupied
+    procedure, non_overridable :: num_bins
+    procedure, non_overridable :: allocated_dim
+  end type
+
+  interface
+
+    pure module subroutine vacate(self, dims)
+      implicit none
+      class(occupancy_t), intent(inout) :: self
+      integer, intent(in) :: dims(:)
+    end subroutine
+
+    pure module subroutine occupy(self, loc)
+      implicit none
+      class(occupancy_t), intent(inout) :: self
+      integer, intent(in) :: loc(:)
+    end subroutine
+
+    pure module function occupied(self, loc) result(bin_occupied)
+      implicit none
+      class(occupancy_t), intent(in) :: self
+      integer, intent(in) :: loc(:)
+      logical bin_occupied
+    end function
+
+    pure module function num_occupied(self) result(bins_occupied)
+      implicit none
+      class(occupancy_t), intent(in) :: self
+      integer(int64) bins_occupied
+    end function
+
+    pure module function num_bins(self) result(bins_total)
+      implicit none
+      class(occupancy_t), intent(in) :: self
+      integer(int64) bins_total
+    end function
+
+    pure module function allocated_dim(self) result(my_dim)
+      implicit none
+      class(occupancy_t), intent(in) :: self
+      integer my_dim
+    end function
+
+  end interface
+
+end module occupancy_m
diff --git a/demo/src/occupancy_s.f90 b/demo/src/occupancy_s.f90
new file mode 100644
index 000000000..3b4e4a44e
--- /dev/null
+++ b/demo/src/occupancy_s.f90
@@ -0,0 +1,261 @@
+submodule(occupancy_m) occupancy_s
+  use assert_m, only : assert
+  implicit none
+
+contains
+
+  pure function allocations(occupancy) result(components_allocated)
+
+    type(occupancy_t), intent(in) :: occupancy
+    logical, allocatable :: components_allocated(:)
+
+    components_allocated = [ &
+       allocated(occupancy%occupied_1D_ ) &
+      ,allocated(occupancy%occupied_2D_ ) &
+      ,allocated(occupancy%occupied_3D_ ) &
+      ,allocated(occupancy%occupied_4D_ ) &
+      ,allocated(occupancy%occupied_5D_ ) &
+      ,allocated(occupancy%occupied_6D_ ) &
+      ,allocated(occupancy%occupied_7D_ ) &
+      ,allocated(occupancy%occupied_8D_ ) &
+      ,allocated(occupancy%occupied_9D_ ) &
+      ,allocated(occupancy%occupied_10D_) &
+      ,allocated(occupancy%occupied_11D_) &
+      ,allocated(occupancy%occupied_12D_) &
+      ,allocated(occupancy%occupied_13D_) &
+      ,allocated(occupancy%occupied_14D_) &
+      ,allocated(occupancy%occupied_15D_) &
+    ]
+
+  end function allocations
+
+  module procedure vacate
+
+    select case(size(dims))
+      case(1)
+        if (allocated(self%occupied_1D_)) deallocate(self%occupied_1D_)
+        allocate(self%occupied_1D_(dims(1)), source = .false.)
+      case(2)
+        if (allocated(self%occupied_2D_)) deallocate(self%occupied_2D_)
+        allocate(self%occupied_2D_(dims(1),dims(2)), source = .false.)
+      case(3)
+        if (allocated(self%occupied_3D_)) deallocate(self%occupied_3D_)
+        allocate(self%occupied_3D_(dims(1),dims(2),dims(3)), source = .false.)
+      case(4)
+        if (allocated(self%occupied_4D_)) deallocate(self%occupied_4D_)
+        allocate(self%occupied_4D_(dims(1),dims(2),dims(3),dims(4)), source = .false.)
+      case(5)
+        if (allocated(self%occupied_5D_)) deallocate(self%occupied_5D_)
+        allocate(self%occupied_5D_(dims(1),dims(2),dims(3),dims(4),dims(5)), source = .false.)
+      case(6)
+        if (allocated(self%occupied_6D_)) deallocate(self%occupied_6D_)
+        allocate(self%occupied_6D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), source = .false.)
+      case(7)
+        if (allocated(self%occupied_7D_)) deallocate(self%occupied_7D_)
+        allocate(self%occupied_7D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), source = .false.)
+      case(8)
+        if (allocated(self%occupied_8D_)) deallocate(self%occupied_8D_)
+        allocate(self%occupied_8D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8)), source = .false.)
+      case(9)
+        if (allocated(self%occupied_9D_)) deallocate(self%occupied_9D_)
+        allocate(self%occupied_9D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9)), source = .false.)
+      case(10)
+        if (allocated(self%occupied_10D_)) deallocate(self%occupied_10D_)
+        allocate(self%occupied_10D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10)), source = .false.)
+      case(11)
+        if (allocated(self%occupied_11D_)) deallocate(self%occupied_11D_)
+        allocate(self%occupied_11D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11)), source = .false.)
+      case(12)
+        if (allocated(self%occupied_12D_)) deallocate(self%occupied_12D_)
+        allocate(self%occupied_12D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12)), source = .false.)
+      case(13)
+        if (allocated(self%occupied_13D_)) deallocate(self%occupied_13D_)
+        allocate(self%occupied_13D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13)), source = .false.)
+      case(14)
+        if (allocated(self%occupied_14D_)) deallocate(self%occupied_14D_)
+        allocate(self%occupied_14D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13),dims(14)), source = .false.)
+      case(15)
+        if (allocated(self%occupied_15D_)) deallocate(self%occupied_15D_)
+        allocate(self%occupied_15D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13),dims(14),dims(15)), source = .false.)
+      case default
+        error stop "occupancy_s(vacate): unsupported rank"
+    end select
+
+    call assert(self%allocated_dim()==size(dims), "occupancy_s(vacate): self%allocated_dim() == size(dims)")
+
+  end procedure vacate
+
+  module procedure occupy
+
+    associate_o: &
+    associate(o => (loc))
+      select case(size(loc))
+        case(1)
+          self%occupied_1D_(o(1)) = .true.
+        case(2)
+          self%occupied_2D_(o(1),o(2)) = .true.
+        case(3)
+          self%occupied_3D_(o(1),o(2),o(3)) = .true.
+        case(4)
+          self%occupied_4D_(o(1),o(2),o(3),o(4)) = .true.
+        case(5)
+          self%occupied_5D_(o(1),o(2),o(3),o(4),o(5)) = .true.
+        case(6)
+          self%occupied_6D_(o(1),o(2),o(3),o(4),o(5),o(6)) = .true.
+        case(7)
+          self%occupied_7D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7)) = .true.
+        case(8)
+          self%occupied_8D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8)) = .true.
+        case(9)
+          self%occupied_9D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9)) = .true.
+        case(10)
+          self%occupied_10D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10)) = .true.
+        case(11)
+          self%occupied_11D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11)) = .true.
+        case(12)
+          self%occupied_12D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12)) = .true.
+        case(13)
+          self%occupied_13D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13)) = .true.
+        case(14)
+          self%occupied_14D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13),o(14)) = .true.
+        case(15)
+          self%occupied_15D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13),o(14),o(15)) = .true.
+        case default
+          error stop "occupancy_s(occupy): unsupported rank"
+      end select
+    end associate associate_o
+
+  end procedure occupy
+
+  module procedure occupied
+
+    nickname_loc: &
+    associate(b => (loc))
+      select case(size(loc))
+        case(1)
+          bin_occupied = self%occupied_1D_(b(1))
+        case(2)
+          bin_occupied = self%occupied_2D_(b(1),b(2))
+        case(3)
+          bin_occupied = self%occupied_3D_(b(1),b(2),b(3))
+        case(4)
+          bin_occupied = self%occupied_4D_(b(1),b(2),b(3),b(4))
+        case(5)
+          bin_occupied = self%occupied_5D_(b(1),b(2),b(3),b(4),b(5))
+        case(6)
+          bin_occupied = self%occupied_6D_(b(1),b(2),b(3),b(4),b(5),b(6))
+        case(7)
+          bin_occupied = self%occupied_7D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7))
+        case(8)
+          bin_occupied = self%occupied_8D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8))
+        case(9)
+          bin_occupied = self%occupied_9D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9))
+        case(10)
+          bin_occupied = self%occupied_10D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10))
+        case(11)
+          bin_occupied = self%occupied_11D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11))
+        case(12)
+          bin_occupied = self%occupied_12D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12))
+        case(13)
+          bin_occupied = self%occupied_13D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13))
+        case(14)
+          bin_occupied = self%occupied_14D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13),b(14))
+        case(15)
+          bin_occupied = self%occupied_15D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13),b(14),b(15))
+        case default
+          error stop "occupancy_s(occupied): unsupported rank"
+      end select
+    end associate nickname_loc
+
+  end procedure occupied
+
+  module procedure num_occupied
+
+    call assert(count(allocations(self))==1, "occupancy_s(num_occupied): count(allocations(self)) == 1")
+
+    select case(self%allocated_dim())
+      case(1)
+        bins_occupied = count(self%occupied_1D_, kind=int64)
+      case(2)
+        bins_occupied = count(self%occupied_2D_, kind=int64)
+      case(3)
+        bins_occupied = count(self%occupied_3D_, kind=int64)
+      case(4)
+        bins_occupied = count(self%occupied_4D_, kind=int64)
+      case(5)
+        bins_occupied = count(self%occupied_5D_, kind=int64)
+      case(6)
+        bins_occupied = count(self%occupied_6D_, kind=int64)
+      case(7)
+        bins_occupied = count(self%occupied_7D_, kind=int64)
+      case(8)
+        bins_occupied = count(self%occupied_8D_, kind=int64)
+      case(9)
+        bins_occupied = count(self%occupied_9D_, kind=int64)
+      case(10)
+        bins_occupied = count(self%occupied_10D_, kind=int64)
+      case(11)
+        bins_occupied = count(self%occupied_11D_, kind=int64)
+      case(12)
+        bins_occupied = count(self%occupied_12D_, kind=int64)
+      case(13)
+        bins_occupied = count(self%occupied_13D_, kind=int64)
+      case(14)
+        bins_occupied = count(self%occupied_14D_, kind=int64)
+      case(15)
+        bins_occupied = count(self%occupied_15D_, kind=int64)
+      case default
+        error stop "occupancy_s(num_occupied): unsupported rank"
+    end select
+
+  end procedure num_occupied
+
+  module procedure num_bins
+
+    select case(self%allocated_dim())
+      case(1)
+        bins_total = size(self%occupied_1D_, kind=int64)
+      case(2)
+        bins_total = size(self%occupied_2D_, kind=int64)
+      case(3)
+        bins_total = size(self%occupied_3D_, kind=int64)
+      case(4)
+        bins_total = size(self%occupied_4D_, kind=int64)
+      case(5)
+        bins_total = size(self%occupied_5D_, kind=int64)
+      case(6)
+        bins_total = size(self%occupied_6D_, kind=int64)
+      case(7)
+        bins_total = size(self%occupied_7D_, kind=int64)
+      case(8)
+        bins_total = size(self%occupied_8D_, kind=int64)
+      case(9)
+        bins_total = size(self%occupied_9D_, kind=int64)
+      case(10)
+        bins_total = size(self%occupied_10D_, kind=int64)
+      case(11)
+        bins_total = size(self%occupied_11D_, kind=int64)
+      case(12)
+        bins_total = size(self%occupied_12D_, kind=int64)
+      case(13)
+        bins_total = size(self%occupied_13D_, kind=int64)
+      case(14)
+        bins_total = size(self%occupied_14D_, kind=int64)
+      case(15)
+        bins_total = size(self%occupied_15D_, kind=int64)
+      case default
+        error stop "occupancy_s(num_bins): unsupported rank"
+    end select
+
+  end procedure num_bins
+
+  module procedure allocated_dim
+
+    associate(my_allocations => allocations(self))
+      call assert(count(my_allocations)==1, "occupancy_s(allocated_dim): count(allocations(self)) == 1")
+      my_dim = findloc(my_allocations, .true., dim=1)
+    end associate
+
+  end procedure allocated_dim
+
+end submodule occupancy_s
diff --git a/src/fiats/tensor_map_m.f90 b/src/fiats/tensor_map_m.f90
index b105f4613..1343343fb 100644
--- a/src/fiats/tensor_map_m.f90
+++ b/src/fiats/tensor_map_m.f90
@@ -15,14 +15,18 @@ module tensor_map_m
     character(len=:), allocatable, private :: layer_
     real(k), dimension(:), allocatable, private :: intercept_, slope_
   contains
-    generic :: map_to_training_range => default_real_map_to_training_range, double_precision_map_to_training_range
-    procedure, private, non_overridable :: default_real_map_to_training_range, double_precision_map_to_training_range
-    generic :: map_from_training_range => default_real_map_from_training_range, double_precision_map_from_training_range
-    procedure, private, non_overridable :: default_real_map_from_training_range, double_precision_map_from_training_range
-    generic :: to_json => default_real_to_json, double_precision_to_json
-    procedure, private :: default_real_to_json, double_precision_to_json
-    generic :: operator(==) => default_real_equals, double_precision_equals
-    procedure, private :: default_real_equals, double_precision_equals
+    generic :: map_to_training_range => default_real_map_to_training_range , double_precision_map_to_training_range
+    procedure, private, non_overridable :: default_real_map_to_training_range , double_precision_map_to_training_range
+    generic :: map_from_training_range => default_real_map_from_training_range, double_precision_map_from_training_range
+    procedure, private, non_overridable :: default_real_map_from_training_range, double_precision_map_from_training_range
+    generic :: minima => default_real_minima , double_precision_minima
+    procedure, private, non_overridable :: default_real_minima , double_precision_minima
+    generic :: maxima => default_real_maxima , double_precision_maxima
+    procedure, private, non_overridable :: default_real_maxima , double_precision_maxima
+    generic :: to_json => default_real_to_json , double_precision_to_json
+    procedure, private :: default_real_to_json , double_precision_to_json
+    generic :: operator(==) => default_real_equals , double_precision_equals
+    procedure, private :: default_real_equals , double_precision_equals
   end type
@@ -58,6 +62,30 @@ module function double_precision_from_json(lines) result(tensor_map)
 
   interface
 
+    pure module function default_real_minima(self) result(minima)
+      implicit none
+      class(tensor_map_t), intent(in) :: self
+      real, allocatable :: minima(:)
+    end function
+
+    pure module function double_precision_minima(self) result(minima)
+      implicit none
+      class(tensor_map_t(double_precision)), intent(in) :: self
+      double precision, allocatable :: minima(:)
+    end function
+
+    pure module function default_real_maxima(self) result(maxima)
+      implicit none
+      class(tensor_map_t), intent(in) :: self
+      real, allocatable :: maxima(:)
+    end function
+
+    pure module function double_precision_maxima(self) result(maxima)
+      implicit none
+      class(tensor_map_t(double_precision)), intent(in) :: self
+      double precision, allocatable :: maxima(:)
+    end function
+
     elemental module function default_real_map_to_training_range(self, tensor) result(normalized_tensor)
       implicit none
       class(tensor_map_t), intent(in) :: self
diff --git a/src/fiats/tensor_map_s.f90 b/src/fiats/tensor_map_s.f90
index c0e504d81..4651078f2 100644
--- a/src/fiats/tensor_map_s.f90
+++ b/src/fiats/tensor_map_s.f90
@@ -15,6 +15,22 @@
     tensor_map%slope_ = maxima - minima
   end procedure
 
+  module procedure default_real_minima
+    minima = self%intercept_
+  end procedure
+
+  module procedure default_real_maxima
+    maxima = self%intercept_ + self%slope_
+  end procedure
+
+  module procedure double_precision_minima
+    minima = self%intercept_
+  end procedure
+
+  module procedure double_precision_maxima
+    maxima = self%intercept_ + self%slope_
+  end procedure
+
   module procedure construct_double_precision
     call assert(size(minima)==size(maxima),"tensor_map_s(construct_double_precision): size(minima)==size(maxima)")
     tensor_map%layer_ = layer
diff --git a/src/fiats/trainable_network_m.f90 b/src/fiats/trainable_network_m.f90
index 0cb2cfc50..a085168fa 100644
--- a/src/fiats/trainable_network_m.f90
+++ b/src/fiats/trainable_network_m.f90
@@ -34,8 +34,8 @@ module function perturbed_identity_network(training_configuration, perturbation_magnitude, metadata, input_map, output_map) &
       result(trainable_network)
       implicit none
       type(training_configuration_t), intent(in) :: training_configuration
-      type(string_t), intent(in) :: metadata(:)
       real, intent(in) :: perturbation_magnitude
+      type(string_t), intent(in) :: metadata(:)
       type(tensor_map_t) input_map, output_map
       type(trainable_network_t) trainable_network
     end function
diff --git a/src/fiats/training_configuration_m.f90 b/src/fiats/training_configuration_m.f90
index eb20a30f0..507d77089 100644
--- a/src/fiats/training_configuration_m.f90
+++ b/src/fiats/training_configuration_m.f90
@@ -18,20 +18,20 @@
     type(hyperparameters_t(k)), private :: hyperparameters_
     type(network_configuration_t), private :: network_configuration_
   contains
-    generic :: operator(==) => default_real_equals, double_precision_equals
-    procedure, private :: default_real_equals, double_precision_equals
-    generic :: to_json => default_real_to_json, double_precision_to_json
-    procedure, private :: default_real_to_json, double_precision_to_json
-    generic :: mini_batches => default_real_mini_batches, double_precision_mini_batches
-    procedure, private :: default_real_mini_batches, double_precision_mini_batches
-    generic :: optimizer_name => default_real_optimizer_name, double_precision_optimizer_name
-    procedure, private :: default_real_optimizer_name, double_precision_optimizer_name
-    generic :: learning_rate => default_real_learning_rate, double_precision_learning_rate
-    procedure, private :: default_real_learning_rate, double_precision_learning_rate
-    generic :: differentiable_activation => default_real_differentiable_activation, double_precision_differentiable_activation
-    procedure, private :: default_real_differentiable_activation, double_precision_differentiable_activation
-    generic :: nodes_per_layer => default_real_nodes_per_layer, double_precision_nodes_per_layer
-    procedure, private :: default_real_nodes_per_layer, double_precision_nodes_per_layer
+    generic :: operator(==) => default_real_equals , double_precision_equals
+    procedure, private :: default_real_equals , double_precision_equals
+    generic :: to_json => default_real_to_json , double_precision_to_json
+    procedure, private :: default_real_to_json , double_precision_to_json
+    generic :: mini_batches => default_real_mini_batches , double_precision_mini_batches
+    procedure, private :: default_real_mini_batches , double_precision_mini_batches
+    generic :: optimizer_name => default_real_optimizer_name , double_precision_optimizer_name
+    procedure, private :: default_real_optimizer_name , double_precision_optimizer_name
+    generic :: learning_rate => default_real_learning_rate , double_precision_learning_rate
+    procedure, private :: default_real_learning_rate , double_precision_learning_rate
+    generic :: activation => default_real_activation , double_precision_activation
+    procedure, private :: default_real_activation , double_precision_activation
+    generic :: nodes_per_layer => default_real_nodes_per_layer , double_precision_nodes_per_layer
+    procedure, private :: default_real_nodes_per_layer , double_precision_nodes_per_layer
     generic :: skip_connections => default_real_skip_connections, double_precision_skip_connections
     procedure, private :: default_real_skip_connections, double_precision_skip_connections
   end type
@@ -128,13 +128,13 @@ elemental module function double_precision_learning_rate(self) result(rate)
       double precision rate
     end function
 
-    module function default_real_differentiable_activation(self) result(activation)
+    module function default_real_activation(self) result(activation)
      implicit none
      class(training_configuration_t), intent(in) :: self
      type(activation_t) activation
    end function
 
-    module function double_precision_differentiable_activation(self) result(activation)
+    module function double_precision_activation(self) result(activation)
      implicit none
      class(training_configuration_t(double_precision)), intent(in) :: self
      type(activation_t) activation
diff --git a/src/fiats/training_configuration_s.F90 b/src/fiats/training_configuration_s.F90
index 3b982a202..5ecdb5423 100644
--- a/src/fiats/training_configuration_s.F90
+++ b/src/fiats/training_configuration_s.F90
@@ -181,7 +181,7 @@
     using_skip = self%network_configuration_%skip_connections()
   end procedure
 
-  module procedure default_real_differentiable_activation
+  module procedure default_real_activation
 #if defined __INTEL_COMPILER || _CRAYFTN
     type(string_t) :: activation_name
     activation_name = self%network_configuration_%activation_name()
@@ -206,7 +206,7 @@
 #endif
   end procedure
 
-  module procedure double_precision_differentiable_activation
+  module procedure double_precision_activation
 #if defined __INTEL_COMPILER || _CRAYFTN
     type(string_t) :: activation_name
     activation_name = self%network_configuration_%activation_name()