From 3da9fb199fdf51ae242c8c7f98dd5890522d684c Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Tue, 15 Oct 2024 20:06:14 -0700
Subject: [PATCH 01/25] chore(demo/app): rm unused normalization function

---
 demo/app/train-cloud-microphysics.F90 | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90
index 1187b42e7..794afa416 100644
--- a/demo/app/train-cloud-microphysics.F90
+++ b/demo/app/train-cloud-microphysics.F90
@@ -1,6 +1,6 @@
 ! Copyright (c), The Regents of the University of California
 ! Terms of use are as specified in LICENSE.txt
-program train_on_flat_distribution
+program train_cloud_microphysics
   !! Train a neural network to represent the simplest cloud microphysics model from
   !! the Intermediate Complexity Atmospheric Research Model (ICAR) at
   !! https://github.com/BerkeleyLab/icar.
@@ -471,11 +471,4 @@ subroutine read_train_write(training_configuration, args, plot_file)
 
   end subroutine read_train_write
 
-  pure function normalize(x, x_min, x_max) result(x_normalized)
-    real, intent(in) :: x(:,:,:,:), x_min, x_max
-    real, allocatable :: x_normalized(:,:,:,:)
-    call assert(x_min/=x_max, "train_cloud_microphysics(normaliz): x_min/=x_max")
-    x_normalized = (x - x_min)/(x_max - x_min)
-  end function
-
-end program train_on_flat_distribution
+end program train_cloud_microphysics

From 187d8dec6b95a43b11c87c28f917c8cd6d72c80d Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Wed, 16 Oct 2024 00:15:40 -0700
Subject: [PATCH 02/25] WIP: define/read NetCDF_variable_t objects

---
 demo/app/train-cloud-microphysics.F90 | 540 +++++++++++++-------------
 demo/src/NetCDF_file_m.f90            |   6 +-
 demo/src/NetCDF_file_s.f90            |   6 +-
 demo/src/NetCDF_variable_m.f90        |  86 ++++
 demo/src/NetCDF_variable_s.f90        | 196 ++++++++++
 demo/src/run-fpm.sh-header            |   2 +-
 6 files changed, 562 insertions(+), 274 deletions(-)
 create mode 100644 demo/src/NetCDF_variable_m.f90
 create mode 100644 demo/src/NetCDF_variable_s.f90

diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90
index 794afa416..cc0429334 100644
--- a/demo/app/train-cloud-microphysics.F90
+++ b/demo/app/train-cloud-microphysics.F90
@@ -19,16 +19,16 @@ program train_cloud_microphysics
   !! Internal dependencies:
   use phase_space_bin_m, only : phase_space_bin_t
   use NetCDF_file_m, only: NetCDF_file_t
+  use NetCDF_variable_m, only: NetCDF_variable_t
   use ubounds_m, only : ubounds_t
   implicit none
 
-  character(len=*), parameter :: usage = &
-    new_line('a') // new_line('a') // &
-    'Usage: ' // new_line('a') // new_line('a') // &
-    './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('a') // &
-    '  --base <base-file-name> --epochs <number-of-epochs> \' // new_line('a') // &
+  character(len=*), parameter :: usage = new_line('a') // new_line('a') // &
+    'Usage: ' // new_line('a') // new_line('a') // &
+    './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('a') // &
+    '  --base <base-file-name> --epochs <number-of-epochs> \' // new_line('a') // &
     '  [--start <first-step>] [--end <last-step>] [--stride <stride>] [--bins <number-of-bins>] [--report <interval>] [--tolerance <cost-tolerance>]'// &
-    new_line('a') // new_line('a') // &
+    new_line('a') // new_line('a') // &
    'where angular brackets denote user-provided values and square brackets denote optional arguments.' // new_line('a') // &
    'The presence of a file named "stop" halts execution gracefully.'
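(A hypothetical invocation consistent with the usage string above; every flag value here is
illustrative rather than taken from the patch. With --base training, read_train_write below
derives the file names training_input.nc, training_output.nc, and training_network.json:

  ./build/run-fpm.sh run train-cloud-microphysics -- \
    --base training --epochs 100 \
    --start 1 --end 5 --stride 1 --bins 3 --report 10 --tolerance 1.E-08
)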
@@ -45,28 +45,24 @@ program train_cloud_microphysics
   end type
 
   integer(int64) t_start, t_finish, clock_rate
+  character(len=*), parameter :: config = "training_configuration.json"
 
   call system_clock(t_start, clock_rate)
-
-  associate( &
-    command_line_arguments => get_command_line_arguments(), &
-    training_configuration => training_configuration_t(file_t(string_t("training_configuration.json"))) &
-  )
+
 #if defined(MULTI_IMAGE_SUPPORT)
-    if (this_image()==1) then
+  if (this_image()==1) then
 #endif
-      call read_train_write(training_configuration, command_line_arguments, create_or_append_to("cost.plt"))
+    call read_train_write(training_configuration_t(file_t(config)), get_command_line_arguments(), create_or_append_to("cost.plt"))
 #if defined(MULTI_IMAGE_SUPPORT)
-    else
-      call read_train_write(training_configuration, command_line_arguments)
-    end if
+  else
+    call read_train_write(training_configuration_t(file_t(config)), get_command_line_arguments())
+  end if
 #endif
-  end associate
 
   call system_clock(t_finish)
 
   print *,"System clock time: ", real(t_finish - t_start, real64)/real(clock_rate, real64)
-  print *,new_line('a') // "______training_cloud_microhpysics done _______"
+  print *,new_line('a') // "______train_cloud_microphysics done _______"
 
 contains
 
@@ -77,16 +73,16 @@ function create_or_append_to(plot_file_name) result(plot_file)
     logical preexisting_plot_file
 
     inquire(file=plot_file_name, exist=preexisting_plot_file)
-    open(newunit=plot_unit, file=plot_file_name, status="unknown", position="append")
 
     if (.not. preexisting_plot_file) then
+      open(newunit=plot_unit, file=plot_file_name, status="new", action="write")
       write(plot_unit,*) " Epoch Cost (avg)"
       previous_epoch = 0
     else
+      ! Assumed intent: reopen the pre-existing file so plot_unit is defined for the appending writes below.
+      open(newunit=plot_unit, file=plot_file_name, status="old", position="append", action="write")
       associate(plot_file => file_t(string_t(plot_file_name)))
         associate(lines => plot_file%lines())
           associate(num_lines => size(lines))
-            if (num_lines == 0) then
+            if (num_lines == 0 .or. num_lines == 1) then
               previous_epoch = 0
             else
               block
@@ -99,7 +95,9 @@ function create_or_append_to(plot_file_name) result(plot_file)
           end associate
         end associate
       end associate
     end if
+
     plot_file = plot_file_t(plot_file_name, plot_unit, previous_epoch)
+
   end function
 
   function get_command_line_arguments() result(command_line_arguments)
 
@@ -183,6 +181,10 @@ subroutine read_train_write(training_configuration, args, plot_file)
     type(training_configuration_t), intent(in) :: training_configuration
     type(command_line_arguments_t), intent(in) :: args
     type(plot_file_t), intent(in), optional :: plot_file
+    type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:)
+    type(NetCDF_variable_t) input_time, output_time
+
+    type(string_t), allocatable :: names(:)
 
     ! local variables:
     real, allocatable, dimension(:,:,:,:) :: &
       pressure_in , potential_temperature_in , temperature_in , &
       pressure_out, potential_temperature_out, temperature_out, &
       qv_out, qc_out, qr_out, qs_out, &
       qv_in , qc_in , qr_in , qs_in , &
       dpt_dt, dqv_dt, dqc_dt, dqr_dt, dqs_dt
     type(ubounds_t), allocatable :: ubounds(:)
     double precision, allocatable, dimension(:) :: time_in, time_out
     integer, allocatable :: lbounds(:)
-    integer t, b, t_end
+    integer t, b, t_end, i
     logical stop_requested
 
-    associate( network_file => args%base_name // "_network.json")
-      associate(network_input => args%base_name // "_input.nc")
-        print *,"Reading network inputs from " // network_input
-        associate(network_input_file => netCDF_file_t(network_input))
-          ! Skipping the following unnecessary inputs that are in the current file format as of 14 Aug 2023:
-          !   precipitation, snowfall
-          call network_input_file%input("pressure", pressure_in)
-          call network_input_file%input("potential_temperature", potential_temperature_in)
-          call network_input_file%input("temperature", temperature_in)
-          call network_input_file%input("qv", qv_in)
-          call network_input_file%input("qc", qc_in)
-          call network_input_file%input("qr", qr_in)
-          call network_input_file%input("qs", qs_in)
-          call network_input_file%input("time", time_in)
-          t_end = size(time_in)
-          lbounds = [lbound(pressure_in), lbound(temperature_in), lbound(qv_in), lbound(qc_in), lbound(qr_in), lbound(qs_in)]
-          ubounds = &
-            [ubounds_t(ubound(qv_in)), ubounds_t(ubound(qc_in)), ubounds_t(ubound(qr_in)), ubounds_t(ubound(qs_in)), &
-             ubounds_t(ubound(pressure_in)), ubounds_t(ubound(temperature_in)) &
-            ]
-        end associate
-      end associate
-
-      associate(network_output => args%base_name // "_output.nc")
-        print *,"Reading network outputs from " // network_output
-        associate(network_output_file => netCDF_file_t(network_output))
-          call network_output_file%input("potential_temperature", potential_temperature_out)
-          ! Skipping the following unnecessary outputs that are in the current file format as of 14 Aug 2023:
-          !   pressure, temperature, precipitation, snowfall
-          call network_output_file%input("qv", qv_out)
-          call network_output_file%input("qc", qc_out)
-          call network_output_file%input("qr", qr_out)
-          call network_output_file%input("qs", qs_out)
-          call network_output_file%input("time", time_out)
-          lbounds = [lbounds, lbound(qv_out), lbound(qc_out), lbound(qr_out), lbound(qs_out)]
-          ubounds = [ubounds, ubounds_t(ubound(qv_out)), ubounds_t(ubound(qc_out)), &
-            ubounds_t(ubound(qr_out)), ubounds_t(ubound(qs_out))]
-          call assert(all(lbounds == 1), "main: default input/output lower bounds", intrinsic_array_t(lbounds))
-          call assert(all(ubounds == ubounds(1)), "main: matching input/output upper bounds")
-          block
-            double precision, parameter :: time_tolerance = 1.E-07
-            associate(matching_time_stamps => all(abs(time_in(2:t_end) - time_out(1:t_end-1)) < time_tolerance))
-              call assert(matching_time_stamps, "main: matching time stamps")
-            end associate
-          end block
-        end associate
-      end associate
-
-      print *,"Calculating time derivatives"
-
-      allocate(dpt_dt, mold = potential_temperature_out)
-      allocate(dqv_dt, mold = qv_out)
-      allocate(dqc_dt, mold = qc_out)
-      allocate(dqr_dt, mold = qr_out)
-      allocate(dqs_dt, mold = qs_out)
-
-      associate(dt => real(time_out - time_in))
-        do concurrent(t = 1:t_end)
-          dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t)
-          dqv_dt(:,:,:,t) = (qv_out(:,:,:,t)- qv_in(:,:,:,t))/dt(t)
-          dqc_dt(:,:,:,t) = (qc_out(:,:,:,t)- qc_in(:,:,:,t))/dt(t)
-          dqr_dt(:,:,:,t) = (qr_out(:,:,:,t)- qr_in(:,:,:,t))/dt(t)
-          dqs_dt(:,:,:,t) = (qs_out(:,:,:,t)- qs_in(:,:,:,t))/dt(t)
-        end do
-      end associate
+    enum, bind(C)
+      enumerator :: pressure=1, potential_temperature, temperature, qv, qc, qr, qs
+    end enum
+
+    names = [ &
+      string_t("pressure"), string_t("potential_temperature"), string_t("temperature") &
+      ,string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs") &
+    ]
+    allocate(input_variable(size(names)))
+    allocate(output_variable(size(names)))
+
+    associate( network_file => args%base_name // "_network.json", network_input => args%base_name // "_input.nc")
+      print *,"Reading network inputs from " // network_input
+      associate(network_input_file => netCDF_file_t(network_input))
+
+        do i=1, size(names)
+          print *,"- reading ", names(i)%string()
+          call input_variable(i)%input(names(i), network_input_file, rank=4)
+        end do
+
+        print *,"- reading time"
+        call input_time%input("time", network_input_file, rank=1)
+
+        t_end = size(time_in)
+
+        do i = 2, size(input_variable)
+          call assert(input_variable(i)%conformable_with(input_variable(1)), "train_cloud_microphysics: variables conformance")
+        end do
+
+        !ubounds = &
+        !  [ubounds_t(ubound(qv_in)), ubounds_t(ubound(qc_in)), ubounds_t(ubound(qr_in)), ubounds_t(ubound(qs_in)), &
+        !   ubounds_t(ubound(pressure_in)), ubounds_t(ubound(temperature_in)) &
+        !  ]
+      end associate
+    end associate
 
-      call assert(.not. any(ieee_is_nan(dpt_dt)), ".not. any(ieee_is_nan(dpt_dt)")
-      call assert(.not. 
any(ieee_is_nan(dqv_dt)), ".not. any(ieee_is_nan(dqv_dt)") - call assert(.not. any(ieee_is_nan(dqc_dt)), ".not. any(ieee_is_nan(dqc_dt)") - call assert(.not. any(ieee_is_nan(dqr_dt)), ".not. any(ieee_is_nan(dqr_dt)") - call assert(.not. any(ieee_is_nan(dqs_dt)), ".not. any(ieee_is_nan(dqs_dt)") - - train_network: & - block - type(trainable_network_t) trainable_network - type(mini_batch_t), allocatable :: mini_batches(:) - type(bin_t), allocatable :: bins(:) - type(input_output_pair_t), allocatable :: input_output_pairs(:) - type(tensor_t), allocatable, dimension(:) :: inputs, outputs - real, allocatable :: cost(:) - integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step - integer(int64) start_training, finish_training - - open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') - - if (allocated(args%end_step)) then - end_step = args%end_step - else - end_step = t_end - end if - - print *,"Defining tensors from time step", args%start_step, "through", end_step, "with strides of", args%stride - - ! The following temporary copies are required by gfortran bug 100650 and possibly 49324 - ! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324 - inputs = [( [( [( [( & - tensor_t( & - [ pressure_in(lon,lat,level,time), potential_temperature_in(lon,lat,level,time), temperature_in(lon,lat,level,time), & - qv_in(lon,lat,level,time), qc_in(lon,lat,level,time), qr_in(lon,lat,level,time), qs_in(lon,lat,level,time) & - ] & - ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - time = args%start_step, end_step, args%stride)] + !associate(network_output => args%base_name // "_output.nc") + ! print *,"Reading network outputs from " // network_output + ! associate(network_output_file => netCDF_file_t(network_output)) + ! call network_output_file%input("potential_temperature", potential_temperature_out) + ! call network_output_file%input("qv", qv_out) + ! call network_output_file%input("qc", qc_out) + ! call network_output_file%input("qr", qr_out) + ! call network_output_file%input("qs", qs_out) + ! call network_output_file%input("time", time_out) + ! lbounds = [lbounds, lbound(qv_out), lbound(qc_out), lbound(qr_out), lbound(qs_out)] + ! ubounds = [ubounds, ubounds_t(ubound(qv_out)), ubounds_t(ubound(qc_out)), & + ! ubounds_t(ubound(qr_out)), ubounds_t(ubound(qs_out))] + ! call assert(all(lbounds == 1), "main: default input/output lower bounds", intrinsic_array_t(lbounds)) + ! call assert(all(ubounds == ubounds(1)), "main: matching input/output upper bounds") + ! block + ! double precision, parameter :: time_tolerance = 1.E-07 + ! associate(matching_time_stamps => all(abs(time_in(2:t_end) - time_out(1:t_end-1)) real(time_out - time_in)) + ! do concurrent(t = 1:t_end) + ! dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t) + ! dqv_dt(:,:,:,t) = (qv_out(:,:,:,t)- qv_in(:,:,:,t))/dt(t) + ! dqc_dt(:,:,:,t) = (qc_out(:,:,:,t)- qc_in(:,:,:,t))/dt(t) + ! dqr_dt(:,:,:,t) = (qr_out(:,:,:,t)- qr_in(:,:,:,t))/dt(t) + ! dqs_dt(:,:,:,t) = (qs_out(:,:,:,t)- qs_in(:,:,:,t))/dt(t) + ! end do + !end associate + + !call assert(.not. any(ieee_is_nan(dpt_dt)), ".not. any(ieee_is_nan(dpt_dt)") + !call assert(.not. any(ieee_is_nan(dqv_dt)), ".not. any(ieee_is_nan(dqv_dt)") + !call assert(.not. any(ieee_is_nan(dqc_dt)), ".not. any(ieee_is_nan(dqc_dt)") + !call assert(.not. any(ieee_is_nan(dqr_dt)), ".not. 
any(ieee_is_nan(dqr_dt)") + !call assert(.not. any(ieee_is_nan(dqs_dt)), ".not. any(ieee_is_nan(dqs_dt)") + + !train_network: & + !block + ! type(trainable_network_t) trainable_network + ! type(mini_batch_t), allocatable :: mini_batches(:) + ! type(bin_t), allocatable :: bins(:) + ! type(input_output_pair_t), allocatable :: input_output_pairs(:) + ! type(tensor_t), allocatable, dimension(:) :: inputs, outputs + ! real, allocatable :: cost(:) + ! integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step + ! integer(int64) start_training, finish_training + + ! open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') + + ! if (allocated(args%end_step)) then + ! end_step = args%end_step + ! else + ! end_step = t_end + ! end if + + ! print *,"Defining tensors from time step", args%start_step, "through", end_step, "with strides of", args%stride + + ! ! The following temporary copies are required by gfortran bug 100650 and possibly 49324 + ! ! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324 + ! inputs = [( [( [( [( & + ! tensor_t( & + ! [ pressure_in(lon,lat,level,time), potential_temperature_in(lon,lat,level,time), temperature_in(lon,lat,level,time), & + ! qv_in(lon,lat,level,time), qc_in(lon,lat,level,time), qr_in(lon,lat,level,time), qs_in(lon,lat,level,time) & + ! ] & + ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & + ! time = args%start_step, end_step, args%stride)] - outputs = [( [( [( [( & - tensor_t( & - [dpt_dt(lon,lat,level,time), dqv_dt(lon,lat,level,time), dqc_dt(lon,lat,level,time), dqr_dt(lon,lat,level,time), & - dqs_dt(lon,lat,level,time) & - ] & - ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - time = args%start_step, end_step, args%stride)] - - print *,"Calculating output tensor component ranges." - output_extrema: & - associate( & - output_minima => [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], & - output_maxima => [maxval(dpt_dt), maxval(dqv_dt), maxval(dqc_dt), maxval(dqr_dt), maxval(dqs_dt)] & - ) - output_map: & - associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) - read_or_initialize_engine: & - if (io_status==0) then - print *,"Reading network from file " // network_file - trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) - close(network_unit) - else - close(network_unit) - - initialize_network: & - block - character(len=len('YYYYMMDD')) date - - call date_and_time(date) - - print *,"Calculating input tensor component ranges." 
- associate( & - input_map => tensor_map_t( & - layer = "inputs", & - minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), & - minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], & - maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), & - maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] & - ) ) - associate(activation => training_configuration%differentiable_activation()) - associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections())))) - trainable_network = trainable_network_t( & - training_configuration, & - perturbation_magnitude = 0.05, & - metadata = [ & - string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), & - activation%function_name(), residual_network & - ], input_map = input_map, output_map = output_map & - ) - end associate - end associate - end associate ! input_map, date_string - end block initialize_network - end if read_or_initialize_engine - - print *, "Conditionally sampling for a flat distribution of output values" - block - integer i - logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) - logical keepers(size(outputs)) - type(phase_space_bin_t), allocatable :: bin(:) - occupied = .false. - keepers = .false. - - bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))] - - do i = 1, size(outputs) - if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle - occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true. - keepers(i) = .true. - end do - input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers)) - print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // & - " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." - end block - end associate output_map - end associate output_extrema - - print *,"Normalizing the remaining input and output tensors" - input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - - associate( & - num_pairs => size(input_output_pairs), & - n_bins => training_configuration%mini_batches(), & - adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & - learning_rate => training_configuration%learning_rate() & - ) - bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - - print *,"Training network" - print *, " Epoch Cost (avg)" - - call system_clock(start_training) - - train_write_and_maybe_exit: & - block - integer first_epoch - integer me + ! outputs = [( [( [( [( & + ! tensor_t( & + ! [dpt_dt(lon,lat,level,time), dqv_dt(lon,lat,level,time), dqc_dt(lon,lat,level,time), dqr_dt(lon,lat,level,time), & + ! dqs_dt(lon,lat,level,time) & + ! ] & + ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & + ! time = args%start_step, end_step, args%stride)] + + ! print *,"Calculating output tensor component ranges." + ! output_extrema: & + ! associate( & + ! output_minima => [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], & + ! output_maxima => [maxval(dpt_dt), maxval(dqv_dt), maxval(dqc_dt), maxval(dqr_dt), maxval(dqs_dt)] & + ! ) + ! output_map: & + ! associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) + ! read_or_initialize_network: & + ! 
if (io_status==0) then + ! print *,"Reading network from file " // network_file + ! trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) + ! close(network_unit) + ! else + ! close(network_unit) + + ! initialize_network: & + ! block + ! character(len=len('YYYYMMDD')) date + + ! call date_and_time(date) + + ! print *,"Calculating input tensor component ranges." + ! associate( & + ! input_map => tensor_map_t( & + ! layer = "inputs", & + ! minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), & + ! minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], & + ! maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), & + ! maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] & + ! ) ) + ! associate(activation => training_configuration%differentiable_activation()) + ! associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections())))) + ! trainable_network = trainable_network_t( & + ! training_configuration, & + ! perturbation_magnitude = 0.05, & + ! metadata = [ & + ! string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), & + ! activation%function_name(), residual_network & + ! ], input_map = input_map, output_map = output_map & + ! ) + ! end associate + ! end associate + ! end associate ! input_map, date_string + ! end block initialize_network + ! end if read_or_initialize_network + + ! print *, "Conditionally sampling for a flat distribution of output values" + ! block + ! integer i + ! logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) + ! logical keepers(size(outputs)) + ! type(phase_space_bin_t), allocatable :: bin(:) + ! occupied = .false. + ! keepers = .false. + + ! bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))] + + ! do i = 1, size(outputs) + ! if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle + ! occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true. + ! keepers(i) = .true. + ! end do + ! input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers)) + ! print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // & + ! " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." + ! end block + ! end associate output_map + ! end associate output_extrema + + ! print *,"Normalizing the remaining input and output tensors" + ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) + + ! associate( & + ! num_pairs => size(input_output_pairs), & + ! n_bins => training_configuration%mini_batches(), & + ! adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & + ! learning_rate => training_configuration%learning_rate() & + ! ) + ! bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] + + ! print *,"Training network" + ! print *, " Epoch Cost (avg)" + + ! call system_clock(start_training) + ! + ! train_write_and_maybe_exit: & + ! block + ! integer first_epoch + ! integer me #if defined(MULTI_IMAGE_SUPPORT) - me = this_image() + ! me = this_image() #else - me = 1 + ! me = 1 #endif - if (me==1) first_epoch = plot_file%previous_epoch + 1 + ! 
if (me==1) first_epoch = plot_file%previous_epoch + 1 #if defined(MULTI_IMAGE_SUPPORT) - call co_broadcast(first_epoch, source_image=1) + ! call co_broadcast(first_epoch, source_image=1) #endif - associate(last_epoch => first_epoch + args%num_epochs - 1) - epochs: & - do epoch = first_epoch, last_epoch + ! associate(last_epoch => first_epoch + args%num_epochs - 1) + ! epochs: & + ! do epoch = first_epoch, last_epoch - if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent - mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] + ! if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent + ! mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] - call trainable_network%train(mini_batches, cost, adam, learning_rate) + ! call trainable_network%train(mini_batches, cost, adam, learning_rate) - associate(average_cost => sum(cost)/size(cost)) - associate(converged => average_cost <= args%cost_tolerance) + ! associate(average_cost => sum(cost)/size(cost)) + ! associate(converged => average_cost <= args%cost_tolerance) - image_1_maybe_writes: & - if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_interval)==0])) then + ! image_1_maybe_writes: & + ! if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_interval)==0])) then - print *, epoch, average_cost - write(plot_file%plot_unit,*) epoch, average_cost + ! print *, epoch, average_cost + ! write(plot_file%plot_unit,*) epoch, average_cost - associate(json_file => trainable_network%to_json()) - call json_file%write_lines(string_t(network_file)) - end associate + ! associate(json_file => trainable_network%to_json()) + ! call json_file%write_lines(string_t(network_file)) + ! end associate - end if image_1_maybe_writes + ! end if image_1_maybe_writes - signal_convergence: & - if (converged) then - block - integer unit - open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. - close(unit) - exit epochs - end block - end if signal_convergence - end associate - end associate + ! signal_convergence: & + ! if (converged) then + ! block + ! integer unit + ! open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. + ! close(unit) + ! exit epochs + ! end block + ! end if signal_convergence + ! end associate + ! end associate - inquire(file="stop", exist=stop_requested) + ! inquire(file="stop", exist=stop_requested) - graceful_exit: & - if (stop_requested) then - print *,'Shutting down because a file named "stop" was found.' - return - end if graceful_exit + ! graceful_exit: & + ! if (stop_requested) then + ! print *,'Shutting down because a file named "stop" was found.' + ! return + ! end if graceful_exit - end do epochs - end associate - end block train_write_and_maybe_exit + ! end do epochs + ! end associate + ! end block train_write_and_maybe_exit - end associate + ! end associate - call system_clock(finish_training) - print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & - args%num_epochs,"epochs" + ! call system_clock(finish_training) + ! print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & + ! args%num_epochs,"epochs" - end block train_network - end associate ! 
network_file + !end block train_network + !end associate ! network_file - close(plot_file%plot_unit) + !close(plot_file%plot_unit) end subroutine read_train_write diff --git a/demo/src/NetCDF_file_m.f90 b/demo/src/NetCDF_file_m.f90 index 7bba60ab0..91d1dac92 100644 --- a/demo/src/NetCDF_file_m.f90 +++ b/demo/src/NetCDF_file_m.f90 @@ -1,8 +1,5 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt -#ifndef __INTEL_FORTRAN -!! Due to a suspected bug in the Intel ifx compiler, the above C preprocessor macro -!! effectively eliminates this file's source code when building with an Intel compiler. module NetCDF_file_m implicit none @@ -52,5 +49,4 @@ module subroutine input_double_precision(self, varname, values) end interface -end module NetCDF_file_m -#endif // __INTEL_FORTRAN \ No newline at end of file +end module NetCDF_file_m \ No newline at end of file diff --git a/demo/src/NetCDF_file_s.f90 b/demo/src/NetCDF_file_s.f90 index 0200cfc89..e1b40908a 100644 --- a/demo/src/NetCDF_file_s.f90 +++ b/demo/src/NetCDF_file_s.f90 @@ -1,8 +1,5 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt -#ifndef __INTEL_FORTRAN -!! Due to a suspected bug in the Intel ifx compiler, the above C preprocessor macro -!! effectively eliminates this file's source code when building with an Intel compiler. submodule(netCDF_file_m) netCDF_file_s use netcdf, only : & nf90_create, nf90_def_dim, nf90_def_var, nf90_enddef, nf90_put_var, nf90_inquire_dimension, & ! functions @@ -226,5 +223,4 @@ function get_shape(ncid, varname) result(array_shape) end procedure -end submodule netCDF_file_s -#endif // __INTEL_FORTRAN \ No newline at end of file +end submodule netCDF_file_s \ No newline at end of file diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90 new file mode 100644 index 000000000..b12b58664 --- /dev/null +++ b/demo/src/NetCDF_variable_m.f90 @@ -0,0 +1,86 @@ +! Copyright (c), The Regents of the University of California +! 
Terms of use are as specified in LICENSE.txt +module NetCDF_variable_m + use NetCDF_file_m, only : NetCDF_file_t + use kind_parameters_m, only : default_real, double_precision + use julienne_m, only : string_t + implicit none + + private + public :: NetCDF_variable_t + + type NetCDF_variable_t(k) + integer, kind :: k = default_real + private + real(k), allocatable :: values_1D_(:), values_2D_(:,:), values_3D_(:,:,:), values_4D_(:,:,:,:) + character(len=:), allocatable :: name_ + contains + generic :: input => default_real_input, double_precision_input, default_real_input_character_name, double_precision_input_character_name + procedure, private, non_overridable :: default_real_input, double_precision_input, default_real_input_character_name, double_precision_input_character_name + generic :: conformable_with => default_real_conformable_with, double_precision_conformable_with + procedure, private, non_overridable :: default_real_conformable_with, double_precision_conformable_with + generic :: rank => default_real_rank, double_precision_rank + procedure, private, non_overridable :: default_real_rank, double_precision_rank + end type + + interface + + impure elemental module subroutine default_real_input(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + type(string_t), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + impure elemental module subroutine double_precision_input(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t(double_precision)), intent(inout) :: self + type(string_t), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + impure elemental module subroutine default_real_input_character_name(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + character(len=*), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + impure elemental module subroutine double_precision_input_character_name(self, variable_name, file, rank) + implicit none + class(NetCDF_variable_t(double_precision)), intent(inout) :: self + character(len=*), intent(in) :: variable_name + type(NetCDF_file_t), intent(in) :: file + integer, intent(in) :: rank + end subroutine + + elemental module function default_real_conformable_with(self, NetCDF_variable) result(conformable) + implicit none + class(NetCDF_variable_t), intent(in) :: self, NetCDF_variable + logical conformable + end function + + elemental module function double_precision_conformable_with(self, NetCDF_variable) result(conformable) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self, NetCDF_variable + logical conformable + end function + + elemental module function default_real_rank(self) result(my_rank) + implicit none + class(NetCDF_variable_t), intent(in) :: self + integer my_rank + end function + + elemental module function double_precision_rank(self) result(my_rank) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self + integer my_rank + end function + + end interface + +end module \ No newline at end of file diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90 new file mode 100644 index 000000000..28f673114 --- /dev/null +++ b/demo/src/NetCDF_variable_s.f90 @@ -0,0 +1,196 @@ +! Copyright (c), The Regents of the University of California +! 
Terms of use are as specified in LICENSE.txt
+submodule(NetCDF_variable_m) NetCDF_variable_s
+  use kind_parameters_m, only : default_real
+  use assert_m, only : assert
+  implicit none
+
+  interface components_allocated
+    module procedure default_real_components_allocated
+    module procedure double_precision_components_allocated
+  end interface
+
+  interface lower_bounds
+    module procedure default_real_lower_bounds
+    module procedure double_precision_lower_bounds
+  end interface
+
+  interface upper_bounds
+    module procedure default_real_upper_bounds
+    module procedure double_precision_upper_bounds
+  end interface
+
+contains
+
+  module procedure default_real_input
+    self%name_ = variable_name
+    select case (rank)
+      case (1)
+        call file%input(variable_name%string(), self%values_1D_)
+      case (2)
+        call file%input(variable_name%string(), self%values_2D_)
+      case (3)
+        call file%input(variable_name%string(), self%values_3D_)
+      case (4)
+        call file%input(variable_name%string(), self%values_4D_)
+      case default
+        error stop 'NetCDF_variable_s(default_real_input): unsupported rank'
+    end select
+  end procedure
+
+  module procedure default_real_input_character_name
+    call self%default_real_input(string_t(variable_name), file, rank)
+  end procedure
+
+  module procedure double_precision_input
+    self%name_ = variable_name
+    select case (rank)
+      case (1)
+        call file%input(variable_name%string(), self%values_1D_)
+      case (2)
+        call file%input(variable_name%string(), self%values_2D_)
+      case (3)
+        call file%input(variable_name%string(), self%values_3D_)
+      case (4)
+        call file%input(variable_name%string(), self%values_4D_)
+      case default
+        error stop 'NetCDF_variable_s(double_precision_input): unsupported rank'
+    end select
+  end procedure
+
+  module procedure double_precision_input_character_name
+    call self%double_precision_input(string_t(variable_name), file, rank)
+  end procedure
+
+  pure function default_real_components_allocated(NetCDF_variable) result(allocation_vector)
+    type(NetCDF_variable_t), intent(in) :: NetCDF_variable
+    logical, allocatable :: allocation_vector(:)
+    ! All four components must appear here so that rank() below maps the findloc result to the true rank:
+    allocation_vector = [ allocated(NetCDF_variable%values_1D_), allocated(NetCDF_variable%values_2D_) &
+                         ,allocated(NetCDF_variable%values_3D_), allocated(NetCDF_variable%values_4D_) ]
+  end function
+
+  pure function double_precision_components_allocated(NetCDF_variable) result(allocation_vector)
+    type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable
+    logical, allocatable :: allocation_vector(:)
+    allocation_vector = [ allocated(NetCDF_variable%values_1D_), allocated(NetCDF_variable%values_2D_) &
+                         ,allocated(NetCDF_variable%values_3D_), allocated(NetCDF_variable%values_4D_) ]
+  end function
+
+  module procedure default_real_rank
+    associate(allocation_vector => components_allocated(self))
+      call assert(count(allocation_vector) == 1, "NetCDF_variable_s(default_real_rank): allocation count")
+      my_rank = findloc(allocation_vector, .true., dim=1)
+    end associate
+  end procedure
+
+  module procedure double_precision_rank
+    associate(allocation_vector => components_allocated(self))
+      call assert(count(allocation_vector) == 1, "NetCDF_variable_s(double_precision_rank): allocation count")
+      my_rank = findloc(allocation_vector, .true., dim=1)
+    end associate
+  end procedure
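+
+  ! Usage sketch for the generic bindings defined above (illustrative only: the file name
+  ! "temperature.nc" and the variable names are assumptions, not part of this patch):
+  !
+  !   type(NetCDF_variable_t) temperature, time
+  !   associate(file => NetCDF_file_t("temperature.nc"))
+  !     call temperature%input("temperature", file, rank=4) ! allocates values_4D_
+  !     call time%input("time", file, rank=1)               ! allocates values_1D_
+  !   end associate
+  !   print *, temperature%rank(), time%conformable_with(time)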
+
+  pure function default_real_lower_bounds(NetCDF_variable) result(lbounds)
+    type(NetCDF_variable_t), intent(in) :: NetCDF_variable
+    integer, allocatable :: lbounds(:)
+    select case(NetCDF_variable%rank())
+      case(1)
+        lbounds = lbound(NetCDF_variable%values_1D_)
+      case(2)
+        lbounds = lbound(NetCDF_variable%values_2D_)
+      case(3)
+        lbounds = lbound(NetCDF_variable%values_3D_)
+      case(4)
+        lbounds = lbound(NetCDF_variable%values_4D_)
+      case default
+        error stop "NetCDF_variable_s(default_real_lower_bounds): unsupported rank"
+    end select
+  end function
+
+  pure function double_precision_lower_bounds(NetCDF_variable) result(lbounds)
+    type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable
+    integer, allocatable :: lbounds(:)
+    select case(NetCDF_variable%rank())
+      case(1)
+        lbounds = lbound(NetCDF_variable%values_1D_)
+      case(2)
+        lbounds = lbound(NetCDF_variable%values_2D_)
+      case(3)
+        lbounds = lbound(NetCDF_variable%values_3D_)
+      case(4)
+        lbounds = lbound(NetCDF_variable%values_4D_)
+      case default
+        error stop "NetCDF_variable_s(double_precision_lower_bounds): unsupported rank"
+    end select
+  end function
+
+  pure function default_real_upper_bounds(NetCDF_variable) result(ubounds)
+    type(NetCDF_variable_t), intent(in) :: NetCDF_variable
+    integer, allocatable :: ubounds(:)
+    select case(NetCDF_variable%rank())
+      case(1)
+        ubounds = ubound(NetCDF_variable%values_1D_)
+      case(2)
+        ubounds = ubound(NetCDF_variable%values_2D_)
+      case(3)
+        ubounds = ubound(NetCDF_variable%values_3D_)
+      case(4)
+        ubounds = ubound(NetCDF_variable%values_4D_)
+      case default
+        error stop "NetCDF_variable_s(default_real_upper_bounds): unsupported rank"
+    end select
+  end function
+
+  pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds)
+    type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable
+    integer, allocatable :: ubounds(:)
+    select case(NetCDF_variable%rank())
+      case(1)
+        ubounds = ubound(NetCDF_variable%values_1D_)
+      case(2)
+        ubounds = ubound(NetCDF_variable%values_2D_)
+      case(3)
+        ubounds = ubound(NetCDF_variable%values_3D_)
+      case(4)
+        ubounds = ubound(NetCDF_variable%values_4D_)
+      case default
+        error stop "NetCDF_variable_s(double_precision_upper_bounds): unsupported rank"
+    end select
+  end function
+
+  module procedure default_real_conformable_with
+
+    if (self%rank() /= NetCDF_variable%rank()) then
+      conformable = .false.
+      return
+    end if
+
+    if (any(lower_bounds(self) /= lower_bounds(NetCDF_variable))) then
+      conformable = .false.
+      return
+    end if
+
+    if (any(upper_bounds(self) /= upper_bounds(NetCDF_variable))) then
+      conformable = .false.
+      return
+    end if
+
+    conformable = .true.
+
+  end procedure
+
+  module procedure double_precision_conformable_with
+
+    if (self%rank() /= NetCDF_variable%rank()) then
+      conformable = .false.
+      return
+    end if
+
+    if (any(lower_bounds(self) /= lower_bounds(NetCDF_variable))) then
+      conformable = .false.
+      return
+    end if
+
+    if (any(upper_bounds(self) /= upper_bounds(NetCDF_variable))) then
+      conformable = .false.
+      return
+    end if
+
+    conformable = .true.
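+    ! Conformable here means: equal rank and identical lower and upper bounds in every
+    ! dimension, mirroring default_real_conformable_with above.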
+ + end procedure + +end submodule NetCDF_variable_s \ No newline at end of file diff --git a/demo/src/run-fpm.sh-header b/demo/src/run-fpm.sh-header index e09df076b..b0b3e167a 100644 --- a/demo/src/run-fpm.sh-header +++ b/demo/src/run-fpm.sh-header @@ -1,5 +1,5 @@ #!/bin/sh -#-- DO NOT EDIT -- created by inference-engine/setup.sh +#-- DO NOT EDIT -- created by fiats/demo/setup.sh export PKG_CONFIG_PATH fpm_arguments="" From 308e9d06b06715e5d6cb677b3b626ab402560b60 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 17 Oct 2024 17:11:52 -0700 Subject: [PATCH 03/25] chore(ubounds_t): rm derived type --- demo/app/train-cloud-microphysics.F90 | 7 ------- src/fiats/tmp | 14 ++++++++++++++ src/fiats/ubounds_m.f90 | 22 ---------------------- src/fiats_m.f90 | 1 - 4 files changed, 14 insertions(+), 30 deletions(-) create mode 100644 src/fiats/tmp delete mode 100644 src/fiats/ubounds_m.f90 diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index cc0429334..42dca9f8a 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -20,7 +20,6 @@ program train_cloud_microphysics use phase_space_bin_m, only : phase_space_bin_t use NetCDF_file_m, only: NetCDF_file_t use NetCDF_variable_m, only: NetCDF_variable_t - use ubounds_m, only : ubounds_t implicit none character(len=*), parameter :: usage = new_line('a') // new_line('a') // & @@ -193,7 +192,6 @@ subroutine read_train_write(training_configuration, args, plot_file) qv_out, qc_out, qr_out, qs_out, & qv_in , qc_in , qr_in , qs_in , & dpt_dt, dqv_dt, dqc_dt, dqr_dt, dqs_dt - type(ubounds_t), allocatable :: ubounds(:) double precision, allocatable, dimension(:) :: time_in, time_out integer, allocatable :: lbounds(:) integer t, b, t_end, i @@ -228,11 +226,6 @@ subroutine read_train_write(training_configuration, args, plot_file) call assert(input_variable(i)%conformable_with(input_variable(1)), "train_cloud_microphysics: variables conformance") end do - !ubounds = & - ! [ubounds_t(ubound(qv_in)), ubounds_t(ubound(qc_in)), ubounds_t(ubound(qr_in)), ubounds_t(ubound(qs_in)), & - ! ubounds_t(ubound(pressure_in)), ubounds_t(ubound(temperature_in)) & - ! ] - end associate end associate diff --git a/src/fiats/tmp b/src/fiats/tmp new file mode 100644 index 000000000..2c56ec4c1 --- /dev/null +++ b/src/fiats/tmp @@ -0,0 +1,14 @@ ++operator(==) ++infer() ++to_json() ++map_to_input_range() ++map_from_output_range() ++num_hidden_layers() ++num_inputs() ++num_outputs() ++nodes_per_layer() ++assert_conformable_with () ++skip() ++activation_function_name() ++learn() ++assert_consistency() diff --git a/src/fiats/ubounds_m.f90 b/src/fiats/ubounds_m.f90 deleted file mode 100644 index d3d9762ea..000000000 --- a/src/fiats/ubounds_m.f90 +++ /dev/null @@ -1,22 +0,0 @@ -! Copyright (c), The Regents of the University of California -! Terms of use are as specified in LICENSE.txt -module ubounds_m - !! 
This module serves only to support array bounds checking in the main program below - implicit none - - type ubounds_t - integer, allocatable :: ubounds_(:) - contains - procedure equals - generic :: operator(==) => equals - end type - -contains - - elemental function equals(lhs, rhs) result(lhs_equals_rhs) - class(ubounds_t), intent(in) :: lhs, rhs - logical lhs_equals_rhs - lhs_equals_rhs = all(lhs%ubounds_ == rhs%ubounds_) - end function - -end module diff --git a/src/fiats_m.f90 b/src/fiats_m.f90 index 4d09e40c7..df6d35d7e 100644 --- a/src/fiats_m.f90 +++ b/src/fiats_m.f90 @@ -15,6 +15,5 @@ module fiats_m use tensor_map_m, only : tensor_map_t use trainable_network_m, only : trainable_network_t use training_configuration_m, only : training_configuration_t - use ubounds_m, only : ubounds_t implicit none end module fiats_m From 23113c5020b4f2ad783424201898cee2e4ade4fc Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 17 Oct 2024 19:35:31 -0700 Subject: [PATCH 04/25] feat(train_cloud_microphysics):generalize file I/O --- demo/app/tensor-statistics.f90 | 24 +- demo/app/train-cloud-microphysics.F90 | 417 +++++++++++++------------- demo/src/NetCDF_variable_m.f90 | 14 + demo/src/NetCDF_variable_s.f90 | 77 +++-- 4 files changed, 293 insertions(+), 239 deletions(-) diff --git a/demo/app/tensor-statistics.f90 b/demo/app/tensor-statistics.f90 index 8c5cf2d06..9a99d1e7d 100644 --- a/demo/app/tensor-statistics.f90 +++ b/demo/app/tensor-statistics.f90 @@ -1,5 +1,27 @@ ! Copyright (c), The Regents of the University of California ! Terms of use are as specified in LICENSE.txt + +module ubounds_m + !! This module serves only to support array bounds checking in the main program below + implicit none + + type ubounds_t + integer, allocatable :: ubounds_(:) + contains + procedure equals + generic :: operator(==) => equals + end type + +contains + + elemental function equals(lhs, rhs) result(lhs_equals_rhs) + class(ubounds_t), intent(in) :: lhs, rhs + logical lhs_equals_rhs + lhs_equals_rhs = all(lhs%ubounds_ == rhs%ubounds_) + end function + +end module + program tensor_statistics !! This program !! 1. Computes the ranges and histograms of input and output tensors saved by @@ -9,7 +31,7 @@ program tensor_statistics ! External dependencies: use julienne_m, only : command_line_t, file_t, string_t use assert_m, only : assert, intrinsic_array_t - use fiats_m, only : ubounds_t + use ubounds_m, only : ubounds_t use ieee_arithmetic, only : ieee_is_nan use iso_fortran_env, only : int64, real64 diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index 42dca9f8a..51cbde38e 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -183,228 +183,233 @@ subroutine read_train_write(training_configuration, args, plot_file) type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:) type(NetCDF_variable_t) input_time, output_time - type(string_t), allocatable :: names(:) - ! 
local variables: - real, allocatable, dimension(:,:,:,:) :: & - pressure_in , potential_temperature_in , temperature_in , & - pressure_out, potential_temperature_out, temperature_out, & - qv_out, qc_out, qr_out, qs_out, & - qv_in , qc_in , qr_in , qs_in , & - dpt_dt, dqv_dt, dqc_dt, dqr_dt, dqs_dt - double precision, allocatable, dimension(:) :: time_in, time_out - integer, allocatable :: lbounds(:) - integer t, b, t_end, i + integer t, b, t_end, v logical stop_requested enum, bind(C) enumerator :: pressure=1, potential_temperature, temperature, qv, qc, qr, qs end enum - names = [ & - string_t("pressure"), string_t("potential_temperature"), string_t("temperature") & - ,string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs") & - ] - allocate(input_variable(size(names))) - allocate(output_variable(size(names))) + enum, bind(C) + enumerator :: dpotential_temperature_t=1, dqv_dt, dqc_dt, dqr_dt, dqs_dt + end enum + + associate(input_names => & + [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & + string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & + ) + allocate(input_variable(size(input_names))) + + associate(input_file_name => args%base_name // "_input.nc") - associate( network_file => args%base_name // "_network.json", network_input => args%base_name // "_input.nc") - print *,"Reading network inputs from " // network_input - associate(network_input_file => netCDF_file_t(network_input)) + print *,"Reading network inputs from " // input_file_name - do i=1, size(names) - print *,"- reading ", names(i)%string() - call input_variable(i)%input(names(i), network_input_file, rank=4) - end do + associate(input_file => netCDF_file_t(input_file_name)) - print *,"- reading time" - call input_time%input("time", network_input_file, rank=1) + do v=1, size(input_variable) + print *,"- reading ", input_names(v)%string() + call input_variable(v)%input(input_names(v), input_file, rank=4) + end do - t_end = size(time_in) + do v = 2, size(input_variable) + call assert(input_variable(v)%conformable_with(input_variable(1)), "train_cloud_microphysics: input variable conformance") + end do - do i = 2, size(input_variable) - call assert(input_variable(i)%conformable_with(input_variable(1)), "train_cloud_microphysics: variables conformance") - end do + print *,"- reading time" + call input_time%input("time", input_file, rank=1) + end associate end associate end associate - !associate(network_output => args%base_name // "_output.nc") - ! print *,"Reading network outputs from " // network_output - ! associate(network_output_file => netCDF_file_t(network_output)) - ! call network_output_file%input("potential_temperature", potential_temperature_out) - ! call network_output_file%input("qv", qv_out) - ! call network_output_file%input("qc", qc_out) - ! call network_output_file%input("qr", qr_out) - ! call network_output_file%input("qs", qs_out) - ! call network_output_file%input("time", time_out) - ! lbounds = [lbounds, lbound(qv_out), lbound(qc_out), lbound(qr_out), lbound(qs_out)] - ! ubounds = [ubounds, ubounds_t(ubound(qv_out)), ubounds_t(ubound(qc_out)), & - ! ubounds_t(ubound(qr_out)), ubounds_t(ubound(qs_out))] - ! call assert(all(lbounds == 1), "main: default input/output lower bounds", intrinsic_array_t(lbounds)) - ! call assert(all(ubounds == ubounds(1)), "main: matching input/output upper bounds") - ! block - ! double precision, parameter :: time_tolerance = 1.E-07 - ! 
associate(matching_time_stamps => all(abs(time_in(2:t_end) - time_out(1:t_end-1)) [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) - !print *,"Calculating time derivatives" - - !allocate(dpt_dt, mold = potential_temperature_out) - !allocate(dqv_dt, mold = qv_out) - !allocate(dqc_dt, mold = qc_out) - !allocate(dqr_dt, mold = qr_out) - !allocate(dqs_dt, mold = qs_out) - - !associate(dt => real(time_out - time_in)) - ! do concurrent(t = 1:t_end) - ! dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t) - ! dqv_dt(:,:,:,t) = (qv_out(:,:,:,t)- qv_in(:,:,:,t))/dt(t) - ! dqc_dt(:,:,:,t) = (qc_out(:,:,:,t)- qc_in(:,:,:,t))/dt(t) - ! dqr_dt(:,:,:,t) = (qr_out(:,:,:,t)- qr_in(:,:,:,t))/dt(t) - ! dqs_dt(:,:,:,t) = (qs_out(:,:,:,t)- qs_in(:,:,:,t))/dt(t) - ! end do - !end associate - - !call assert(.not. any(ieee_is_nan(dpt_dt)), ".not. any(ieee_is_nan(dpt_dt)") - !call assert(.not. any(ieee_is_nan(dqv_dt)), ".not. any(ieee_is_nan(dqv_dt)") - !call assert(.not. any(ieee_is_nan(dqc_dt)), ".not. any(ieee_is_nan(dqc_dt)") - !call assert(.not. any(ieee_is_nan(dqr_dt)), ".not. any(ieee_is_nan(dqr_dt)") - !call assert(.not. any(ieee_is_nan(dqs_dt)), ".not. any(ieee_is_nan(dqs_dt)") - - !train_network: & - !block - ! type(trainable_network_t) trainable_network - ! type(mini_batch_t), allocatable :: mini_batches(:) - ! type(bin_t), allocatable :: bins(:) - ! type(input_output_pair_t), allocatable :: input_output_pairs(:) - ! type(tensor_t), allocatable, dimension(:) :: inputs, outputs - ! real, allocatable :: cost(:) - ! integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step - ! integer(int64) start_training, finish_training - - ! open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') - - ! if (allocated(args%end_step)) then - ! end_step = args%end_step - ! else - ! end_step = t_end - ! end if - - ! print *,"Defining tensors from time step", args%start_step, "through", end_step, "with strides of", args%stride - - ! ! The following temporary copies are required by gfortran bug 100650 and possibly 49324 - ! ! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324 - ! inputs = [( [( [( [( & - ! tensor_t( & - ! [ pressure_in(lon,lat,level,time), potential_temperature_in(lon,lat,level,time), temperature_in(lon,lat,level,time), & - ! qv_in(lon,lat,level,time), qc_in(lon,lat,level,time), qr_in(lon,lat,level,time), qs_in(lon,lat,level,time) & - ! ] & - ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - ! time = args%start_step, end_step, args%stride)] - - ! outputs = [( [( [( [( & - ! tensor_t( & - ! [dpt_dt(lon,lat,level,time), dqv_dt(lon,lat,level,time), dqc_dt(lon,lat,level,time), dqr_dt(lon,lat,level,time), & - ! dqs_dt(lon,lat,level,time) & - ! ] & - ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - ! time = args%start_step, end_step, args%stride)] - - ! print *,"Calculating output tensor component ranges." - ! output_extrema: & - ! associate( & - ! output_minima => [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], & - ! output_maxima => [maxval(dpt_dt), maxval(dqv_dt), maxval(dqc_dt), maxval(dqr_dt), maxval(dqs_dt)] & - ! ) - ! output_map: & - ! associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) - ! 
read_or_initialize_network: & - ! if (io_status==0) then - ! print *,"Reading network from file " // network_file - ! trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) - ! close(network_unit) - ! else - ! close(network_unit) - - ! initialize_network: & - ! block - ! character(len=len('YYYYMMDD')) date - - ! call date_and_time(date) - - ! print *,"Calculating input tensor component ranges." - ! associate( & - ! input_map => tensor_map_t( & - ! layer = "inputs", & - ! minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), & - ! minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], & - ! maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), & - ! maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] & - ! ) ) - ! associate(activation => training_configuration%differentiable_activation()) - ! associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections())))) - ! trainable_network = trainable_network_t( & - ! training_configuration, & - ! perturbation_magnitude = 0.05, & - ! metadata = [ & - ! string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), & - ! activation%function_name(), residual_network & - ! ], input_map = input_map, output_map = output_map & - ! ) - ! end associate - ! end associate - ! end associate ! input_map, date_string - ! end block initialize_network - ! end if read_or_initialize_network - - ! print *, "Conditionally sampling for a flat distribution of output values" - ! block - ! integer i - ! logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) - ! logical keepers(size(outputs)) - ! type(phase_space_bin_t), allocatable :: bin(:) - ! occupied = .false. - ! keepers = .false. - - ! bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))] - - ! do i = 1, size(outputs) - ! if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle - ! occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true. - ! keepers(i) = .true. - ! end do - ! input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers)) - ! print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // & - ! " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." - ! end block - ! end associate output_map - ! end associate output_extrema - - ! print *,"Normalizing the remaining input and output tensors" - ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - - ! associate( & - ! num_pairs => size(input_output_pairs), & - ! n_bins => training_configuration%mini_batches(), & - ! adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & - ! learning_rate => training_configuration%learning_rate() & - ! ) - ! bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - - ! print *,"Training network" - ! print *, " Epoch Cost (avg)" - - ! call system_clock(start_training) - ! - ! train_write_and_maybe_exit: & - ! block - ! integer first_epoch - ! 
integer me + allocate(output_variable(size(output_names))) + + associate(output_file_name => args%base_name // "_output.nc") + + print *,"Reading network outputs from " // output_file_name + + associate(output_file => netCDF_file_t(output_file_name)) + + do v=1, size(output_variable) + print *,"- reading ", output_names(v)%string() + call output_variable(v)%input(output_names(v), output_file, rank=4) + end do + + do v = 1, size(output_variable) + call assert(output_variable(v)%conformable_with(input_variable(1)), "train_cloud_microphysics: output variable conformance") + end do + + print *,"- reading time" + call output_time%input("time", output_file, rank=1) + + call assert(output_time%conformable_with(input_time), "train_cloud_microphysics: input/output time conformance") + + end associate + end associate + end associate + + block + type(NetCDF_variable_t) derivative(size(output_variable)) + end block + + !t_end = size(time_in) + + !allocate(dpt_dt, mold = potential_temperature_out) + !allocate(dqv_dt, mold = qv_out) + !allocate(dqc_dt, mold = qc_out) + !allocate(dqr_dt, mold = qr_out) + !allocate(dqs_dt, mold = qs_out) + + !associate(dt => real(time_out - time_in)) + ! do concurrent(t = 1:t_end) + ! dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t) + ! dqv_dt(:,:,:,t) = (qv_out(:,:,:,t)- qv_in(:,:,:,t))/dt(t) + ! dqc_dt(:,:,:,t) = (qc_out(:,:,:,t)- qc_in(:,:,:,t))/dt(t) + ! dqr_dt(:,:,:,t) = (qr_out(:,:,:,t)- qr_in(:,:,:,t))/dt(t) + ! dqs_dt(:,:,:,t) = (qs_out(:,:,:,t)- qs_in(:,:,:,t))/dt(t) + ! end do + !end associate + + !call assert(.not. any(ieee_is_nan(dpt_dt)), ".not. any(ieee_is_nan(dpt_dt)") + !call assert(.not. any(ieee_is_nan(dqv_dt)), ".not. any(ieee_is_nan(dqv_dt)") + !call assert(.not. any(ieee_is_nan(dqc_dt)), ".not. any(ieee_is_nan(dqc_dt)") + !call assert(.not. any(ieee_is_nan(dqr_dt)), ".not. any(ieee_is_nan(dqr_dt)") + !call assert(.not. any(ieee_is_nan(dqs_dt)), ".not. any(ieee_is_nan(dqs_dt)") + + !train_network: & + !block + ! type(trainable_network_t) trainable_network + ! type(mini_batch_t), allocatable :: mini_batches(:) + ! type(bin_t), allocatable :: bins(:) + ! type(input_output_pair_t), allocatable :: input_output_pairs(:) + ! type(tensor_t), allocatable, dimension(:) :: inputs, outputs + ! real, allocatable :: cost(:) + ! integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step + ! integer(int64) start_training, finish_training + + ! associate( network_file => args%base_name // "_network.json") + + ! open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') + + ! if (allocated(args%end_step)) then + ! end_step = args%end_step + ! else + ! end_step = t_end + ! end if + + ! print *,"Defining tensors from time step", args%start_step, "through", end_step, "with strides of", args%stride + + ! ! The following temporary copies are required by gfortran bug 100650 and possibly 49324 + ! ! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324 + ! inputs = [( [( [( [( & + ! tensor_t( & + ! [ pressure_in(lon,lat,level,time), potential_temperature_in(lon,lat,level,time), temperature_in(lon,lat,level,time), & + ! qv_in(lon,lat,level,time), qc_in(lon,lat,level,time), qr_in(lon,lat,level,time), qs_in(lon,lat,level,time) & + ! ] & + ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & + ! time = args%start_step, end_step, args%stride)] + + ! outputs = [( [( [( [( & + ! 
tensor_t( & + ! [dpt_dt(lon,lat,level,time), dqv_dt(lon,lat,level,time), dqc_dt(lon,lat,level,time), dqr_dt(lon,lat,level,time), & + ! dqs_dt(lon,lat,level,time) & + ! ] & + ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & + ! time = args%start_step, end_step, args%stride)] + + ! print *,"Calculating output tensor component ranges." + ! output_extrema: & + ! associate( & + ! output_minima => [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], & + ! output_maxima => [maxval(dpt_dt), maxval(dqv_dt), maxval(dqc_dt), maxval(dqr_dt), maxval(dqs_dt)] & + ! ) + ! output_map: & + ! associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) + ! read_or_initialize_network: & + ! if (io_status==0) then + ! print *,"Reading network from file " // network_file + ! trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) + ! close(network_unit) + ! else + ! close(network_unit) + + ! initialize_network: & + ! block + ! character(len=len('YYYYMMDD')) date + + ! call date_and_time(date) + + ! print *,"Calculating input tensor component ranges." + ! associate( & + ! input_map => tensor_map_t( & + ! layer = "inputs", & + ! minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), & + ! minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], & + ! maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), & + ! maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] & + ! ) ) + ! associate(activation => training_configuration%differentiable_activation()) + ! associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections())))) + ! trainable_network = trainable_network_t( & + ! training_configuration, & + ! perturbation_magnitude = 0.05, & + ! metadata = [ & + ! string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), & + ! activation%function_name(), residual_network & + ! ], input_map = input_map, output_map = output_map & + ! ) + ! end associate + ! end associate + ! end associate ! input_map, date_string + ! end block initialize_network + ! end if read_or_initialize_network + + ! print *, "Conditionally sampling for a flat distribution of output values" + ! block + ! integer i + ! logical occupied(argsnum_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) + ! logical keepers(size(outputs)) + ! type(phase_space_bin_t), allocatable :: bin(:) + ! occupied = .false. + ! keepers = .false. + + ! bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))] + + ! do i = 1, size(outputs) + ! if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle + ! occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true. + ! keepers(i) = .true. + ! end do + ! input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers)) + ! print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // & + ! " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." + ! end block + ! end associate output_map + ! end associate output_extrema + + ! print *,"Normalizing the remaining input and output tensors" + ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) + + ! associate( & + ! num_pairs => size(input_output_pairs), & + ! 
n_bins => training_configuration%mini_batches(), & + ! adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & + ! learning_rate => training_configuration%learning_rate() & + ! ) + ! bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] + + ! print *,"Training network" + ! print *, " Epoch Cost (avg)" + + ! call system_clock(start_training) + ! + ! train_write_and_maybe_exit: & + ! block + ! integer first_epoch + ! integer me #if defined(MULTI_IMAGE_SUPPORT) @@ -471,8 +476,8 @@ subroutine read_train_write(training_configuration, args, plot_file) ! print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & ! args%num_epochs,"epochs" + !end associate ! network_file !end block train_network - !end associate ! network_file !close(plot_file%plot_unit) diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90 index b12b58664..892d66670 100644 --- a/demo/src/NetCDF_variable_m.f90 +++ b/demo/src/NetCDF_variable_m.f90 @@ -21,6 +21,8 @@ module NetCDF_variable_m procedure, private, non_overridable :: default_real_conformable_with, double_precision_conformable_with generic :: rank => default_real_rank, double_precision_rank procedure, private, non_overridable :: default_real_rank, double_precision_rank + generic :: operator(-) => default_real_subtract, double_precision_subtract + procedure, private, non_overridable :: default_real_subtract, double_precision_subtract end type interface @@ -81,6 +83,18 @@ elemental module function double_precision_rank(self) result(my_rank) integer my_rank end function + elemental module function default_real_subtract(lhs, rhs) result(difference) + implicit none + class(NetCDF_variable_t), intent(in) :: lhs, rhs + type(NetCDF_variable_t) difference + end function + + elemental module function double_precision_subtract(lhs, rhs) result(difference) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: lhs, rhs + type(NetCDF_variable_t) difference + end function + end interface end module \ No newline at end of file diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90 index 28f673114..1e150ea7e 100644 --- a/demo/src/NetCDF_variable_s.f90 +++ b/demo/src/NetCDF_variable_s.f90 @@ -2,7 +2,7 @@ ! 
Terms of use are as specified in LICENSE.txt submodule(NetCDF_variable_m) NetCDF_variable_s use kind_parameters_m, only : default_real - use assert_m, only : assert + use assert_m, only : assert, intrinsic_array_t implicit none interface components_allocated @@ -65,13 +65,19 @@ pure function default_real_components_allocated(NetCDF_variable) result(allocation_vector) type(NetCDF_variable_t), intent(in) :: NetCDF_variable logical, allocatable :: allocation_vector(:) - allocation_vector = [allocated(NetCDF_variable%values_1D_), allocated(NetCDF_variable%values_4D_)] + allocation_vector = [ allocated(NetCDF_variable%values_1D_) & + ,allocated(NetCDF_variable%values_2D_) & + ,allocated(NetCDF_variable%values_3D_) & + ,allocated(NetCDF_variable%values_4D_) ] end function pure function double_precision_components_allocated(NetCDF_variable) result(allocation_vector) type(NetCDF_variable_t(double_precision)), intent(in) :: NetCDF_variable logical, allocatable :: allocation_vector(:) - allocation_vector = [allocated(NetCDF_variable%values_1D_), allocated(NetCDF_variable%values_4D_)] + allocation_vector = [ allocated(NetCDF_variable%values_1D_) & + ,allocated(NetCDF_variable%values_2D_) & + ,allocated(NetCDF_variable%values_3D_) & + ,allocated(NetCDF_variable%values_4D_) ] end function module procedure default_real_rank @@ -157,40 +163,47 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds) end function module procedure default_real_conformable_with - - if (self%rank() /= NetCDF_variable%rank()) then - conformable = .false. - return - end if - - if (any(lower_bounds(self) /= lower_bounds(NetCDF_variable))) then - conformable = .false. - return - end if - - if (any(upper_bounds(self) /= upper_bounds(NetCDF_variable))) then - conformable = .false. - return - end if - - conformable = .true. - + conformable = all([ self%rank() == NetCDF_variable%rank() & + ,lower_bounds(self) == lower_bounds(NetCDF_variable) & + ,upper_bounds(self) == upper_bounds(NetCDF_variable) ]) end procedure module procedure double_precision_conformable_with + conformable = all([ self%rank() == NetCDF_variable%rank() & + ,lower_bounds(self) == lower_bounds(NetCDF_variable) & + ,upper_bounds(self) == upper_bounds(NetCDF_variable) ]) + end procedure - if (self%rank() /= NetCDF_variable%rank()) then - conformable = .false. - return - end if - - if (any(lower_bounds(self) /= lower_bounds(NetCDF_variable))) then - conformable = .false. - return - end if - - conformable = .true. 
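The conformable_with rewrite above replaces three early-return tests with one all(...) reduction over a concatenated logical vector: the scalar rank comparison plus the elementwise lower- and upper-bound comparisons. A minimal self-contained sketch of the same idiom, using only intrinsic procedures; the program and variable names are illustrative and not part of the patch series:

    program conformance_sketch
      !! Sketch of the all([...]) conformance idiom shown above; illustrative only.
      implicit none
      real, allocatable :: a(:,:), b(:,:)
      allocate(a(2,3), b(2,3))
      ! A scalar logical and two rank-1 logical arrays concatenate into one
      ! vector, so a single all(...) subsumes the early-return cascade.
      print *, all([ rank(a) == rank(b)     &
                    ,lbound(a) == lbound(b) &
                    ,ubound(a) == ubound(b) ])
    end program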
+  module procedure default_real_subtract
+    call assert(lhs%conformable_with(rhs), "NetCDF_variable_s(default_real_subtract): lhs%conformable_with(rhs)")
+    select case(lhs%rank())
+    case(1)
+      difference%values_1D_ = lhs%values_1D_ - rhs%values_1D_
+    case(2)
+      difference%values_2D_ = lhs%values_2D_ - rhs%values_2D_
+    case(3)
+      difference%values_3D_ = lhs%values_3D_ - rhs%values_3D_
+    case(4)
+      difference%values_4D_ = lhs%values_4D_ - rhs%values_4D_
+    case default
+      error stop "NetCDF_variable_s(default_real_subtract): unsupported rank"
+    end select
+  end procedure
 
+  module procedure double_precision_subtract
+    call assert(lhs%conformable_with(rhs), "NetCDF_variable_s(double_precision_subtract): lhs%conformable_with(rhs)")
+    select case(lhs%rank())
+    case(1)
+      difference%values_1D_ = lhs%values_1D_ - rhs%values_1D_
+    case(2)
+      difference%values_2D_ = lhs%values_2D_ - rhs%values_2D_
+    case(3)
+      difference%values_3D_ = lhs%values_3D_ - rhs%values_3D_
+    case(4)
+      difference%values_4D_ = lhs%values_4D_ - rhs%values_4D_
+    case default
+      error stop "NetCDF_variable_s(double_precision_subtract): unsupported rank"
+    end select
   end procedure
 
 end submodule NetCDF_variable_s
\ No newline at end of file

From 84c24bb972e90a3d4ea6f2646d53a5cd5bf8fc4a Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Thu, 17 Oct 2024 22:07:29 -0700
Subject: [PATCH 05/25] feat(NetCDF_variable): generalize time derivatives

---
 demo/app/train-cloud-microphysics.F90 |  38 ++++---
 demo/src/NetCDF_variable_m.f90        |  62 +++++++++++-
 demo/src/NetCDF_variable_s.f90        | 138 ++++++++++++++++++++++
 3 files changed, 221 insertions(+), 17 deletions(-)

diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90
index 51cbde38e..58123a0f9 100644
--- a/demo/app/train-cloud-microphysics.F90
+++ b/demo/app/train-cloud-microphysics.F90
@@ -22,13 +22,13 @@ program train_cloud_microphysics
   use NetCDF_variable_m, only: NetCDF_variable_t
   implicit none
 
-  character(len=*), parameter :: usage = new_line('a') // new_line('a') // &
-    'Usage: ' // new_line('a') // new_line('a') // &
-    './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('a') // &
-    ' --base <string> --epochs <integer> \' // new_line('a') // &
+  character(len=*), parameter :: usage = new_line('a') // new_line('a') // &
+    'Usage: ' // new_line('a') // new_line('a') // &
+    './build/run-fpm.sh run train-cloud-microphysics -- \' // new_line('a') // &
+    ' --base <string> --epochs <integer> \' // new_line('a') // &
     ' [--start <integer>] [--end <integer>] [--stride <integer>] [--bins <integer>] [--report <integer>] [--tolerance <real>]'// &
-    new_line('a') // new_line('a') // &
-    'where angular brackets denote user-provided values and square brackets denote optional arguments.' // new_line('a') // &
+    new_line('a') // new_line('a') // &
+    'where angular brackets denote user-provided values and square brackets denote optional arguments.' // new_line('a') // &
     'The presence of a file named "stop" halts execution gracefully.' 
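The goal of this patch is to express the per-variable tendency calculations — previously spelled out in the commented-out loops over dpt_dt, dqv_dt, and so on, visible below — as whole-variable NetCDF_variable_t expressions built from the new operators. A minimal sketch of the underlying finite-difference training target for a single variable, mirroring those commented-out loops; all names and shapes are illustrative, not part of the patch series:

    program derivative_target_sketch
      !! Sketch of the finite-difference target d(qv)/dt ~ (qv_out - qv_in)/dt;
      !! illustrative only, with toy-sized arrays.
      implicit none
      integer t
      integer, parameter :: t_end = 2
      real, dimension(2,2,2,t_end) :: qv_in, qv_out, dqv_dt
      double precision :: time_in(t_end), time_out(t_end)

      qv_in = 1.; qv_out = 2.
      time_in  = [0d0, 900d0]
      time_out = [900d0, 1800d0]

      associate(dt => real(time_out - time_in))
        ! One forward difference per time step across the whole spatial grid
        do concurrent(t = 1:t_end)
          dqv_dt(:,:,:,t) = (qv_out(:,:,:,t) - qv_in(:,:,:,t))/dt(t)
        end do
      end associate

      print *, dqv_dt(1,1,1,:)
    end program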
type command_line_arguments_t @@ -249,20 +249,26 @@ subroutine read_train_write(training_configuration, args, plot_file) end associate end associate - end associate - block - type(NetCDF_variable_t) derivative(size(output_variable)) - end block + block + type(NetCDF_variable_t) derivative(size(output_variable)) + + print *,"Calculating time derivatives" + + associate(dt => NetCDF_variable_t(output_time - input_time, "dt")) + do v = 1, size(derivative) + associate(derivative_name => "d" // output_names(v)%string() // "/dt") + print *,"- " // derivative_name + derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name) + end associate + end do + end associate + end block + + end associate !t_end = size(time_in) - !allocate(dpt_dt, mold = potential_temperature_out) - !allocate(dqv_dt, mold = qv_out) - !allocate(dqc_dt, mold = qc_out) - !allocate(dqr_dt, mold = qr_out) - !allocate(dqs_dt, mold = qs_out) - !associate(dt => real(time_out - time_in)) ! do concurrent(t = 1:t_end) ! dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t) diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90 index 892d66670..9855bbfdc 100644 --- a/demo/src/NetCDF_variable_m.f90 +++ b/demo/src/NetCDF_variable_m.f90 @@ -23,8 +23,44 @@ module NetCDF_variable_m procedure, private, non_overridable :: default_real_rank, double_precision_rank generic :: operator(-) => default_real_subtract, double_precision_subtract procedure, private, non_overridable :: default_real_subtract, double_precision_subtract + generic :: operator(/) => default_real_divide, double_precision_divide + procedure, private, non_overridable :: default_real_divide, double_precision_divide + generic :: assignment(=) => default_real_assign, double_precision_assign + procedure, private, non_overridable :: default_real_assign, double_precision_assign end type + interface NetCDF_variable_t + + elemental module function default_real_copy(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t), intent(in) :: source + type(string_t), intent(in), optional :: rename + type(NetCDF_variable_t) NetCDF_variable + end function + + elemental module function default_real_copy_character_name(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t), intent(in) :: source + character(len=*), intent(in), optional :: rename + type(NetCDF_variable_t) NetCDF_variable + end function + + elemental module function double_precision_copy(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t(double_precision)), intent(in) :: source + type(string_t), intent(in), optional :: rename + type(NetCDF_variable_t(double_precision)) NetCDF_variable + end function + + elemental module function double_precision_copy_character_name(source, rename) result(NetCDF_variable) + implicit none + type(NetCDF_variable_t(double_precision)), intent(in) :: source + character(len=*), intent(in), optional :: rename + type(NetCDF_variable_t(double_precision)) NetCDF_variable + end function + + end interface + interface impure elemental module subroutine default_real_input(self, variable_name, file, rank) @@ -92,9 +128,33 @@ elemental module function default_real_subtract(lhs, rhs) result(difference) elemental module function double_precision_subtract(lhs, rhs) result(difference) implicit none class(NetCDF_variable_t(double_precision)), intent(in) :: lhs, rhs - type(NetCDF_variable_t) difference + 
type(NetCDF_variable_t(double_precision)) difference end function + elemental module function default_real_divide(lhs, rhs) result(ratio) + implicit none + class(NetCDF_variable_t), intent(in) :: lhs, rhs + type(NetCDF_variable_t) ratio + end function + + elemental module function double_precision_divide(lhs, rhs) result(ratio) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: lhs, rhs + type(NetCDF_variable_t(double_precision)) ratio + end function + + elemental module subroutine default_real_assign(lhs, rhs) + implicit none + class(NetCDF_variable_t), intent(inout) :: lhs + type(NetCDF_variable_t), intent(in) :: rhs + end subroutine + + elemental module subroutine double_precision_assign(lhs, rhs) + implicit none + class(NetCDF_variable_t(double_precision)), intent(inout) :: lhs + type(NetCDF_variable_t(double_precision)), intent(in) :: rhs + end subroutine + end interface end module \ No newline at end of file diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90 index 1e150ea7e..15b47864a 100644 --- a/demo/src/NetCDF_variable_s.f90 +++ b/demo/src/NetCDF_variable_s.f90 @@ -22,6 +22,60 @@ contains + module procedure default_real_copy + + if (present(rename)) then + NetCDF_variable%name_ = rename + else + NetCDF_variable%name_ = source%name_ + end if + + select case(source%rank()) + case (1) + NetCDF_variable%values_1D_ = source%values_1D_ + case (2) + NetCDF_variable%values_2D_ = source%values_2D_ + case (3) + NetCDF_variable%values_3D_ = source%values_3D_ + case (4) + NetCDF_variable%values_4D_ = source%values_4D_ + case default + error stop 'NetCDF_variable_s(default_real_copy): unsupported rank' + end select + + end procedure + + module procedure default_real_copy_character_name + NetCDF_variable = default_real_copy(source, string_t(rename)) + end procedure + + module procedure double_precision_copy + + if (present(rename)) then + NetCDF_variable%name_ = rename + else + NetCDF_variable%name_ = source%name_ + end if + + select case(source%rank()) + case (1) + NetCDF_variable%values_1D_ = source%values_1D_ + case (2) + NetCDF_variable%values_2D_ = source%values_2D_ + case (3) + NetCDF_variable%values_3D_ = source%values_3D_ + case (4) + NetCDF_variable%values_4D_ = source%values_4D_ + case default + error stop 'NetCDF_variable_s(double_precision_copy): unsupported rank' + end select + + end procedure + + module procedure double_precision_copy_character_name + NetCDF_variable = double_precision_copy(source, string_t(rename)) + end procedure + module procedure default_real_input self%name_ = variable_name select case (rank) @@ -206,4 +260,88 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds) end select end procedure + module procedure default_real_divide + + integer t + + call assert(rhs%rank()==1, "NetCDF_variable_s(default_real_divide): rhs%rank()==1") + + associate(t_end => size(rhs%values_1D_)) + + select case(lhs%rank()) + case(4) + + call assert(size(rhs%values_1D_) == size(lhs%values_4D_,4), "NetCDF_variable_s(default_real_divide): conformable numerator/denominator") + allocate(ratio%values_4D_, mold = lhs%values_4D_) + + do concurrent(t = 1:t_end) + ratio%values_4D_(:,:,:,t) = lhs%values_4D_(:,:,:,t) / rhs%values_1D_(t) + end do + + case default + error stop "NetCDF_variable_s(default_real_divide): unsupported lhs rank)" + end select + + end associate + + end procedure + + module procedure double_precision_divide + + integer t + + call assert(rhs%rank()==1, "NetCDF_variable_s(double_precision_divide): 
rhs%rank()==1") + + associate(t_end => size(rhs%values_1D_)) + + select case(lhs%rank()) + case(4) + + call assert(size(rhs%values_1D_) == size(lhs%values_4D_,4), "NetCDF_variable_s(double_precision_divide): conformable numerator/denominator") + allocate(ratio%values_4D_, mold = lhs%values_4D_) + + do concurrent(t = 1:t_end) + ratio%values_4D_(:,:,:,t) = lhs%values_4D_(:,:,:,t) / rhs%values_1D_(t) + end do + + case default + error stop "NetCDF_variable_s(double_precision_divide): unsupported lhs rank)" + end select + + end associate + + end procedure + + module procedure default_real_assign + select case(rhs%rank()) + case(1) + lhs%values_1D_ = rhs%values_1D_ + case(2) + lhs%values_2D_ = rhs%values_2D_ + case(3) + lhs%values_3D_ = rhs%values_3D_ + case(4) + lhs%values_4D_ = rhs%values_4D_ + case default + error stop "NetCDF_variable_s(default_real_assign): unsupported rank)" + end select + call assert(lhs%rank()==rhs%rank(), "NetCDF_variable_s(default_real_assign): ranks match)") + end procedure + + module procedure double_precision_assign + select case(rhs%rank()) + case(1) + lhs%values_1D_ = rhs%values_1D_ + case(2) + lhs%values_2D_ = rhs%values_2D_ + case(3) + lhs%values_3D_ = rhs%values_3D_ + case(4) + lhs%values_4D_ = rhs%values_4D_ + case default + error stop "NetCDF_variable_s(double_precision_assign): unsupported rank)" + end select + call assert(lhs%rank()==rhs%rank(), "NetCDF_variable_s(double_precision_assign): ranks match)") + end procedure + end submodule NetCDF_variable_s \ No newline at end of file From 0b0e16a0fd338e57914b2acef252539c62b76bc7 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Thu, 17 Oct 2024 23:51:05 -0700 Subject: [PATCH 06/25] feat(train_cloud_micro): assert no NaN derivatives --- demo/app/train-cloud-microphysics.F90 | 50 +++++++++------------------ demo/src/NetCDF_variable_m.f90 | 14 ++++++++ demo/src/NetCDF_variable_s.f90 | 33 ++++++++++++++++++ 3 files changed, 63 insertions(+), 34 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index 58123a0f9..eea7565c4 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -6,7 +6,6 @@ program train_cloud_microphysics !! https://github.com/BerkeleyLab/icar. !! Intrinic modules : - use ieee_arithmetic, only : ieee_is_nan use iso_fortran_env, only : int64, real64 !! External dependencies: @@ -260,6 +259,7 @@ subroutine read_train_write(training_configuration, args, plot_file) associate(derivative_name => "d" // output_names(v)%string() // "/dt") print *,"- " // derivative_name derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name) + call assert(.not. derivative(v)%any_nan(), "train_cloud_microhphysics: non NaN's") end associate end do end associate @@ -267,38 +267,20 @@ subroutine read_train_write(training_configuration, args, plot_file) end associate - !t_end = size(time_in) - - !associate(dt => real(time_out - time_in)) - ! do concurrent(t = 1:t_end) - ! dpt_dt(:,:,:,t) = (potential_temperature_out(:,:,:,t) - potential_temperature_in(:,:,:,t))/dt(t) - ! dqv_dt(:,:,:,t) = (qv_out(:,:,:,t)- qv_in(:,:,:,t))/dt(t) - ! dqc_dt(:,:,:,t) = (qc_out(:,:,:,t)- qc_in(:,:,:,t))/dt(t) - ! dqr_dt(:,:,:,t) = (qr_out(:,:,:,t)- qr_in(:,:,:,t))/dt(t) - ! dqs_dt(:,:,:,t) = (qs_out(:,:,:,t)- qs_in(:,:,:,t))/dt(t) - ! end do - !end associate - - !call assert(.not. any(ieee_is_nan(dpt_dt)), ".not. any(ieee_is_nan(dpt_dt)") - !call assert(.not. any(ieee_is_nan(dqv_dt)), ".not. 
any(ieee_is_nan(dqv_dt)") - !call assert(.not. any(ieee_is_nan(dqc_dt)), ".not. any(ieee_is_nan(dqc_dt)") - !call assert(.not. any(ieee_is_nan(dqr_dt)), ".not. any(ieee_is_nan(dqr_dt)") - !call assert(.not. any(ieee_is_nan(dqs_dt)), ".not. any(ieee_is_nan(dqs_dt)") - - !train_network: & - !block - ! type(trainable_network_t) trainable_network - ! type(mini_batch_t), allocatable :: mini_batches(:) - ! type(bin_t), allocatable :: bins(:) - ! type(input_output_pair_t), allocatable :: input_output_pairs(:) - ! type(tensor_t), allocatable, dimension(:) :: inputs, outputs - ! real, allocatable :: cost(:) - ! integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step - ! integer(int64) start_training, finish_training + train_network: & + block + type(trainable_network_t) trainable_network + type(mini_batch_t), allocatable :: mini_batches(:) + type(bin_t), allocatable :: bins(:) + type(input_output_pair_t), allocatable :: input_output_pairs(:) + type(tensor_t), allocatable, dimension(:) :: inputs, outputs + real, allocatable :: cost(:) + integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step + integer(int64) start_training, finish_training - ! associate( network_file => args%base_name // "_network.json") - - ! open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') + associate( network_file => args%base_name // "_network.json") + + open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') ! if (allocated(args%end_step)) then ! end_step = args%end_step @@ -482,8 +464,8 @@ subroutine read_train_write(training_configuration, args, plot_file) ! print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & ! args%num_epochs,"epochs" - !end associate ! network_file - !end block train_network + end associate ! 
network_file
+    end block train_network
 
     !close(plot_file%plot_unit)
 
diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90
index 9855bbfdc..c55b446b0 100644
--- a/demo/src/NetCDF_variable_m.f90
+++ b/demo/src/NetCDF_variable_m.f90
@@ -21,6 +21,8 @@ module NetCDF_variable_m
     procedure, private, non_overridable :: default_real_conformable_with, double_precision_conformable_with
     generic :: rank => default_real_rank, double_precision_rank
     procedure, private, non_overridable :: default_real_rank, double_precision_rank
+    generic :: any_nan => default_real_any_nan, double_precision_any_nan
+    procedure, private, non_overridable :: default_real_any_nan, double_precision_any_nan
     generic :: operator(-) => default_real_subtract, double_precision_subtract
     procedure, private, non_overridable :: default_real_subtract, double_precision_subtract
     generic :: operator(/) => default_real_divide, double_precision_divide
@@ -155,6 +157,18 @@ elemental module subroutine double_precision_assign(lhs, rhs)
       type(NetCDF_variable_t(double_precision)), intent(in) :: rhs
     end subroutine
 
+    elemental module function default_real_any_nan(self) result(any_nan)
+      implicit none
+      class(NetCDF_variable_t), intent(in) :: self
+      logical any_nan
+    end function
+
+    elemental module function double_precision_any_nan(self) result(any_nan)
+      implicit none
+      class(NetCDF_variable_t(double_precision)), intent(in) :: self
+      logical any_nan
+    end function
+
   end interface
 
 end module
\ No newline at end of file
diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90
index 15b47864a..cdd4e9fc3 100644
--- a/demo/src/NetCDF_variable_s.f90
+++ b/demo/src/NetCDF_variable_s.f90
@@ -1,6 +1,7 @@
 ! Copyright (c), The Regents of the University of California
 ! Terms of use are as specified in LICENSE.txt
 submodule(NetCDF_variable_m) NetCDF_variable_s
+  use ieee_arithmetic, only : ieee_is_nan
   use kind_parameters_m, only : default_real
  use assert_m, only : assert, intrinsic_array_t
   implicit none
@@ -344,4 +345,36 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds)
     call assert(lhs%rank()==rhs%rank(), "NetCDF_variable_s(double_precision_assign): ranks match")
   end procedure
 
+  module procedure default_real_any_nan
+
+    select case(self%rank())
+    case(1)
+      any_nan = any(ieee_is_nan(self%values_1D_))
+    case(2)
+      any_nan = any(ieee_is_nan(self%values_2D_))
+    case(3)
+      any_nan = any(ieee_is_nan(self%values_3D_))
+    case(4)
+      any_nan = any(ieee_is_nan(self%values_4D_))
+    case default
+      error stop "NetCDF_variable_s(default_real_any_nan): unsupported rank"
+    end select
+  end procedure
+
+  module procedure double_precision_any_nan
+
+    select case(self%rank())
+    case(1)
+      any_nan = any(ieee_is_nan(self%values_1D_))
+    case(2)
+      any_nan = any(ieee_is_nan(self%values_2D_))
+    case(3)
+      any_nan = any(ieee_is_nan(self%values_3D_))
+    case(4)
+      any_nan = any(ieee_is_nan(self%values_4D_))
+    case default
+      error stop "NetCDF_variable_s(double_precision_any_nan): unsupported rank"
+    end select
+  end procedure
+
 end submodule NetCDF_variable_s
\ No newline at end of file

From ba04e9fc1f2f4376494ef85caae11f5399a1dee8 Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Fri, 18 Oct 2024 18:22:58 -0400
Subject: [PATCH 07/25] refac(train-cloud): generalize tensor/tensor-range

---
 demo/app/train-cloud-microphysics.F90 | 149 ++++++++++-----------
 demo/src/NetCDF_variable_m.f90        |  83 ++++++++++--
 demo/src/NetCDF_variable_s.f90        | 178 ++++++++++++++++++++++
 3 files changed, 319 insertions(+), 91 deletions(-)

diff 
--git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index eea7565c4..fbdff9f73 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -18,7 +18,7 @@ program train_cloud_microphysics !! Internal dependencies: use phase_space_bin_m, only : phase_space_bin_t use NetCDF_file_m, only: NetCDF_file_t - use NetCDF_variable_m, only: NetCDF_variable_t + use NetCDF_variable_m, only: NetCDF_variable_t, tensors implicit none character(len=*), parameter :: usage = new_line('a') // new_line('a') // & @@ -31,7 +31,7 @@ program train_cloud_microphysics 'The presence of a file named "stop" halts execution gracefully.' type command_line_arguments_t - integer num_epochs, start_step, stride, num_bins, report_interval + integer num_epochs, start_step, stride, num_bins, report_step integer, allocatable :: end_step character(len=:), allocatable :: base_name real cost_tolerance @@ -105,7 +105,7 @@ function get_command_line_arguments() result(command_line_arguments) base_name, epochs_string, start_string, end_string, stride_string, bins_string, report_string, tolerance_string real cost_tolerance integer, allocatable :: end_step - integer num_epochs, num_bins, start_step, stride, report_interval + integer num_epochs, num_bins, start_step, stride, report_step base_name = command_line%flag_value("--base") ! gfortran 13 seg faults if this is an association epochs_string = command_line%flag_value("--epochs") @@ -122,54 +122,24 @@ function get_command_line_arguments() result(command_line_arguments) read(epochs_string,*) num_epochs - if (len(stride_string)==0) then - stride = 1 - else - read(stride_string,*) stride - end if - - if (len(start_string)==0) then - start_step = 1 - else - read(start_string,*) start_step - end if - - if (len(report_string)==0) then - report_interval = 1 - else - read(report_string,*) report_interval - end if - - if (len(bins_string)/=0) then - read(bins_string,*) num_bins - else - num_bins = 1 - end if + stride = default_integer_or_read(1, stride_string) + start_step = default_integer_or_read(1, start_string) + report_step = default_integer_or_read(1, report_string) + num_bins = default_integer_or_read(1, bins_string) + cost_tolerance = default_real_or_read(5E-8, tolerance_string) if (len(end_string)/=0) then allocate(end_step) read(end_string,*) end_step end if - if (len(start_string)==0) then - start_step = 1 - else - read(start_string,*) start_step - end if - - if (len(tolerance_string)==0) then - cost_tolerance = 5.0E-08 - else - read(tolerance_string,*) cost_tolerance - end if - if (allocated(end_step)) then command_line_arguments = command_line_arguments_t( & - num_epochs, start_step, stride, num_bins, report_interval, end_step, base_name, cost_tolerance & + num_epochs, start_step, stride, num_bins, report_step, end_step, base_name, cost_tolerance & ) else command_line_arguments = command_line_arguments_t( & - num_epochs, start_step, stride, num_bins, report_interval, null(), base_name, cost_tolerance & + num_epochs, start_step, stride, num_bins, report_step, null(), base_name, cost_tolerance & ) end if @@ -194,15 +164,17 @@ subroutine read_train_write(training_configuration, args, plot_file) enumerator :: dpotential_temperature_t=1, dqv_dt, dqc_dt, dqr_dt, dqs_dt end enum - associate(input_names => & - [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & - string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & - ) + !associate(input_names => & + ! 
[string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & + ! string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & + !) + associate(input_names => [string_t("qv"), string_t("qc")]) + allocate(input_variable(size(input_names))) associate(input_file_name => args%base_name // "_input.nc") - print *,"Reading network inputs from " // input_file_name + print *,"Reading physics-based model inputs from " // input_file_name associate(input_file => netCDF_file_t(input_file_name)) @@ -222,13 +194,14 @@ subroutine read_train_write(training_configuration, args, plot_file) end associate end associate - associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) + !associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) + associate(output_names => [string_t("qv"), string_t("qc")]) allocate(output_variable(size(output_names))) associate(output_file_name => args%base_name // "_output.nc") - print *,"Reading network outputs from " // output_file_name + print *,"Reading physics-based model outputs from " // output_file_name associate(output_file => netCDF_file_t(output_file_name)) @@ -252,7 +225,7 @@ subroutine read_train_write(training_configuration, args, plot_file) block type(NetCDF_variable_t) derivative(size(output_variable)) - print *,"Calculating time derivatives" + print *,"Calculating desired neural-network model outputs" associate(dt => NetCDF_variable_t(output_time - input_time, "dt")) do v = 1, size(derivative) @@ -275,45 +248,33 @@ subroutine read_train_write(training_configuration, args, plot_file) type(input_output_pair_t), allocatable :: input_output_pairs(:) type(tensor_t), allocatable, dimension(:) :: inputs, outputs real, allocatable :: cost(:) - integer i, lon, lat, level, time, network_unit, io_status, epoch, end_step + integer i, network_unit, io_status, epoch, end_step integer(int64) start_training, finish_training associate( network_file => args%base_name // "_network.json") open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') - ! if (allocated(args%end_step)) then - ! end_step = args%end_step - ! else - ! end_step = t_end - ! end if - - ! print *,"Defining tensors from time step", args%start_step, "through", end_step, "with strides of", args%stride + if (allocated(args%end_step)) then + end_step = args%end_step + else + end_step = input_variable(1)%end_step() + end if - ! ! The following temporary copies are required by gfortran bug 100650 and possibly 49324 - ! ! See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100650 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49324 - ! inputs = [( [( [( [( & - ! tensor_t( & - ! [ pressure_in(lon,lat,level,time), potential_temperature_in(lon,lat,level,time), temperature_in(lon,lat,level,time), & - ! qv_in(lon,lat,level,time), qc_in(lon,lat,level,time), qr_in(lon,lat,level,time), qs_in(lon,lat,level,time) & - ! ] & - ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - ! time = args%start_step, end_step, args%stride)] - - ! outputs = [( [( [( [( & - ! tensor_t( & - ! [dpt_dt(lon,lat,level,time), dqv_dt(lon,lat,level,time), dqc_dt(lon,lat,level,time), dqr_dt(lon,lat,level,time), & - ! dqs_dt(lon,lat,level,time) & - ! ] & - ! ), lon = 1, size(qv_in,1))], lat = 1, size(qv_in,2))], level = 1, size(qv_in,3))], & - ! 
time = args%start_step, end_step, args%stride)] + print *,"Defining input tensors starting from time step", args%start_step, "through", end_step, "with strides of", args%stride + inputs = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride) + + print *,"Defining output tensors starting from time step", args%start_step, "through", end_step, "with strides of", args%stride + outputs = tensors(output_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride) - ! print *,"Calculating output tensor component ranges." - ! output_extrema: & - ! associate( & - ! output_minima => [minval(dpt_dt), minval(dqv_dt), minval(dqc_dt), minval(dqr_dt), minval(dqs_dt)], & - ! output_maxima => [maxval(dpt_dt), maxval(dqv_dt), maxval(dqc_dt), maxval(dqr_dt), maxval(dqs_dt)] & - ! ) + print *,"Calculating output tensor component ranges." + tensor_extrema: & + associate( & + input_minima => [( input_variable(v)%minimum(), v=1,size( input_variable) )] & + ,input_maxima => [( input_variable(v)%maximum(), v=1,size( input_variable) )] & + ,output_minima => [( output_variable(v)%minimum(), v=1,size(output_variable) )] & + ,output_maxima => [( output_variable(v)%maximum(), v=1,size(output_variable) )] & + ) ! output_map: & ! associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) ! read_or_initialize_network: & @@ -376,7 +337,7 @@ subroutine read_train_write(training_configuration, args, plot_file) ! " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." ! end block ! end associate output_map - ! end associate output_extrema + end associate tensor_extrema ! print *,"Normalizing the remaining input and output tensors" ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) @@ -423,7 +384,7 @@ subroutine read_train_write(training_configuration, args, plot_file) ! associate(converged => average_cost <= args%cost_tolerance) ! image_1_maybe_writes: & - ! if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_interval)==0])) then + ! if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then ! print *, epoch, average_cost ! 
write(plot_file%plot_unit,*) epoch, average_cost @@ -471,4 +432,30 @@ subroutine read_train_write(training_configuration, args, plot_file) end subroutine read_train_write + pure function default_integer_or_read(default, string) result(set_value) + integer, intent(in) :: default + character(len=*), intent(in) :: string + integer set_value + + if (len(string)==0) then + set_value = default + else + read(string,*) set_value + end if + + end function + + pure function default_real_or_read(default, string) result(set_value) + real, intent(in) :: default + character(len=*), intent(in) :: string + real set_value + + if (len(string)==0) then + set_value = default + else + read(string,*) set_value + end if + + end function + end program train_cloud_microphysics diff --git a/demo/src/NetCDF_variable_m.f90 b/demo/src/NetCDF_variable_m.f90 index c55b446b0..113145236 100644 --- a/demo/src/NetCDF_variable_m.f90 +++ b/demo/src/NetCDF_variable_m.f90 @@ -4,10 +4,12 @@ module NetCDF_variable_m use NetCDF_file_m, only : NetCDF_file_t use kind_parameters_m, only : default_real, double_precision use julienne_m, only : string_t + use fiats_m, only : tensor_t implicit none private public :: NetCDF_variable_t + public :: tensors type NetCDF_variable_t(k) integer, kind :: k = default_real @@ -19,16 +21,22 @@ module NetCDF_variable_m procedure, private, non_overridable :: default_real_input, double_precision_input, default_real_input_character_name, double_precision_input_character_name generic :: conformable_with => default_real_conformable_with, double_precision_conformable_with procedure, private, non_overridable :: default_real_conformable_with, double_precision_conformable_with - generic :: rank => default_real_rank, double_precision_rank - procedure, private, non_overridable :: default_real_rank, double_precision_rank - generic :: any_nan => default_real_any_nan, double_precision_any_nan - procedure, private, non_overridable :: default_real_any_nan, double_precision_any_nan - generic :: operator(-) => default_real_subtract, double_precision_subtract - procedure, private, non_overridable :: default_real_subtract, double_precision_subtract - generic :: operator(/) => default_real_divide, double_precision_divide - procedure, private, non_overridable :: default_real_divide, double_precision_divide - generic :: assignment(=) => default_real_assign, double_precision_assign - procedure, private, non_overridable :: default_real_assign, double_precision_assign + generic :: rank => default_real_rank , double_precision_rank + procedure, private, non_overridable :: default_real_rank , double_precision_rank + generic :: end_step => default_real_end_step , double_precision_end_step + procedure, private, non_overridable :: default_real_end_step , double_precision_end_step + generic :: any_nan => default_real_any_nan , double_precision_any_nan + procedure, private, non_overridable :: default_real_any_nan , double_precision_any_nan + generic :: minimum => default_real_minimum , double_precision_minimum + procedure, private, non_overridable :: default_real_minimum , double_precision_minimum + generic :: maximum => default_real_maximum , double_precision_maximum + procedure, private, non_overridable :: default_real_maximum , double_precision_maximum + generic :: operator(-) => default_real_subtract , double_precision_subtract + procedure, private, non_overridable :: default_real_subtract , double_precision_subtract + generic :: operator(/) => default_real_divide , double_precision_divide + procedure, private, non_overridable :: 
default_real_divide , double_precision_divide + generic :: assignment(=) => default_real_assign , double_precision_assign + procedure, private, non_overridable :: default_real_assign , double_precision_assign end type interface NetCDF_variable_t @@ -121,6 +129,18 @@ elemental module function double_precision_rank(self) result(my_rank) integer my_rank end function + elemental module function default_real_end_step(self) result(end_step) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + integer end_step + end function + + elemental module function double_precision_end_step(self) result(end_step) + implicit none + class(NetCDF_variable_t(double_precision)), intent(inout) :: self + integer end_step + end function + elemental module function default_real_subtract(lhs, rhs) result(difference) implicit none class(NetCDF_variable_t), intent(in) :: lhs, rhs @@ -169,6 +189,49 @@ elemental module function double_precision_any_nan(self) result(any_nan) logical any_nan end function + elemental module function default_real_minimum(self) result(minimum) + implicit none + class(NetCDF_variable_t), intent(in) :: self + real minimum + end function + + elemental module function double_precision_minimum(self) result(minimum) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self + real minimum + end function + + elemental module function default_real_maximum(self) result(maximum) + implicit none + class(NetCDF_variable_t), intent(in) :: self + real maximum + end function + + elemental module function double_precision_maximum(self) result(maximum) + implicit none + class(NetCDF_variable_t(double_precision)), intent(in) :: self + real maximum + end function + + module function tensors(NetCDF_variables, step_start, step_end, step_stride) + implicit none + type(NetCDF_variable_t), intent(in) :: NetCDF_variables(:) + type(tensor_t), allocatable :: tensors(:) + integer, optional :: step_start, step_end, step_stride + end function + + elemental module function default_real_end_time(self) result(end_time) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + integer end_time + end function + + elemental module function double_precision_end_time(self) result(end_time) + implicit none + class(NetCDF_variable_t), intent(inout) :: self + integer end_time + end function + end interface end module \ No newline at end of file diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90 index cdd4e9fc3..0e7dec711 100644 --- a/demo/src/NetCDF_variable_s.f90 +++ b/demo/src/NetCDF_variable_s.f90 @@ -21,6 +21,11 @@ module procedure double_precision_upper_bounds end interface + interface default_if_not_present + module procedure default_integer_if_not_present + module procedure default_real_if_not_present + end interface + contains module procedure default_real_copy @@ -149,6 +154,36 @@ pure function double_precision_components_allocated(NetCDF_variable) result(allo end associate end procedure + module procedure default_real_end_step + select case(self%rank()) + case(1) + end_step = ubound(self%values_1D_,1) + case(2) + end_step = ubound(self%values_2D_,2) + case(3) + end_step = ubound(self%values_3D_,3) + case(4) + end_step = ubound(self%values_4D_,4) + case default + error stop "NetCDF_variable_s(default_real_end_step): unsupported rank" + end select + end procedure + + module procedure double_precision_end_step + select case(self%rank()) + case(1) + end_step = ubound(self%values_1D_,1) + case(2) + end_step = ubound(self%values_2D_,2) + case(3) + end_step 
= ubound(self%values_3D_,3) + case(4) + end_step = ubound(self%values_4D_,4) + case default + error stop "NetCDF_variable_s(double_precision_end_step): unsupported rank" + end select + end procedure + pure function default_real_lower_bounds(NetCDF_variable) result(lbounds) type(NetCDF_variable_t), intent(in) :: NetCDF_variable integer, allocatable :: lbounds(:) @@ -377,4 +412,147 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds) end select end procedure + module procedure tensors + + integer t_start, t_end, t_stride + + select case(NetCDF_variables(1)%rank()) + case(4) + + t_start = default_if_not_present(1, step_start ) + t_stride = default_if_not_present(1, step_stride) + t_end = default_if_not_present(size(NetCDF_variables(1)%values_4D_,4), step_end) + + associate( longitudes => size(NetCDF_variables(1)%values_4D_,1) & + ,latitudes => size(NetCDF_variables(1)%values_4D_,2) & + ,levels => size(NetCDF_variables(1)%values_4D_,3) & + ) + block + integer v, lon, lat, lev, time + + tensors = [( [( [( [( tensor_t( [( NetCDF_variables(v)%values_4D_(lon,lat,lev,time), v=1,size(NetCDF_variables) )] ), & + lon = 1, longitudes)], lat = 1, latitudes)], lev = 1, levels)], time = t_start, t_end, t_stride)] + end block + end associate + + case default + error stop "NetCDF_variable_s(tensors): unsupported rank)" + end select + + end procedure + + module procedure default_real_end_time + select case(self%rank()) + case (1) + end_time = size(self%values_1D_,1) + case (2) + end_time = size(self%values_2D_,2) + case (3) + end_time = size(self%values_3D_,3) + case (4) + end_time = size(self%values_4D_,4) + case default + error stop 'NetCDF_variable_s(default_real_end_time): unsupported rank' + end select + end procedure + + module procedure double_precision_end_time + select case(self%rank()) + case (1) + end_time = size(self%values_1D_,1) + case (2) + end_time = size(self%values_2D_,2) + case (3) + end_time = size(self%values_3D_,3) + case (4) + end_time = size(self%values_4D_,4) + case default + error stop 'NetCDF_variable_s(double_precision_end_time): unsupported rank' + end select + end procedure + + module procedure default_real_minimum + select case(self%rank()) + case (1) + minimum = minval(self%values_1D_) + case (2) + minimum = minval(self%values_2D_) + case (3) + minimum = minval(self%values_3D_) + case (4) + minimum = minval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(default_real_minimum): unsupported rank' + end select + end procedure + + module procedure double_precision_minimum + select case(self%rank()) + case (1) + minimum = minval(self%values_1D_) + case (2) + minimum = minval(self%values_2D_) + case (3) + minimum = minval(self%values_3D_) + case (4) + minimum = minval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(double_precision_minimum): unsupported rank' + end select + end procedure + + module procedure default_real_maximum + select case(self%rank()) + case (1) + maximum = maxval(self%values_1D_) + case (2) + maximum = maxval(self%values_2D_) + case (3) + maximum = maxval(self%values_3D_) + case (4) + maximum = maxval(self%values_4D_) + case default + error stop 'NetCDF_variable_s(default_real_maximum): unsupported rank' + end select + end procedure + + module procedure double_precision_maximum + select case(self%rank()) + case (1) + maximum = maxval(self%values_1D_) + case (2) + maximum = maxval(self%values_2D_) + case (3) + maximum = maxval(self%values_3D_) + case (4) + maximum = maxval(self%values_4D_) + case default 
+ error stop 'NetCDF_variable_s(double_precision_maximum): unsupported rank' + end select + end procedure + + pure function default_integer_if_not_present(default_value, optional_argument) result(set_value) + integer, intent(in) :: default_value + integer, intent(in), optional :: optional_argument + integer set_value + + if (present(optional_argument)) then + set_value = optional_argument + else + set_value = default_value + end if + end function + + pure function default_real_if_not_present(default_value, optional_argument) result(set_value) + real, intent(in) :: default_value + real, intent(in), optional :: optional_argument + real set_value + + if (present(optional_argument)) then + set_value = optional_argument + else + set_value = default_value + end if + end function + end submodule NetCDF_variable_s \ No newline at end of file From e49011049f505be4ab603b912d13223f16ce5999 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Tue, 22 Oct 2024 20:24:39 -0700 Subject: [PATCH 08/25] refac(train-cloud): generalize max-entropy filter --- demo/app/train-cloud-microphysics.F90 | 299 ++++++++++++------------- demo/src/occupancy_m.f90 | 75 +++++++ demo/src/occupancy_s.f90 | 261 +++++++++++++++++++++ src/fiats/tensor_map_m.f90 | 44 +++- src/fiats/tensor_map_s.f90 | 16 ++ src/fiats/trainable_network_m.f90 | 2 +- src/fiats/training_configuration_m.f90 | 32 +-- src/fiats/training_configuration_s.F90 | 4 +- 8 files changed, 554 insertions(+), 179 deletions(-) create mode 100644 demo/src/occupancy_m.f90 create mode 100644 demo/src/occupancy_s.f90 diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index fbdff9f73..0fef13830 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -19,6 +19,7 @@ program train_cloud_microphysics use phase_space_bin_m, only : phase_space_bin_t use NetCDF_file_m, only: NetCDF_file_t use NetCDF_variable_m, only: NetCDF_variable_t, tensors + use occupancy_m, only : occupancy_t implicit none character(len=*), parameter :: usage = new_line('a') // new_line('a') // & @@ -125,7 +126,7 @@ function get_command_line_arguments() result(command_line_arguments) stride = default_integer_or_read(1, stride_string) start_step = default_integer_or_read(1, start_string) report_step = default_integer_or_read(1, report_string) - num_bins = default_integer_or_read(1, bins_string) + num_bins = default_integer_or_read(3, bins_string) cost_tolerance = default_real_or_read(5E-8, tolerance_string) if (len(end_string)/=0) then @@ -149,27 +150,24 @@ subroutine read_train_write(training_configuration, args, plot_file) type(training_configuration_t), intent(in) :: training_configuration type(command_line_arguments_t), intent(in) :: args type(plot_file_t), intent(in), optional :: plot_file - type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:) + type(NetCDF_variable_t), allocatable :: input_variable(:), output_variable(:), derivative(:) type(NetCDF_variable_t) input_time, output_time ! 
local variables: - integer t, b, t_end, v + type(trainable_network_t) trainable_network + type(mini_batch_t), allocatable :: mini_batches(:) + type(bin_t), allocatable :: bins(:) + type(input_output_pair_t), allocatable :: input_output_pairs(:) + type(tensor_t), allocatable, dimension(:) :: input_tensors, output_tensors + real, allocatable :: cost(:) + integer i, network_unit, io_status, epoch, end_step, t, b, t_end, v + integer(int64) start_training, finish_training logical stop_requested - enum, bind(C) - enumerator :: pressure=1, potential_temperature, temperature, qv, qc, qr, qs - end enum - - enum, bind(C) - enumerator :: dpotential_temperature_t=1, dqv_dt, dqc_dt, dqr_dt, dqs_dt - end enum - - !associate(input_names => & - ! [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & - ! string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & - !) - associate(input_names => [string_t("qv"), string_t("qc")]) - + associate(input_names => & + [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & + string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & + ) allocate(input_variable(size(input_names))) associate(input_file_name => args%base_name // "_input.nc") @@ -194,8 +192,7 @@ subroutine read_train_write(training_configuration, args, plot_file) end associate end associate - !associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) - associate(output_names => [string_t("qv"), string_t("qc")]) + associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) allocate(output_variable(size(output_names))) @@ -222,143 +219,139 @@ subroutine read_train_write(training_configuration, args, plot_file) end associate end associate - block - type(NetCDF_variable_t) derivative(size(output_variable)) - - print *,"Calculating desired neural-network model outputs" + print *,"Calculating desired neural-network model outputs" - associate(dt => NetCDF_variable_t(output_time - input_time, "dt")) - do v = 1, size(derivative) - associate(derivative_name => "d" // output_names(v)%string() // "/dt") - print *,"- " // derivative_name - derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name) - call assert(.not. derivative(v)%any_nan(), "train_cloud_microhphysics: non NaN's") - end associate - end do - end associate - end block + allocate(derivative, mold=output_variable) + associate(dt => NetCDF_variable_t(output_time - input_time, "dt")) + do v = 1, size(derivative) + associate(derivative_name => "d" // output_names(v)%string() // "/dt") + print *,"- " // derivative_name + derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name) + call assert(.not. 
derivative(v)%any_nan(), "train_cloud_microhphysics: non NaN's") + end associate + end do + end associate end associate - train_network: & - block - type(trainable_network_t) trainable_network - type(mini_batch_t), allocatable :: mini_batches(:) - type(bin_t), allocatable :: bins(:) - type(input_output_pair_t), allocatable :: input_output_pairs(:) - type(tensor_t), allocatable, dimension(:) :: inputs, outputs - real, allocatable :: cost(:) - integer i, network_unit, io_status, epoch, end_step - integer(int64) start_training, finish_training - + if (allocated(args%end_step)) then + end_step = args%end_step + else + end_step = input_variable(1)%end_step() + end if + + print *,"Defining input tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride + input_tensors = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride) + + print *,"Defining output tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride + output_tensors = tensors(derivative, step_start = args%start_step, step_end = end_step, step_stride = args%stride) + + associate( & + output_map => tensor_map_t( & + layer = "outputs" & + ,minima = [( derivative(v)%minimum(), v=1, size(derivative) )] & + ,maxima = [( derivative(v)%maximum(), v=1, size(derivative) )] & + )) + train_network: & + block + associate( network_file => args%base_name // "_network.json") - - open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') - - if (allocated(args%end_step)) then - end_step = args%end_step - else - end_step = input_variable(1)%end_step() - end if - - print *,"Defining input tensors starting from time step", args%start_step, "through", end_step, "with strides of", args%stride - inputs = tensors(input_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride) - - print *,"Defining output tensors starting from time step", args%start_step, "through", end_step, "with strides of", args%stride - outputs = tensors(output_variable, step_start = args%start_step, step_end = end_step, step_stride = args%stride) - - print *,"Calculating output tensor component ranges." - tensor_extrema: & - associate( & - input_minima => [( input_variable(v)%minimum(), v=1,size( input_variable) )] & - ,input_maxima => [( input_variable(v)%maximum(), v=1,size( input_variable) )] & - ,output_minima => [( output_variable(v)%minimum(), v=1,size(output_variable) )] & - ,output_maxima => [( output_variable(v)%maximum(), v=1,size(output_variable) )] & - ) - ! output_map: & - ! associate( output_map => tensor_map_t(layer = "outputs", minima = output_minima, maxima = output_maxima)) - ! read_or_initialize_network: & - ! if (io_status==0) then - ! print *,"Reading network from file " // network_file - ! trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) - ! close(network_unit) - ! else - ! close(network_unit) - - ! initialize_network: & - ! block - ! character(len=len('YYYYMMDD')) date - - ! call date_and_time(date) - - ! print *,"Calculating input tensor component ranges." - ! associate( & - ! input_map => tensor_map_t( & - ! layer = "inputs", & - ! minima = [minval(pressure_in), minval(potential_temperature_in), minval(temperature_in), & - ! minval(qv_in), minval(qc_in), minval(qr_in), minval(qs_in)], & - ! maxima = [maxval(pressure_in), maxval(potential_temperature_in), maxval(temperature_in), & - ! 
maxval(qv_in), maxval(qc_in), maxval(qr_in), maxval(qs_in)] & - ! ) ) - ! associate(activation => training_configuration%differentiable_activation()) - ! associate(residual_network=> string_t(trim(merge("true ", "false", training_configuration%skip_connections())))) - ! trainable_network = trainable_network_t( & - ! training_configuration, & - ! perturbation_magnitude = 0.05, & - ! metadata = [ & - ! string_t("Simple microphysics"), string_t("train-on-flat-dist"), string_t(date), & - ! activation%function_name(), residual_network & - ! ], input_map = input_map, output_map = output_map & - ! ) - ! end associate - ! end associate - ! end associate ! input_map, date_string - ! end block initialize_network - ! end if read_or_initialize_network - - ! print *, "Conditionally sampling for a flat distribution of output values" - ! block - ! integer i - ! logical occupied(argsnum_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) - ! logical keepers(size(outputs)) - ! type(phase_space_bin_t), allocatable :: bin(:) - ! occupied = .false. - ! keepers = .false. - - ! bin = [(phase_space_bin_t(outputs(i), output_minima, output_maxima, args%num_bins), i=1,size(outputs))] - - ! do i = 1, size(outputs) - ! if (occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5))) cycle - ! occupied(bin(i)%loc(1),bin(i)%loc(2),bin(i)%loc(3),bin(i)%loc(4),bin(i)%loc(5)) = .true. - ! keepers(i) = .true. - ! end do - ! input_output_pairs = input_output_pair_t(pack(inputs, keepers), pack(outputs, keepers)) - ! print *, "Kept ", size(input_output_pairs), " out of ", size(outputs, kind=int64), " input/output pairs " // & - ! " in ", count(occupied)," out of ", size(occupied, kind=int64), " bins." - ! end block - ! end associate output_map - end associate tensor_extrema - - ! print *,"Normalizing the remaining input and output tensors" - ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - - ! associate( & - ! num_pairs => size(input_output_pairs), & - ! n_bins => training_configuration%mini_batches(), & - ! adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & - ! learning_rate => training_configuration%learning_rate() & - ! ) - ! bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - - ! print *,"Training network" - ! print *, " Epoch Cost (avg)" - - ! call system_clock(start_training) - ! - ! train_write_and_maybe_exit: & - ! block - ! integer first_epoch - ! 
integer me + + open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') + + read_or_initialize_network: & + if (io_status==0) then + print *,"Reading network from file " // network_file + trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) + close(network_unit) + else + close(network_unit) + + initialize_network: & + block + character(len=len('YYYYMMDD')) date + + call date_and_time(date) + + print *,"Defining a new network from training_configuration_t and tensor_map_t objects" + + associate(activation => training_configuration%activation()) + trainable_network = trainable_network_t( & + training_configuration & + ,perturbation_magnitude = 0.05 & + ,metadata = [ & + string_t("ICAR microphysics" ) & + ,string_t("max-entropy-filter") & + ,string_t(date ) & + ,activation%function_name( ) & + ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) & + ] & + ,input_map = tensor_map_t( & + layer = "inputs" & + ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & + ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & + ) & + ,output_map = output_map & + ) + end associate + end block initialize_network + + end if read_or_initialize_network + + print *, "Conditionally sampling for a flat distribution of output values" + + flatten_histogram: & + block + integer i + !logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) + logical occupied(args%num_bins, args%num_bins) + logical keepers(size(output_tensors)) + type(phase_space_bin_t), allocatable :: bin(:) + type(occupancy_t) occupancy + + ! Determine the phase-space bin that holds each output tensor + bin = [(phase_space_bin_t(output_tensors(i), output_map%minima(), output_map%maxima(), args%num_bins), i = 1, size(output_tensors))] + call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] ) + + keepers = .false. + + do i = 1, size(output_tensors) + if (occupancy%occupied(bin(i)%loc)) cycle + call occupancy%occupy(bin(i)%loc) + keepers(i) = .true. + end do + input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers)) + print '(*(a,i))' & + ," Keeping " , size(input_output_pairs, kind=int64) & + ," out of " , size(output_tensors, kind=int64) & + ," input/output pairs in ", occupancy%num_occupied() & + ," out of " , occupancy%num_bins() & + ," bins." + + end block flatten_histogram + + end associate + + ! print *,"Normalizing the remaining input and output tensors" + ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) + + ! associate( & + ! num_pairs => size(input_output_pairs), & + ! n_bins => training_configuration%mini_batches(), & + ! adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & + ! learning_rate => training_configuration%learning_rate() & + ! ) + ! bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] + + ! print *,"Training network" + ! print *, " Epoch Cost (avg)" + + ! call system_clock(start_training) + ! + ! train_write_and_maybe_exit: & + ! block + ! integer first_epoch + ! integer me #if defined(MULTI_IMAGE_SUPPORT) @@ -425,9 +418,11 @@ subroutine read_train_write(training_configuration, args, plot_file) ! print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & ! args%num_epochs,"epochs" - end associate ! network_file + ! 
end associate end block train_network + end associate + !close(plot_file%plot_unit) end subroutine read_train_write diff --git a/demo/src/occupancy_m.f90 b/demo/src/occupancy_m.f90 new file mode 100644 index 000000000..15b2ef247 --- /dev/null +++ b/demo/src/occupancy_m.f90 @@ -0,0 +1,75 @@ +module occupancy_m + use iso_fortran_env, only : int64 + implicit none + + private + public :: occupancy_t + + type occupancy_t + private + logical, allocatable :: occupied_1D_(:) + logical, allocatable :: occupied_2D_(:,:) + logical, allocatable :: occupied_3D_(:,:,:) + logical, allocatable :: occupied_4D_(:,:,:,:) + logical, allocatable :: occupied_5D_(:,:,:,:,:) + logical, allocatable :: occupied_6D_(:,:,:,:,:,:) + logical, allocatable :: occupied_7D_(:,:,:,:,:,:,:) + logical, allocatable :: occupied_8D_(:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_9D_(:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_10D_(:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_11D_(:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_12D_(:,:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_13D_(:,:,:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_14D_(:,:,:,:,:,:,:,:,:,:,:,:,:,:) + logical, allocatable :: occupied_15D_(:,:,:,:,:,:,:,:,:,:,:,:,:,:,:) + contains + procedure, non_overridable :: vacate + procedure, non_overridable :: occupy + procedure, non_overridable :: occupied + procedure, non_overridable :: num_occupied + procedure, non_overridable :: num_bins + procedure, non_overridable :: allocated_dim + end type + + interface + + pure module subroutine vacate(self, dims) + implicit none + class(occupancy_t), intent(inout) :: self + integer, intent(in) :: dims(:) + end subroutine + + pure module subroutine occupy(self, loc) + implicit none + class(occupancy_t), intent(inout) :: self + integer, intent(in) :: loc(:) + end subroutine + + pure module function occupied(self, loc) result(bin_occupied) + implicit none + class(occupancy_t), intent(in) :: self + integer, intent(in) :: loc(:) + logical bin_occupied + end function + + pure module function num_occupied(self) result(bins_occupied) + implicit none + class(occupancy_t), intent(in) :: self + integer(int64) bins_occupied + end function + + pure module function num_bins(self) result(bins_total) + implicit none + class(occupancy_t), intent(in) :: self + integer(int64) bins_total + end function + + pure module function allocated_dim(self) result(my_dim) + implicit none + class(occupancy_t), intent(in) :: self + integer my_dim + end function + + end interface + +end module occupancy_m diff --git a/demo/src/occupancy_s.f90 b/demo/src/occupancy_s.f90 new file mode 100644 index 000000000..3b4e4a44e --- /dev/null +++ b/demo/src/occupancy_s.f90 @@ -0,0 +1,261 @@ +submodule(occupancy_m) occupancy_s + use assert_m, only : assert, intrinsic_array_t + implicit none + +contains + + pure function allocations(occupancy) result(components_allocated) + + type(occupancy_t), intent(in) :: occupancy + logical, allocatable :: components_allocated(:) + + components_allocated = [ & + allocated(occupancy%occupied_1D_ ) & + ,allocated(occupancy%occupied_2D_ ) & + ,allocated(occupancy%occupied_3D_ ) & + ,allocated(occupancy%occupied_4D_ ) & + ,allocated(occupancy%occupied_5D_ ) & + ,allocated(occupancy%occupied_6D_ ) & + ,allocated(occupancy%occupied_7D_ ) & + ,allocated(occupancy%occupied_8D_ ) & + ,allocated(occupancy%occupied_9D_ ) & + ,allocated(occupancy%occupied_10D_) & + ,allocated(occupancy%occupied_11D_) & + ,allocated(occupancy%occupied_12D_) & + 
,allocated(occupancy%occupied_13D_) & + ,allocated(occupancy%occupied_14D_) & + ,allocated(occupancy%occupied_15D_) & + ] + + end function allocations + + module procedure vacate + + select case(size(dims)) + case(1) + if (allocated(self%occupied_1D_)) deallocate(self%occupied_1D_) + allocate(self%occupied_1D_(dims(1)), source = .false.) + case(2) + if (allocated(self%occupied_2D_)) deallocate(self%occupied_2D_) + allocate(self%occupied_2D_(dims(1),dims(2)), source = .false.) + case(3) + if (allocated(self%occupied_3D_)) deallocate(self%occupied_3D_) + allocate(self%occupied_3D_(dims(1),dims(2),dims(3)), source = .false.) + case(4) + if (allocated(self%occupied_4D_)) deallocate(self%occupied_4D_) + allocate(self%occupied_4D_(dims(1),dims(2),dims(3),dims(4)), source = .false.) + case(5) + if (allocated(self%occupied_5D_)) deallocate(self%occupied_5D_) + allocate(self%occupied_5D_(dims(1),dims(2),dims(3),dims(4),dims(5)), source = .false.) + case(6) + if (allocated(self%occupied_6D_)) deallocate(self%occupied_6D_) + allocate(self%occupied_6D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6)), source = .false.) + case(7) + if (allocated(self%occupied_7D_)) deallocate(self%occupied_7D_) + allocate(self%occupied_7D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7)), source = .false.) + case(8) + if (allocated(self%occupied_8D_)) deallocate(self%occupied_8D_) + allocate(self%occupied_8D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8)), source = .false.) + case(9) + if (allocated(self%occupied_9D_)) deallocate(self%occupied_9D_) + allocate(self%occupied_9D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9)), source = .false.) + case(10) + if (allocated(self%occupied_10D_)) deallocate(self%occupied_10D_) + allocate(self%occupied_10D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10)), source = .false.) + case(11) + if (allocated(self%occupied_11D_)) deallocate(self%occupied_11D_) + allocate(self%occupied_11D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11)), source = .false.) + case(12) + if (allocated(self%occupied_12D_)) deallocate(self%occupied_12D_) + allocate(self%occupied_12D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12)), source = .false.) + case(13) + if (allocated(self%occupied_13D_)) deallocate(self%occupied_13D_) + allocate(self%occupied_13D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13)), source = .false.) + case(14) + if (allocated(self%occupied_14D_)) deallocate(self%occupied_14D_) + allocate(self%occupied_14D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13),dims(14)), source = .false.) + case(15) + if (allocated(self%occupied_15D_)) deallocate(self%occupied_15D_) + allocate(self%occupied_15D_(dims(1),dims(2),dims(3),dims(4),dims(5),dims(6),dims(7),dims(8),dims(9),dims(10),dims(11),dims(12),dims(13),dims(14),dims(15)), source= .false.) + case default + error stop "occupancy_s(vacate): unsupported rank" + end select + + call assert(self%allocated_dim()==size(dims), "occupancy_s(vacate): count(self%allocations()) == 1") + + end procedure vacate + + module procedure occupy + + associate_o: & + associate(o => (loc)) + select case(size(loc)) + case(1) + self%occupied_1D_(o(1)) = .true. + case(2) + self%occupied_2D_(o(1),o(2)) = .true. + case(3) + self%occupied_3D_(o(1),o(2),o(3)) = .true. 
+ case(4) + self%occupied_4D_(o(1),o(2),o(3),o(4)) = .true. + case(5) + self%occupied_5D_(o(1),o(2),o(3),o(4),o(5)) = .true. + case(6) + self%occupied_6D_(o(1),o(2),o(3),o(4),o(5),o(6)) = .true. + case(7) + self%occupied_7D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7)) = .true. + case(8) + self%occupied_8D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8)) = .true. + case(9) + self%occupied_9D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9)) = .true. + case(10) + self%occupied_10D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10)) = .true. + case(11) + self%occupied_11D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11)) = .true. + case(12) + self%occupied_12D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12)) = .true. + case(13) + self%occupied_13D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13)) = .true. + case(14) + self%occupied_14D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13),o(14)) = .true. + case(15) + self%occupied_15D_(o(1),o(2),o(3),o(4),o(5),o(6),o(7),o(8),o(9),o(10),o(11),o(12),o(13),o(14),o(15)) = .true. + case default + error stop "occupancy_s(occupy): unsupported rank" + end select + end associate associate_o + + end procedure occupy + + module procedure occupied + + nickname_loc: & + associate(b => (loc)) + select case(size(loc)) + case(1) + bin_occupied = self%occupied_1D_(b(1)) + case(2) + bin_occupied = self%occupied_2D_(b(1),b(2)) + case(3) + bin_occupied = self%occupied_3D_(b(1),b(2),b(3)) + case(4) + bin_occupied = self%occupied_4D_(b(1),b(2),b(3),b(4)) + case(5) + bin_occupied = self%occupied_5D_(b(1),b(2),b(3),b(4),b(5)) + case(6) + bin_occupied = self%occupied_6D_(b(1),b(2),b(3),b(4),b(5),b(6)) + case(7) + bin_occupied = self%occupied_7D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7)) + case(8) + bin_occupied = self%occupied_8D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8)) + case(9) + bin_occupied = self%occupied_9D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9)) + case(10) + bin_occupied = self%occupied_10D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10)) + case(11) + bin_occupied = self%occupied_11D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11)) + case(12) + bin_occupied = self%occupied_12D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12)) + case(13) + bin_occupied = self%occupied_13D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13)) + case(14) + bin_occupied = self%occupied_14D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13),b(14)) + case(15) + bin_occupied = self%occupied_15D_(b(1),b(2),b(3),b(4),b(5),b(6),b(7),b(8),b(9),b(10),b(11),b(12),b(13),b(14),b(15)) + case default + error stop "occupancy_s(occupied): unsupported rank" + end select + end associate nickname_loc + + end procedure occupied + + module procedure num_occupied + + call assert(count(allocations(self))==1, "occupancy_s(allocated_dim): count(self%allocations()) == 1") + + select case(self%allocated_dim()) + case(1) + bins_occupied = count(self%occupied_1D_, kind=int64) + case(2) + bins_occupied = count(self%occupied_2D_, kind=int64) + case(3) + bins_occupied = count(self%occupied_3D_, kind=int64) + case(4) + bins_occupied = count(self%occupied_4D_, kind=int64) + case(5) + bins_occupied = count(self%occupied_5D_, kind=int64) + case(6) + bins_occupied = count(self%occupied_6D_, kind=int64) + case(7) + bins_occupied = count(self%occupied_7D_, kind=int64) + case(8) + bins_occupied = count(self%occupied_8D_, kind=int64) + case(9) + bins_occupied = count(self%occupied_9D_, kind=int64) + case(10) + 
bins_occupied = count(self%occupied_10D_, kind=int64) + case(11) + bins_occupied = count(self%occupied_11D_, kind=int64) + case(12) + bins_occupied = count(self%occupied_12D_, kind=int64) + case(13) + bins_occupied = count(self%occupied_13D_, kind=int64) + case(14) + bins_occupied = count(self%occupied_14D_, kind=int64) + case(15) + bins_occupied = count(self%occupied_15D_, kind=int64) + case default + error stop "occupancy_s(num_occupied): unsupported rank" + end select + + end procedure num_occupied + + module procedure num_bins + + select case(self%allocated_dim()) + case(1) + bins_total = size(self%occupied_1D_) + case(2) + bins_total = size(self%occupied_2D_) + case(3) + bins_total = size(self%occupied_3D_) + case(4) + bins_total = size(self%occupied_4D_) + case(5) + bins_total = size(self%occupied_5D_) + case(6) + bins_total = size(self%occupied_6D_) + case(7) + bins_total = size(self%occupied_7D_) + case(8) + bins_total = size(self%occupied_8D_) + case(9) + bins_total = size(self%occupied_9D_) + case(10) + bins_total = size(self%occupied_10D_) + case(11) + bins_total = size(self%occupied_11D_) + case(12) + bins_total = size(self%occupied_12D_) + case(13) + bins_total = size(self%occupied_13D_) + case(14) + bins_total = size(self%occupied_14D_) + case(15) + bins_total = size(self%occupied_15D_) + case default + error stop "occupancy_s(num_bins): unsupported rank" + end select + + end procedure num_bins + + module procedure allocated_dim + + associate(my_allocations => allocations(self)) + call assert(count(my_allocations)==1, "occupancy_s(allocated_dim): count(self%allocations()) == 1") + my_dim = findloc(my_allocations, .true., dim=1) + end associate + + end procedure + +end submodule occupancy_s diff --git a/src/fiats/tensor_map_m.f90 b/src/fiats/tensor_map_m.f90 index b105f4613..1343343fb 100644 --- a/src/fiats/tensor_map_m.f90 +++ b/src/fiats/tensor_map_m.f90 @@ -15,14 +15,18 @@ module tensor_map_m character(len=:), allocatable, private :: layer_ real(k), dimension(:), allocatable, private :: intercept_, slope_ contains - generic :: map_to_training_range => default_real_map_to_training_range, double_precision_map_to_training_range - procedure, private, non_overridable :: default_real_map_to_training_range, double_precision_map_to_training_range - generic :: map_from_training_range => default_real_map_from_training_range, double_precision_map_from_training_range - procedure, private, non_overridable :: default_real_map_from_training_range, double_precision_map_from_training_range - generic :: to_json => default_real_to_json, double_precision_to_json - procedure, private :: default_real_to_json, double_precision_to_json - generic :: operator(==) => default_real_equals, double_precision_equals - procedure, private :: default_real_equals, double_precision_equals + generic :: map_to_training_range => default_real_map_to_training_range , double_precision_map_to_training_range + procedure, private, non_overridable :: default_real_map_to_training_range , double_precision_map_to_training_range + generic :: map_from_training_range => default_real_map_from_training_range, double_precision_map_from_training_range + procedure, private, non_overridable :: default_real_map_from_training_range, double_precision_map_from_training_range + generic :: minima => default_real_minima , double_precision_minima + procedure, private, non_overridable :: default_real_minima , double_precision_minima + generic :: maxima => default_real_maxima , double_precision_maxima + procedure, private, non_overridable :: 
default_real_maxima , double_precision_maxima + generic :: to_json => default_real_to_json , double_precision_to_json + procedure, private :: default_real_to_json , double_precision_to_json + generic :: operator(==) => default_real_equals , double_precision_equals + procedure, private :: default_real_equals , double_precision_equals end type @@ -58,6 +62,30 @@ module function double_precision_from_json(lines) result(tensor_map) interface + pure module function default_real_minima(self) result(minima) + implicit none + class(tensor_map_t), intent(in) :: self + real, allocatable :: minima(:) + end function + + pure module function double_precision_minima(self) result(minima) + implicit none + class(tensor_map_t(double_precision)), intent(in) :: self + double precision, allocatable :: minima(:) + end function + + pure module function default_real_maxima(self) result(maxima) + implicit none + class(tensor_map_t), intent(in) :: self + real, allocatable :: maxima(:) + end function + + pure module function double_precision_maxima(self) result(maxima) + implicit none + class(tensor_map_t(double_precision)), intent(in) :: self + double precision, allocatable :: maxima(:) + end function + elemental module function default_real_map_to_training_range(self, tensor) result(normalized_tensor) implicit none class(tensor_map_t), intent(in) :: self diff --git a/src/fiats/tensor_map_s.f90 b/src/fiats/tensor_map_s.f90 index c0e504d81..4651078f2 100644 --- a/src/fiats/tensor_map_s.f90 +++ b/src/fiats/tensor_map_s.f90 @@ -15,6 +15,22 @@ tensor_map%slope_ = maxima - minima end procedure + module procedure default_real_minima + minima = self%intercept_ + end procedure + + module procedure default_real_maxima + maxima = self%intercept_ + self%slope_ + end procedure + + module procedure double_precision_minima + minima = self%intercept_ + end procedure + + module procedure double_precision_maxima + maxima = self%intercept_ + self%slope_ + end procedure + module procedure construct_double_precision call assert(size(minima)==size(maxima),"tensor_map_s(construct_double_precision): size(minima)==size(maxima)") tensor_map%layer_ = layer diff --git a/src/fiats/trainable_network_m.f90 b/src/fiats/trainable_network_m.f90 index 0cb2cfc50..a085168fa 100644 --- a/src/fiats/trainable_network_m.f90 +++ b/src/fiats/trainable_network_m.f90 @@ -34,8 +34,8 @@ module function perturbed_identity_network(training_configuration, perturbation_ result(trainable_network) implicit none type(training_configuration_t), intent(in) :: training_configuration - type(string_t), intent(in) :: metadata(:) real, intent(in) :: perturbation_magnitude + type(string_t), intent(in) :: metadata(:) type(tensor_map_t) input_map, output_map type(trainable_network_t) trainable_network end function diff --git a/src/fiats/training_configuration_m.f90 b/src/fiats/training_configuration_m.f90 index eb20a30f0..507d77089 100644 --- a/src/fiats/training_configuration_m.f90 +++ b/src/fiats/training_configuration_m.f90 @@ -18,20 +18,20 @@ module training_configuration_m type(hyperparameters_t(k)), private :: hyperparameters_ type(network_configuration_t), private :: network_configuration_ contains - generic :: operator(==) => default_real_equals, double_precision_equals - procedure, private :: default_real_equals, double_precision_equals - generic :: to_json => default_real_to_json, double_precision_to_json - procedure, private :: default_real_to_json, double_precision_to_json - generic :: mini_batches => default_real_mini_batches, double_precision_mini_batches - 
procedure, private :: default_real_mini_batches, double_precision_mini_batches - generic :: optimizer_name => default_real_optimizer_name, double_precision_optimizer_name - procedure, private :: default_real_optimizer_name, double_precision_optimizer_name - generic :: learning_rate => default_real_learning_rate, double_precision_learning_rate - procedure, private :: default_real_learning_rate, double_precision_learning_rate - generic :: differentiable_activation => default_real_differentiable_activation, double_precision_differentiable_activation - procedure, private :: default_real_differentiable_activation, double_precision_differentiable_activation - generic :: nodes_per_layer => default_real_nodes_per_layer, double_precision_nodes_per_layer - procedure, private :: default_real_nodes_per_layer, double_precision_nodes_per_layer + generic :: operator(==) => default_real_equals , double_precision_equals + procedure, private :: default_real_equals , double_precision_equals + generic :: to_json => default_real_to_json , double_precision_to_json + procedure, private :: default_real_to_json , double_precision_to_json + generic :: mini_batches => default_real_mini_batches , double_precision_mini_batches + procedure, private :: default_real_mini_batches , double_precision_mini_batches + generic :: optimizer_name => default_real_optimizer_name , double_precision_optimizer_name + procedure, private :: default_real_optimizer_name , double_precision_optimizer_name + generic :: learning_rate => default_real_learning_rate , double_precision_learning_rate + procedure, private :: default_real_learning_rate , double_precision_learning_rate + generic :: activation => default_real_activation , double_precision_activation + procedure, private :: default_real_activation , double_precision_activation + generic :: nodes_per_layer => default_real_nodes_per_layer , double_precision_nodes_per_layer + procedure, private :: default_real_nodes_per_layer , double_precision_nodes_per_layer generic :: skip_connections => default_real_skip_connections, double_precision_skip_connections procedure, private :: default_real_skip_connections, double_precision_skip_connections end type @@ -128,13 +128,13 @@ elemental module function double_precision_learning_rate(self) result(rate) double precision rate end function - module function default_real_differentiable_activation(self) result(activation) + module function default_real_activation(self) result(activation) implicit none class(training_configuration_t), intent(in) :: self type(activation_t) activation end function - module function double_precision_differentiable_activation(self) result(activation) + module function double_precision_activation(self) result(activation) implicit none class(training_configuration_t(double_precision)), intent(in) :: self type(activation_t) activation diff --git a/src/fiats/training_configuration_s.F90 b/src/fiats/training_configuration_s.F90 index 3b982a202..5ecdb5423 100644 --- a/src/fiats/training_configuration_s.F90 +++ b/src/fiats/training_configuration_s.F90 @@ -181,7 +181,7 @@ using_skip = self%network_configuration_%skip_connections() end procedure - module procedure default_real_differentiable_activation + module procedure default_real_activation #if defined __INTEL_COMPILER || _CRAYFTN type(string_t) :: activation_name activation_name = self%network_configuration_%activation_name() @@ -206,7 +206,7 @@ #endif end procedure - module procedure double_precision_differentiable_activation + module procedure double_precision_activation #if 
defined __INTEL_COMPILER || _CRAYFTN type(string_t) :: activation_name activation_name = self%network_configuration_%activation_name() From 681cc1414f19b6a839a9458ce2013724638f4132 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Tue, 22 Oct 2024 21:25:29 -0700 Subject: [PATCH 09/25] chore(train-cloud-micro):label associate statement --- demo/app/train-cloud-microphysics.F90 | 163 ++++++++++++++------------ 1 file changed, 89 insertions(+), 74 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index 0fef13830..3ad073c2a 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -164,16 +164,19 @@ subroutine read_train_write(training_configuration, args, plot_file) integer(int64) start_training, finish_training logical stop_requested + input_names: & associate(input_names => & [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & ) allocate(input_variable(size(input_names))) + input_file_name: & associate(input_file_name => args%base_name // "_input.nc") print *,"Reading physics-based model inputs from " // input_file_name + input_file: & associate(input_file => netCDF_file_t(input_file_name)) do v=1, size(input_variable) @@ -188,18 +191,21 @@ subroutine read_train_write(training_configuration, args, plot_file) print *,"- reading time" call input_time%input("time", input_file, rank=1) - end associate - end associate - end associate + end associate input_file + end associate input_file_name + end associate input_names + output_names: & associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) allocate(output_variable(size(output_names))) + output_file_name: & associate(output_file_name => args%base_name // "_output.nc") print *,"Reading physics-based model outputs from " // output_file_name + output_file: & associate(output_file => netCDF_file_t(output_file_name)) do v=1, size(output_variable) @@ -216,23 +222,25 @@ subroutine read_train_write(training_configuration, args, plot_file) call assert(output_time%conformable_with(input_time), "train_cloud_microphysics: input/output time conformance") - end associate - end associate + end associate output_file + end associate output_file_name print *,"Calculating desired neural-network model outputs" allocate(derivative, mold=output_variable) + dt: & associate(dt => NetCDF_variable_t(output_time - input_time, "dt")) do v = 1, size(derivative) + derivative_name: & associate(derivative_name => "d" // output_names(v)%string() // "/dt") print *,"- " // derivative_name derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name) call assert(.not. 
derivative(v)%any_nan(), "train_cloud_microhphysics: non NaN's") - end associate + end associate derivative_name end do - end associate - end associate + end associate dt + end associate output_names if (allocated(args%end_step)) then end_step = args%end_step @@ -246,6 +254,7 @@ subroutine read_train_write(training_configuration, args, plot_file) print *,"Defining output tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride output_tensors = tensors(derivative, step_start = args%start_step, step_end = end_step, step_stride = args%stride) + output_map: & associate( & output_map => tensor_map_t( & layer = "outputs" & @@ -255,7 +264,8 @@ subroutine read_train_write(training_configuration, args, plot_file) train_network: & block - associate( network_file => args%base_name // "_network.json") + network_file: & + associate(network_file => args%base_name // "_network.json") open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') @@ -275,6 +285,7 @@ subroutine read_train_write(training_configuration, args, plot_file) print *,"Defining a new network from training_configuration_t and tensor_map_t objects" + activation: & associate(activation => training_configuration%activation()) trainable_network = trainable_network_t( & training_configuration & @@ -293,7 +304,7 @@ subroutine read_train_write(training_configuration, args, plot_file) ) & ,output_map = output_map & ) - end associate + end associate activation end block initialize_network end if read_or_initialize_network @@ -330,98 +341,102 @@ subroutine read_train_write(training_configuration, args, plot_file) end block flatten_histogram - end associate - - ! print *,"Normalizing the remaining input and output tensors" - ! input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) + print *,"Normalizing the remaining input and output tensors" + input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - ! associate( & - ! num_pairs => size(input_output_pairs), & - ! n_bins => training_configuration%mini_batches(), & - ! adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & - ! learning_rate => training_configuration%learning_rate() & - ! ) - ! bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] + training_parameters: & + associate( & + num_pairs => size(input_output_pairs), & + n_bins => training_configuration%mini_batches(), & + adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & + learning_rate => training_configuration%learning_rate() & + ) + bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - ! print *,"Training network" - ! print *, " Epoch Cost (avg)" + print *,"Training network" + print *, " Epoch Cost (avg)" - ! call system_clock(start_training) - ! - ! train_write_and_maybe_exit: & - ! block - ! integer first_epoch - ! integer me + call system_clock(start_training) + + train_write_and_maybe_exit: & + block + integer first_epoch + integer me #if defined(MULTI_IMAGE_SUPPORT) - ! me = this_image() + me = this_image() #else - ! me = 1 + me = 1 #endif - ! if (me==1) first_epoch = plot_file%previous_epoch + 1 + if (me==1) first_epoch = plot_file%previous_epoch + 1 #if defined(MULTI_IMAGE_SUPPORT) - ! call co_broadcast(first_epoch, source_image=1) + call co_broadcast(first_epoch, source_image=1) #endif - ! associate(last_epoch => first_epoch + args%num_epochs - 1) - ! epochs: & - ! 
do epoch = first_epoch, last_epoch + last_epoch: & + associate(last_epoch => first_epoch + args%num_epochs - 1) + epochs: & + do epoch = first_epoch, last_epoch + + if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent + mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] - ! if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent - ! mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] + call trainable_network%train(mini_batches, cost, adam, learning_rate) - ! call trainable_network%train(mini_batches, cost, adam, learning_rate) + average_cost: & + associate(average_cost => sum(cost)/size(cost)) + converged: & + associate(converged => average_cost <= args%cost_tolerance) - ! associate(average_cost => sum(cost)/size(cost)) - ! associate(converged => average_cost <= args%cost_tolerance) + image_1_maybe_writes: & + if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then - ! image_1_maybe_writes: & - ! if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then + print *, epoch, average_cost + write(plot_file%plot_unit,*) epoch, average_cost - ! print *, epoch, average_cost - ! write(plot_file%plot_unit,*) epoch, average_cost + associate(json_file => trainable_network%to_json()) + call json_file%write_lines(string_t(network_file)) + end associate - ! associate(json_file => trainable_network%to_json()) - ! call json_file%write_lines(string_t(network_file)) - ! end associate + end if image_1_maybe_writes - ! end if image_1_maybe_writes + signal_convergence: & + if (converged) then + block + integer unit + open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. + close(unit) + exit epochs + end block + end if signal_convergence + end associate converged + end associate average_cost - ! signal_convergence: & - ! if (converged) then - ! block - ! integer unit - ! open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. - ! close(unit) - ! exit epochs - ! end block - ! end if signal_convergence - ! end associate - ! end associate + inquire(file="stop", exist=stop_requested) - ! inquire(file="stop", exist=stop_requested) + graceful_exit: & + if (stop_requested) then + print *,'Shutting down because a file named "stop" was found.' + return + end if graceful_exit - ! graceful_exit: & - ! if (stop_requested) then - ! print *,'Shutting down because a file named "stop" was found.' - ! return - ! end if graceful_exit + end do epochs + end associate last_epoch + end block train_write_and_maybe_exit - ! end do epochs - ! end associate - ! end block train_write_and_maybe_exit + end associate training_parameters - ! end associate + end associate network_file - ! call system_clock(finish_training) - ! print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & - ! args%num_epochs,"epochs" + call system_clock(finish_training) + print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & + args%num_epochs,"epochs" ! 
end associate end block train_network - end associate + end associate output_map !close(plot_file%plot_unit) From b542b09145530836818844a6e6b57850f9181305 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Tue, 22 Oct 2024 22:10:58 -0700 Subject: [PATCH 10/25] chore(train-cloud-micro): white space edits --- demo/app/train-cloud-microphysics.F90 | 328 ++++++++++++-------------- 1 file changed, 157 insertions(+), 171 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index 3ad073c2a..a2c6054fb 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -164,7 +164,7 @@ subroutine read_train_write(training_configuration, args, plot_file) integer(int64) start_training, finish_training logical stop_requested - input_names: & + input_names: & associate(input_names => & [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & @@ -254,191 +254,177 @@ subroutine read_train_write(training_configuration, args, plot_file) print *,"Defining output tensors for time step", args%start_step, "through", end_step, "with strides of", args%stride output_tensors = tensors(derivative, step_start = args%start_step, step_end = end_step, step_stride = args%stride) - output_map: & + output_map_and_network_file: & associate( & output_map => tensor_map_t( & layer = "outputs" & ,minima = [( derivative(v)%minimum(), v=1, size(derivative) )] & ,maxima = [( derivative(v)%maximum(), v=1, size(derivative) )] & - )) - train_network: & + ), & + network_file => args%base_name // "_network.json" & + ) + open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') + + read_or_initialize_network: & + if (io_status==0) then + print *,"Reading network from file " // network_file + trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) + close(network_unit) + else + close(network_unit) + + initialize_network: & + block + character(len=len('YYYYMMDD')) date + + call date_and_time(date) + + print *,"Defining a new network from training_configuration_t and tensor_map_t objects" + + activation: & + associate(activation => training_configuration%activation()) + trainable_network = trainable_network_t( & + training_configuration & + ,perturbation_magnitude = 0.05 & + ,metadata = [ & + string_t("ICAR microphysics" ) & + ,string_t("max-entropy-filter") & + ,string_t(date ) & + ,activation%function_name( ) & + ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) & + ] & + ,input_map = tensor_map_t( & + layer = "inputs" & + ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & + ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & + ) & + ,output_map = output_map & + ) + end associate activation + end block initialize_network + + end if read_or_initialize_network + + print *, "Conditionally sampling for a flat distribution of output values" + + flatten_histogram: & block - - network_file: & - associate(network_file => args%base_name // "_network.json") - - open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') - - read_or_initialize_network: & - if (io_status==0) then - print *,"Reading network from file " // network_file - trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) - close(network_unit) - else - 
close(network_unit) - - initialize_network: & - block - character(len=len('YYYYMMDD')) date - - call date_and_time(date) - - print *,"Defining a new network from training_configuration_t and tensor_map_t objects" - - activation: & - associate(activation => training_configuration%activation()) - trainable_network = trainable_network_t( & - training_configuration & - ,perturbation_magnitude = 0.05 & - ,metadata = [ & - string_t("ICAR microphysics" ) & - ,string_t("max-entropy-filter") & - ,string_t(date ) & - ,activation%function_name( ) & - ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) & - ] & - ,input_map = tensor_map_t( & - layer = "inputs" & - ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & - ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & - ) & - ,output_map = output_map & - ) - end associate activation - end block initialize_network - - end if read_or_initialize_network - - print *, "Conditionally sampling for a flat distribution of output values" - - flatten_histogram: & - block - integer i - !logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) - logical occupied(args%num_bins, args%num_bins) - logical keepers(size(output_tensors)) - type(phase_space_bin_t), allocatable :: bin(:) - type(occupancy_t) occupancy - - ! Determine the phase-space bin that holds each output tensor - bin = [(phase_space_bin_t(output_tensors(i), output_map%minima(), output_map%maxima(), args%num_bins), i = 1, size(output_tensors))] - call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] ) - - keepers = .false. - - do i = 1, size(output_tensors) - if (occupancy%occupied(bin(i)%loc)) cycle - call occupancy%occupy(bin(i)%loc) - keepers(i) = .true. - end do - input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers)) - print '(*(a,i))' & - ," Keeping " , size(input_output_pairs, kind=int64) & - ," out of " , size(output_tensors, kind=int64) & - ," input/output pairs in ", occupancy%num_occupied() & - ," out of " , occupancy%num_bins() & - ," bins." - - end block flatten_histogram - - print *,"Normalizing the remaining input and output tensors" - input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) - - training_parameters: & - associate( & - num_pairs => size(input_output_pairs), & - n_bins => training_configuration%mini_batches(), & - adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & - learning_rate => training_configuration%learning_rate() & - ) - bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] - - print *,"Training network" - print *, " Epoch Cost (avg)" - - call system_clock(start_training) - - train_write_and_maybe_exit: & - block - integer first_epoch - integer me + integer i + !logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) + logical occupied(args%num_bins, args%num_bins) + logical keepers(size(output_tensors)) + type(phase_space_bin_t), allocatable :: bin(:) + type(occupancy_t) occupancy + + ! Determine the phase-space bin that holds each output tensor + bin = [(phase_space_bin_t(output_tensors(i), output_map%minima(), output_map%maxima(), args%num_bins), i = 1, size(output_tensors))] + call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] ) + + keepers = .false. 
+ + do i = 1, size(output_tensors) + if (occupancy%occupied(bin(i)%loc)) cycle + call occupancy%occupy(bin(i)%loc) + keepers(i) = .true. + end do + input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers)) + print '(*(a,i))' & + ," Keeping " , size(input_output_pairs, kind=int64) & + ," out of " , size(output_tensors, kind=int64) & + ," input/output pairs in ", occupancy%num_occupied() & + ," out of " , occupancy%num_bins() & + ," bins." + + end block flatten_histogram + + print *,"Normalizing the remaining input and output tensors" + input_output_pairs = trainable_network%map_to_training_ranges(input_output_pairs) + + training_parameters: & + associate( & + num_pairs => size(input_output_pairs), & + n_bins => training_configuration%mini_batches(), & + adam => merge(.true., .false., training_configuration%optimizer_name() == "adam"), & + learning_rate => training_configuration%learning_rate() & + ) + bins = [(bin_t(num_items=num_pairs, num_bins=n_bins, bin_number=b), b = 1, n_bins)] + print *,"Training network" + print *, " Epoch Cost (avg)" + call system_clock(start_training) + + train_write_and_maybe_exit: & + block + integer first_epoch + integer me #if defined(MULTI_IMAGE_SUPPORT) me = this_image() #else me = 1 #endif - if (me==1) first_epoch = plot_file%previous_epoch + 1 - + if (me==1) first_epoch = plot_file%previous_epoch + 1 #if defined(MULTI_IMAGE_SUPPORT) - call co_broadcast(first_epoch, source_image=1) + call co_broadcast(first_epoch, source_image=1) #endif - last_epoch: & - associate(last_epoch => first_epoch + args%num_epochs - 1) - epochs: & - do epoch = first_epoch, last_epoch - - if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent - mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] - - call trainable_network%train(mini_batches, cost, adam, learning_rate) - - average_cost: & - associate(average_cost => sum(cost)/size(cost)) - converged: & - associate(converged => average_cost <= args%cost_tolerance) - - image_1_maybe_writes: & - if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then - - print *, epoch, average_cost - write(plot_file%plot_unit,*) epoch, average_cost - - associate(json_file => trainable_network%to_json()) - call json_file%write_lines(string_t(network_file)) - end associate - - end if image_1_maybe_writes - - signal_convergence: & - if (converged) then - block - integer unit - open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. - close(unit) - exit epochs - end block - end if signal_convergence - end associate converged - end associate average_cost - - inquire(file="stop", exist=stop_requested) - - graceful_exit: & - if (stop_requested) then - print *,'Shutting down because a file named "stop" was found.' - return - end if graceful_exit - - end do epochs - end associate last_epoch - end block train_write_and_maybe_exit - - end associate training_parameters - - end associate network_file - - call system_clock(finish_training) - print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & - args%num_epochs,"epochs" - - ! 
end associate - end block train_network - - end associate output_map - - !close(plot_file%plot_unit) + last_epoch: & + associate(last_epoch => first_epoch + args%num_epochs - 1) + epochs: & + do epoch = first_epoch, last_epoch + + if (size(bins)>1) call shuffle(input_output_pairs) ! set up for stochastic gradient descent + mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))] + + call trainable_network%train(mini_batches, cost, adam, learning_rate) + + average_cost: & + associate(average_cost => sum(cost)/size(cost)) + converged: & + associate(converged => average_cost <= args%cost_tolerance) + + image_1_maybe_writes: & + if (me==1 .and. any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then + + print *, epoch, average_cost + write(plot_file%plot_unit,*) epoch, average_cost + + associate(json_file => trainable_network%to_json()) + call json_file%write_lines(string_t(network_file)) + end associate + + end if image_1_maybe_writes + + signal_convergence: & + if (converged) then + block + integer unit + open(newunit=unit, file="converged", status="unknown") ! The train.sh script detects & removes this file. + close(unit) + exit epochs + end block + end if signal_convergence + end associate converged + end associate average_cost + + inquire(file="stop", exist=stop_requested) + + graceful_exit: & + if (stop_requested) then + print *,'Shutting down because a file named "stop" was found.' + return + end if graceful_exit + + end do epochs + end associate last_epoch + end block train_write_and_maybe_exit + + end associate training_parameters + end associate output_map_and_network_file + + call system_clock(finish_training) + print *,"Training time: ", real(finish_training - start_training, real64)/real(clock_rate, real64),"for", & + args%num_epochs,"epochs" + close(plot_file%plot_unit) end subroutine read_train_write From 4a916f30d9cd901af0183475efa3df82d4ce6b83 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sat, 26 Oct 2024 22:23:33 -0700 Subject: [PATCH 11/25] feat(tensor_names): define type, constructors, I/O --- src/fiats/tensor_names_m.f90 | 51 ++++++++++++++++++++++++ src/fiats/tensor_names_s.f90 | 53 +++++++++++++++++++++++++ src/fiats_m.f90 | 1 + test/main.F90 | 3 ++ test/tensor_names_test.f90 | 76 ++++++++++++++++++++++++++++++++++++ 5 files changed, 184 insertions(+) create mode 100644 src/fiats/tensor_names_m.f90 create mode 100644 src/fiats/tensor_names_s.f90 create mode 100644 test/tensor_names_test.f90 diff --git a/src/fiats/tensor_names_m.f90 b/src/fiats/tensor_names_m.f90 new file mode 100644 index 000000000..3e2e3dfe8 --- /dev/null +++ b/src/fiats/tensor_names_m.f90 @@ -0,0 +1,51 @@ +! Copyright (c), The Regents of the University of California +! 
Terms of use are as specified in LICENSE.txt +module tensor_names_m + use julienne_string_m, only : string_t + implicit none + + private + public :: tensor_names_t + + type tensor_names_t + private + type(string_t), allocatable :: inputs_(:), outputs_(:) + contains + procedure :: to_json + procedure :: equals + generic :: operator(==) => equals + end type + + interface tensor_names_t + + pure module function from_json(lines) result(tensor_names) + implicit none + type(string_t), intent(in) :: lines(:) + type(tensor_names_t) tensor_names + end function + + pure module function from_components(inputs, outputs) result(tensor_names) + implicit none + type(string_t), intent(in) :: inputs(:), outputs(:) + type(tensor_names_t) tensor_names + end function + + end interface + + interface + + pure module function to_json(self) result(lines) + implicit none + class(tensor_names_t), intent(in) :: self + type(string_t), allocatable :: lines(:) + end function + + elemental module function equals(lhs, rhs) result(lhs_equals_rhs) + implicit none + class(tensor_names_t), intent(in) :: lhs, rhs + logical lhs_equals_rhs + end function + + end interface + +end module diff --git a/src/fiats/tensor_names_s.f90 b/src/fiats/tensor_names_s.f90 new file mode 100644 index 000000000..b2f41c358 --- /dev/null +++ b/src/fiats/tensor_names_s.f90 @@ -0,0 +1,53 @@ +! Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +submodule(tensor_names_m) tensor_names_s + use assert_m, only : assert + use julienne_m, only : operator(.csv.) + implicit none + + character(len=*), parameter :: inputs_key = "inputs" + character(len=*), parameter :: outputs_key = "outputs" + +contains + + module procedure from_components + tensor_names%inputs_ = inputs + tensor_names%outputs_ = outputs + end procedure + + module procedure equals + call assert( all([allocated(lhs%inputs_), allocated(rhs%inputs_), allocated(lhs%outputs_), allocated(rhs%outputs_)]) & + ,"tensor_names_s(equals): all components allocated") + lhs_equals_rhs = all(lhs%inputs_ == rhs%inputs_) .and. all(lhs%outputs_ == rhs%outputs_) + end procedure + + module procedure from_json + integer l + logical tensor_names_key_found + + tensor_names_key_found = .false. + + do l=1,size(lines) + if (lines(l)%get_json_key() == "tensor names") then + tensor_names_key_found = .true. + tensor_names%inputs_ = lines(l+1)%get_json_value(string_t("inputs") , mold=[string_t("")]) + tensor_names%outputs_ = lines(l+2)%get_json_value(string_t("outputs"), mold=[string_t("")]) + return + end if + end do + + call assert(tensor_names_key_found, "tensor_names_s(from_json): tensor_names_found") + end procedure + + module procedure to_json + character(len=*), parameter :: indent = repeat(" ",ncopies=4) + + lines = [ & + string_t(indent // '"tensor names": {' ) & + ,indent // '"inputs" : [' // .csv. self%inputs_%bracket('"') // ']' & + ,indent // '"outputs" : [' // .csv. 
self%outputs_%bracket('"') // ']' & + ,string_t(indent // '}' ) & + ] + end procedure + +end submodule tensor_names_s diff --git a/src/fiats_m.f90 b/src/fiats_m.f90 index df6d35d7e..b8ce8d625 100644 --- a/src/fiats_m.f90 +++ b/src/fiats_m.f90 @@ -13,6 +13,7 @@ module fiats_m use network_configuration_m, only : network_configuration_t use tensor_m, only : tensor_t use tensor_map_m, only : tensor_map_t + use tensor_names_m, only : tensor_names_t use trainable_network_m, only : trainable_network_t use training_configuration_m, only : training_configuration_t implicit none diff --git a/test/main.F90 b/test/main.F90 index 40026b524..b4a97fa98 100644 --- a/test/main.F90 +++ b/test/main.F90 @@ -9,6 +9,7 @@ program main use network_configuration_test_m, only : network_configuration_test_t use training_configuration_test_m, only : training_configuration_test_t use tensor_map_test_m, only : tensor_map_test_t + use tensor_names_test_m, only : tensor_names_test_t use tensor_test_m, only : tensor_test_t use julienne_m, only : command_line_t implicit none @@ -21,6 +22,7 @@ program main type(network_configuration_test_t) network_configuration_test type(training_configuration_test_t) training_configuration_test type(tensor_map_test_t) tensor_map_test + type(tensor_names_test_t) tensor_names_test type(tensor_test_t) tensor_test real t_start, t_finish @@ -46,6 +48,7 @@ program main call metadata_test%report(passes, tests) call training_configuration_test%report(passes, tests) call tensor_map_test%report(passes, tests) + call tensor_names_test%report(passes, tests) call tensor_test%report(passes, tests) call asymmetric_network_test%report(passes, tests) call neural_network_test%report(passes, tests) diff --git a/test/tensor_names_test.f90 b/test/tensor_names_test.f90 new file mode 100644 index 000000000..edac2869e --- /dev/null +++ b/test/tensor_names_test.f90 @@ -0,0 +1,76 @@ +! Copyright (c), The Regents of the University of California +! Terms of use are as specified in LICENSE.txt +module tensor_names_test_m + !! Test tensor_names_t object I/O and construction + + ! External dependencies + use fiats_m, only : tensor_names_t + use julienne_m, only : test_t, test_result_t, test_description_t, test_description_substring, string_t +#ifdef __GFORTRAN__ + use julienne_m, only : test_function_i +#endif + + ! 
Internal dependencies + use tensor_names_m, only : tensor_names_t + implicit none + + private + public :: tensor_names_test_t + + type, extends(test_t) :: tensor_names_test_t + contains + procedure, nopass :: subject + procedure, nopass :: results + end type + +contains + + pure function subject() result(specimen) + character(len=:), allocatable :: specimen + specimen = "A tensor_names_t object" + end function + + function results() result(test_results) + type(test_result_t), allocatable :: test_results(:) + type(test_description_t), allocatable :: test_descriptions(:) + +#ifndef __GFORTRAN__ + test_descriptions = [ & + test_description_t( & + string_t("component-wise construction followed by conversion to and from JSON"), & + write_then_read_tensor_names) & + ] +#else + procedure(test_function_i), pointer :: check_write_then_read_ptr + check_write_then_read_ptr => write_then_read_tensor_names + + test_descriptions = [ & + test_description_t( & + string_t("component-wise construction followed by conversion to and from JSON"), & + check_write_then_read_ptr) & + ] +#endif + associate( & + substring_in_subject => index(subject(), test_description_substring) /= 0, & + substring_in_description => test_descriptions%contains_text(string_t(test_description_substring)) & + ) + test_descriptions = pack(test_descriptions, substring_in_subject .or. substring_in_description) + end associate + test_results = test_descriptions%run() + end function + + function write_then_read_tensor_names() result(test_passes) + logical test_passes + + associate( & + from_components => tensor_names_t( & + inputs = [string_t("qc"), string_t("qv"), string_t("pressure")], & + outputs = [string_t("qc"), string_t("qv")] & + ) ) + associate(from_json => tensor_names_t(from_components%to_json())) + test_passes = from_components == from_json + end associate + end associate + end function + +end module tensor_names_test_m From 9d92687d5fe5df1ece88c2494d4494d54321a97c Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sat, 26 Oct 2024 22:25:30 -0700 Subject: [PATCH 12/25] chore(fpm.toml): update julienne version to 1.5.0 This dependency version update supports JSON file I/O capabilities used in the new tensor_map_t derived type. 
--- demo/fpm.toml | 2 +- fpm.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/demo/fpm.toml b/demo/fpm.toml index 9bed6f611..53401fedb 100644 --- a/demo/fpm.toml +++ b/demo/fpm.toml @@ -5,6 +5,6 @@ maintainer = "(Please see fiats/fpm.toml.)" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.2.2"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.0"} fiats = {path = "../"} netcdf-interfaces = {git = "https://github.com/LKedward/netcdf-interfaces.git", rev = "d2bbb71ac52b4e346b62572b1ca1620134481096"} diff --git a/fpm.toml b/fpm.toml index 815ffa443..e5c5b5ca5 100644 --- a/fpm.toml +++ b/fpm.toml @@ -6,4 +6,4 @@ maintainer = "rouson@lbl.gov" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.3.1"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.0"} From 1ae827f6e818787678e976e30a540458de2dec9b Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sun, 27 Oct 2024 00:02:23 -0700 Subject: [PATCH 13/25] refac(training_config): rm hardwired obj locations --- src/fiats/training_configuration_s.F90 | 62 +++++--------------------- 1 file changed, 10 insertions(+), 52 deletions(-) diff --git a/src/fiats/training_configuration_s.F90 b/src/fiats/training_configuration_s.F90 index 5ecdb5423..c324f8622 100644 --- a/src/fiats/training_configuration_s.F90 +++ b/src/fiats/training_configuration_s.F90 @@ -37,9 +37,6 @@ end procedure module procedure default_real_from_file - integer, parameter :: hyperparameters_start=2, hyperparameters_end=6, separator_line=7 ! line numbers - integer, parameter :: net_config_start=8, net_config_end=12 ! line numbers - integer, parameter :: file_start=hyperparameters_start-1, file_end=net_config_end+1 ! 
line numbers #if defined __INTEL_COMPILER || _CRAYFTN type(string_t), allocatable :: lines(:) #endif @@ -48,35 +45,20 @@ #if defined __INTEL_COMPILER || _CRAYFTN lines = training_configuration%file_t%lines() - call assert(trim(adjustl(lines(file_start)%string()))==header, & - "training_configuration_s(default_precision_from_file): header",lines(file_start)) - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - call assert(trim(adjustl(lines(separator_line)%string()))==separator, & - "training_configuration_s(default_precision_from_file): separator", & - lines(file_start)) - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - call assert(trim(adjustl(lines(file_end)%string()))==footer, & - "training_configuration_s(default_precision_from_file): footer", lines(file_end)) #else associate(lines => training_configuration%file_t%lines()) - call assert(trim(adjustl(lines(file_start)%string()))==header, & - "training_configuration_s(default_precision_from_file): header",lines(file_start)) - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - call assert(trim(adjustl(lines(separator_line)%string()))==separator, & - "training_configuration_s(default_precision_from_file): separator", & - lines(file_start)) - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - call assert(trim(adjustl(lines(file_end)%string()))==footer, & - "training_configuration_s(default_precision_from_file): footer", lines(file_end)) +#endif + + training_configuration%hyperparameters_ = hyperparameters_t(lines) + training_configuration%network_configuration_= network_configuration_t(lines) + +#if ! defined __INTEL_COMPILER || _CRAYFTN end associate #endif end procedure module procedure double_precision_from_file - integer, parameter :: hyperparameters_start=2, hyperparameters_end=6, separator_line=7 ! line numbers - integer, parameter :: net_config_start=8, net_config_end=12 ! line numbers - integer, parameter :: file_start=hyperparameters_start-1, file_end=net_config_end+1 ! 
line numbers #if defined __INTEL_COMPILER || _CRAYFTN type(double_precision_string_t), allocatable :: lines(:) #endif @@ -85,40 +67,16 @@ #if defined __INTEL_COMPILER || _CRAYFTN lines = training_configuration%double_precision_file_t%double_precision_lines() - - call assert(adjustl(lines(file_start)%string()) == header, & - "training_configuration_s(double_precision_from_file): header",lines(file_start)) - - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - - call assert(adjustl(lines(separator_line)%string()) == separator, & - "training_configuration_s(double_precision_from_file): separator", lines(file_start)) - - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - - call assert(adjustl(lines(file_end)%string()) == footer, & - "training_configuration_s(double_precision_from_file): footer", lines(file_end)) #else - associate(lines => training_configuration%double_precision_file_t%double_precision_lines()) +#endif - call assert(adjustl(lines(file_start)%string()) == header, & - "training_configuration_s(double_precision_from_file): header", lines(file_start)) - - training_configuration%hyperparameters_ = hyperparameters_t(lines(hyperparameters_start:hyperparameters_end)) - - call assert(adjustl(lines(separator_line)%string()) == separator, & - "training_configuration_s(double_precision_from_file): separator", lines(file_start)) - - training_configuration%network_configuration_= network_configuration_t(lines(net_config_start:net_config_end)) - - call assert(adjustl(lines(file_end)%string()) == footer, & - "training_configuration_s(double_precision_from_file): footer", lines(file_end)) + training_configuration%hyperparameters_ = hyperparameters_t(lines) + training_configuration%network_configuration_= network_configuration_t(lines) +#if ! defined __INTEL_COMPILER || _CRAYFTN end associate - #endif - end procedure module procedure default_real_to_json From 9d6d7359dcf311ceabe094e86f1909439980aaa1 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sun, 27 Oct 2024 09:12:40 -0700 Subject: [PATCH 14/25] chore(fpm.toml): update julienne version to 1.5.2 This improves the formatting of test-reporting output. --- fpm.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fpm.toml b/fpm.toml index e5c5b5ca5..f04ae87c6 100644 --- a/fpm.toml +++ b/fpm.toml @@ -6,4 +6,4 @@ maintainer = "rouson@lbl.gov" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.0"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.2"} From 906093063542c102faf3f303834bc01f99f9c790 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sun, 27 Oct 2024 09:45:02 -0700 Subject: [PATCH 15/25] fix(training_configuration_t): define tensor_names --- example/print-training-configuration.F90 | 31 ++++++++++++++---------- src/fiats/tensor_names_m.f90 | 2 +- src/fiats/tensor_names_s.f90 | 9 ++++--- src/fiats/training_configuration_m.f90 | 8 ++++-- src/fiats/training_configuration_s.F90 | 17 +++++++++++-- 5 files changed, 45 insertions(+), 22 deletions(-) diff --git a/example/print-training-configuration.F90 b/example/print-training-configuration.F90 index 36ed0983f..300d2030c 100644 --- a/example/print-training-configuration.F90 +++ b/example/print-training-configuration.F90 @@ -2,25 +2,30 @@ ! 
Terms of use are as specified in LICENSE.txt
 program print_training_configuration
   !! Demonstrate how to construct and print a training_configuration_t object
-  use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t
-  use julienne_m, only : file_t
+  use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t, tensor_names_t
+  use julienne_m, only : file_t, string_t
   implicit none
-#ifdef _CRAYFTN
-  type(training_configuration_t) :: training_configuration
-  type(file_t) :: json_file
-  training_configuration = training_configuration_t( &
-    hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam"), &
-    network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid"))
-  json_file = file_t(training_configuration%to_json())
-  call json_file%write_lines()
-#else
+#ifndef _CRAYFTN
   associate(training_configuration => training_configuration_t( &
-    hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam"), &
-    network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
+    hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam") &
+    ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
+    ,tensor_names_t(inputs = [string_t("pressure"), string_t("temperature")], outputs = ([string_t("saturated mixing ratio")])) &
   ))
     associate(json_file => file_t(training_configuration%to_json()))
       call json_file%write_lines()
     end associate
   end associate
+#else
+  block
+    type(training_configuration_t) :: training_configuration
+    type(file_t) :: json_file
+    training_configuration = training_configuration_t( &
+      hyperparameters_t(mini_batches=10, learning_rate=1.5, optimizer = "adam") &
+      ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
+      ,tensor_names_t(inputs=[string_t("pressure"), string_t("temperature")], outputs=[string_t("saturated mixing ratio")]) &
+    )
+    json_file = file_t(training_configuration%to_json())
+    call json_file%write_lines()
+  end block
 #endif
 end program
diff --git a/src/fiats/tensor_names_m.f90 b/src/fiats/tensor_names_m.f90
index 3e2e3dfe8..1a7cb9805 100644
--- a/src/fiats/tensor_names_m.f90
+++ b/src/fiats/tensor_names_m.f90
@@ -20,7 +20,7 @@ module tensor_names_m
     pure module function from_json(lines) result(tensor_names)
       implicit none
-      type(string_t), intent(in) :: lines(:)
+      class(string_t), intent(in) :: lines(:)
       type(tensor_names_t) tensor_names
     end function
diff --git a/src/fiats/tensor_names_s.f90 b/src/fiats/tensor_names_s.f90
index b2f41c358..16254dce7 100644
--- a/src/fiats/tensor_names_s.f90
+++ b/src/fiats/tensor_names_s.f90
@@ -2,7 +2,7 @@
 ! Terms of use are as specified in LICENSE.txt
 submodule(tensor_names_m) tensor_names_s
   use assert_m, only : assert
-  use julienne_m, only : operator(.csv.)
+  use julienne_m, only : operator(.csv.), operator(.cat.)
   implicit none
 
   character(len=*), parameter :: inputs_key = "inputs"
@@ -41,13 +41,14 @@
   module procedure to_json
 
     character(len=*), parameter :: indent = repeat(" ",ncopies=4)
-
+
     lines = [ &
       string_t(indent // '"tensor names": {' ) &
-      ,indent // '"inputs" : [' // .csv. self%inputs_%bracket('"') // ']' &
-      ,indent // '"outputs" : [' // .csv. self%outputs_%bracket('"') // ']' &
+      ,indent // indent // '"inputs" : [' // .csv. self%inputs_%bracket('"') // '],' &
+      ,indent // indent // '"outputs" : [' // .csv. 
self%outputs_%bracket('"') // ']' & ,string_t(indent // '}' ) & ] + end procedure end submodule tensor_names_s diff --git a/src/fiats/training_configuration_m.f90 b/src/fiats/training_configuration_m.f90 index 507d77089..d9a4cb5ff 100644 --- a/src/fiats/training_configuration_m.f90 +++ b/src/fiats/training_configuration_m.f90 @@ -8,6 +8,7 @@ module training_configuration_m use network_configuration_m, only : network_configuration_t use kind_parameters_m, only : default_real, double_precision use double_precision_file_m, only : double_precision_file_t + use tensor_names_m, only : tensor_names_t implicit none private @@ -17,6 +18,7 @@ module training_configuration_m integer, kind :: k = default_real type(hyperparameters_t(k)), private :: hyperparameters_ type(network_configuration_t), private :: network_configuration_ + type(tensor_names_t), private :: tensor_names_ contains generic :: operator(==) => default_real_equals , double_precision_equals procedure, private :: default_real_equals , double_precision_equals @@ -38,17 +40,19 @@ module training_configuration_m interface training_configuration_t - module function default_real_from_components(hyperparameters, network_configuration) result(training_configuration) + module function default_real_from_components(hyperparameters, network_configuration, tensor_names) result(training_configuration) implicit none type(hyperparameters_t), intent(in) :: hyperparameters type(network_configuration_t), intent(in) :: network_configuration type(training_configuration_t) training_configuration + type(tensor_names_t), intent(in) :: tensor_names end function - module function double_precision_from_components(hyperparameters, network_configuration) result(training_configuration) + module function double_precision_from_components(hyperparameters, network_configuration, tensor_names) result(training_configuration) implicit none type(hyperparameters_t(double_precision)), intent(in) :: hyperparameters type(network_configuration_t), intent(in) :: network_configuration + type(tensor_names_t), intent(in) :: tensor_names type(training_configuration_t(double_precision)) training_configuration end function diff --git a/src/fiats/training_configuration_s.F90 b/src/fiats/training_configuration_s.F90 index c324f8622..ed24159d5 100644 --- a/src/fiats/training_configuration_s.F90 +++ b/src/fiats/training_configuration_s.F90 @@ -14,24 +14,33 @@ training_configuration%hyperparameters_ = hyperparameters training_configuration%network_configuration_ = network_configuration + training_configuration%tensor_names_ = tensor_names + training_configuration%file_t = file_t([ & string_t(header), & training_configuration%hyperparameters_%to_json(), & string_t(separator), & training_configuration%network_configuration_%to_json(), & + string_t(separator), & + training_configuration%tensor_names_%to_json(), & string_t(footer) & ]) + end procedure module procedure double_precision_from_components training_configuration%hyperparameters_ = hyperparameters training_configuration%network_configuration_ = network_configuration + training_configuration%tensor_names_ = tensor_names + training_configuration%file_t = file_t([ & string_t(header), & training_configuration%hyperparameters_%to_json(), & string_t(separator), & training_configuration%network_configuration_%to_json(), & + string_t(separator), & + training_configuration%tensor_names_%to_json(), & string_t(footer) & ]) end procedure @@ -51,6 +60,7 @@ training_configuration%hyperparameters_ = hyperparameters_t(lines) 
training_configuration%network_configuration_= network_configuration_t(lines) + training_configuration%tensor_names_ = tensor_names_t(lines) #if ! defined __INTEL_COMPILER || _CRAYFTN end associate @@ -73,6 +83,7 @@ training_configuration%hyperparameters_ = hyperparameters_t(lines) training_configuration%network_configuration_= network_configuration_t(lines) + training_configuration%tensor_names_ = tensor_names_t(lines) #if ! defined __INTEL_COMPILER || _CRAYFTN end associate @@ -90,13 +101,15 @@ module procedure default_real_equals lhs_eq_rhs = & lhs%hyperparameters_ == rhs%hyperparameters_ .and. & - lhs%network_configuration_ == rhs%network_configuration_ + lhs%network_configuration_ == rhs%network_configuration_ .and. & + lhs%tensor_names_ == rhs%tensor_names_ end procedure module procedure double_precision_equals lhs_eq_rhs = & lhs%hyperparameters_ == rhs%hyperparameters_ .and. & - lhs%network_configuration_ == rhs%network_configuration_ + lhs%network_configuration_ == rhs%network_configuration_ .and. & + lhs%tensor_names_ == rhs%tensor_names_ end procedure module procedure default_real_mini_batches From 7caad25e4eba24008faf8b30ec83c8fb7ed6c11f Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sun, 27 Oct 2024 10:27:52 -0700 Subject: [PATCH 16/25] feat(tensor_names): {inputs,outputs}_names getters --- src/fiats/tensor_names_m.f90 | 14 +++++++++++++ src/fiats/tensor_names_s.f90 | 8 ++++++++ src/fiats/training_configuration_m.f90 | 28 ++++++++++++++++++++++++++ src/fiats/training_configuration_s.F90 | 22 ++++++++++++++++---- test/training_configuration_test_m.F90 | 13 +++++++----- 5 files changed, 76 insertions(+), 9 deletions(-) diff --git a/src/fiats/tensor_names_m.f90 b/src/fiats/tensor_names_m.f90 index 1a7cb9805..b1c1a9723 100644 --- a/src/fiats/tensor_names_m.f90 +++ b/src/fiats/tensor_names_m.f90 @@ -13,6 +13,8 @@ module tensor_names_m contains procedure :: to_json procedure :: equals + procedure :: input_names + procedure :: output_names generic :: operator(==) => equals end type @@ -46,6 +48,18 @@ elemental module function equals(lhs, rhs) result(lhs_equals_rhs) logical lhs_equals_rhs end function + pure module function input_names(self) result(names) + implicit none + class(tensor_names_t), intent(in) :: self + type(string_t), allocatable :: names(:) + end function + + pure module function output_names(self) result(names) + implicit none + class(tensor_names_t), intent(in) :: self + type(string_t), allocatable :: names(:) + end function + end interface end module diff --git a/src/fiats/tensor_names_s.f90 b/src/fiats/tensor_names_s.f90 index 16254dce7..defb9cead 100644 --- a/src/fiats/tensor_names_s.f90 +++ b/src/fiats/tensor_names_s.f90 @@ -51,4 +51,12 @@ end procedure + module procedure input_names + names = self%inputs_ + end procedure + + module procedure output_names + names = self%outputs_ + end procedure + end submodule tensor_names_s diff --git a/src/fiats/training_configuration_m.f90 b/src/fiats/training_configuration_m.f90 index d9a4cb5ff..ae2ca8837 100644 --- a/src/fiats/training_configuration_m.f90 +++ b/src/fiats/training_configuration_m.f90 @@ -36,6 +36,10 @@ module training_configuration_m procedure, private :: default_real_nodes_per_layer , double_precision_nodes_per_layer generic :: skip_connections => default_real_skip_connections, double_precision_skip_connections procedure, private :: default_real_skip_connections, double_precision_skip_connections + generic :: input_names => default_real_input_names , double_precision_input_names + procedure, private :: 
default_real_input_names , double_precision_input_names + generic :: output_names => default_real_output_names , double_precision_output_names + procedure, private :: default_real_output_names , double_precision_output_names end type interface training_configuration_t @@ -168,6 +172,30 @@ elemental module function double_precision_skip_connections(self) result(using_s logical using_skip end function + pure module function default_real_input_names(self) result(input_names) + implicit none + class(training_configuration_t), intent(in) :: self + type(string_t), allocatable :: input_names(:) + end function + + pure module function double_precision_input_names(self) result(input_names) + implicit none + class(training_configuration_t(double_precision)), intent(in) :: self + type(string_t), allocatable :: input_names(:) + end function + + pure module function default_real_output_names(self) result(output_names) + implicit none + class(training_configuration_t), intent(in) :: self + type(string_t), allocatable :: output_names(:) + end function + + pure module function double_precision_output_names(self) result(output_names) + implicit none + class(training_configuration_t(double_precision)), intent(in) :: self + type(string_t), allocatable :: output_names(:) + end function + end interface end module diff --git a/src/fiats/training_configuration_s.F90 b/src/fiats/training_configuration_s.F90 index ed24159d5..f72e1a659 100644 --- a/src/fiats/training_configuration_s.F90 +++ b/src/fiats/training_configuration_s.F90 @@ -171,8 +171,7 @@ case default error stop 'activation_factory_s(factory): unrecognized activation name "' // activation_name%string() // '"' end select -#if defined __INTEL_COMPILER || _CRAYFTN -#else +#if ! (defined __INTEL_COMPILER || _CRAYFTN) end associate #endif end procedure @@ -196,10 +195,25 @@ case default error stop 'activation_factory_s(factory): unrecognized activation name "' // activation_name%string() // '"' end select -#if defined __INTEL_COMPILER || _CRAYFTN -#else +#if ! (defined __INTEL_COMPILER || _CRAYFTN) end associate #endif end procedure + module procedure default_real_input_names + input_names = self%tensor_names_%input_names() + end procedure + + module procedure double_precision_input_names + input_names = self%tensor_names_%input_names() + end procedure + + module procedure default_real_output_names + output_names = self%tensor_names_%output_names() + end procedure + + module procedure double_precision_output_names + output_names = self%tensor_names_%output_names() + end procedure + end submodule training_configuration_s diff --git a/test/training_configuration_test_m.F90 b/test/training_configuration_test_m.F90 index a09dc0c09..6bf1db85c 100644 --- a/test/training_configuration_test_m.F90 +++ b/test/training_configuration_test_m.F90 @@ -4,7 +4,7 @@ module training_configuration_test_m !! Test training_configuration_t object I/O and construction ! 
External dependencies
-  use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t
+  use fiats_m, only : training_configuration_t, hyperparameters_t, network_configuration_t, tensor_names_t
   use julienne_m, only : test_t, test_result_t, test_description_t, test_description_substring, string_t, file_t
 #ifdef __GFORTRAN__
   use julienne_m, only : test_function_i
@@ -64,13 +64,16 @@ function construct_and_convert_to_and_from_json() result(test_passes)
 #ifdef _CRAYFTN
     type(training_configuration_t) :: training_configuration, from_json
     training_configuration = training_configuration_t( &
-      hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam"), &
-      network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid"))
+      hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam") &
+      ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
+      ,tensor_names_t(inputs=[string_t("pressure"), string_t("temperature")], outputs=[string_t("saturated mixing ratio")]) &
+    )
    from_json = training_configuration_t(file_t(training_configuration%to_json()))
 #else
     associate(training_configuration => training_configuration_t( &
-      hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam"), &
-      network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
+      hyperparameters_t(mini_batches=5, learning_rate=1., optimizer = "adam") &
+      ,network_configuration_t(skip_connections=.false., nodes_per_layer=[2,72,2], activation_name="sigmoid") &
+      ,tensor_names_t(inputs=[string_t("pressure"), string_t("temperature")], outputs=[string_t("saturated mixing ratio")]) &
    ))
       associate(from_json => training_configuration_t(file_t(training_configuration%to_json())))
 #endif

From bb119af9b87228d947ecc762df76670d18732fbe Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Sun, 27 Oct 2024 10:51:22 -0700
Subject: [PATCH 17/25] chore(fpm.toml): update to julienne 1.5.3

This improves the output in any uses of Julienne's file_t write_lines
type-bound procedure.
--- demo/fpm.toml | 2 +- fpm.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/demo/fpm.toml b/demo/fpm.toml index 53401fedb..729eb03ac 100644 --- a/demo/fpm.toml +++ b/demo/fpm.toml @@ -5,6 +5,6 @@ maintainer = "(Please see fiats/fpm.toml.)" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.0"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.3"} fiats = {path = "../"} netcdf-interfaces = {git = "https://github.com/LKedward/netcdf-interfaces.git", rev = "d2bbb71ac52b4e346b62572b1ca1620134481096"} diff --git a/fpm.toml b/fpm.toml index f04ae87c6..379178226 100644 --- a/fpm.toml +++ b/fpm.toml @@ -6,4 +6,4 @@ maintainer = "rouson@lbl.gov" [dependencies] assert = {git = "https://github.com/sourceryinstitute/assert", tag = "1.7.0"} -julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.2"} +julienne = {git = "https://github.com/berkeleylab/julienne", tag = "1.5.3"} From b969f81e5d366221c374eff4b6d4bc4e67c13d77 Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sun, 27 Oct 2024 11:02:01 -0700 Subject: [PATCH 18/25] feat(training_configuration):tensor_names_t object --- demo/app/train-cloud-microphysics.F90 | 15 +++++++++------ demo/training_configuration.json | 5 +++++ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index a2c6054fb..25183ea6a 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -61,7 +61,7 @@ program train_cloud_microphysics call system_clock(t_finish) print *,"System clock time: ", real(t_finish - t_start, real64)/real(clock_rate, real64) - print *,new_line('a') // "______train_cloud_microhpysics done _______" + print *,new_line('a') // "______train_cloud_microphysics done _______" contains @@ -164,11 +164,14 @@ subroutine read_train_write(training_configuration, args, plot_file) integer(int64) start_training, finish_training logical stop_requested + type(file_t) file + + file = file_t(training_configuration%to_json()) + call file%write_lines() + input_names: & - associate(input_names => & - [string_t("pressure"), string_t("potential_temperature"), string_t("temperature"), & - string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")] & - ) + associate(input_names => training_configuration%input_names()) + allocate(input_variable(size(input_names))) input_file_name: & @@ -196,7 +199,7 @@ subroutine read_train_write(training_configuration, args, plot_file) end associate input_names output_names: & - associate(output_names => [string_t("potential_temperature"),string_t("qv"), string_t("qc"), string_t("qr"), string_t("qs")]) + associate(output_names => training_configuration%output_names()) allocate(output_variable(size(output_names))) diff --git a/demo/training_configuration.json b/demo/training_configuration.json index 94cd9c49c..d00c25286 100644 --- a/demo/training_configuration.json +++ b/demo/training_configuration.json @@ -9,5 +9,10 @@ "skip connections" : false, "nodes per layer" : [7,32,32,32,5], "activation function" : "sigmoid" + }, + , + "tensor names": { + "inputs" : ["pressure", "potential_temperature", "temperature", "qv", "qc", "qr", "qs"], + "outputs" : ["potential_temperature","qv", "qc", "qr", "qs"] } } From cb9a173ecb0c83081cb1067d1997fdf402ff975a Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Sun, 27 Oct 2024 11:04:05 -0700 Subject: 
[PATCH 19/25] chore(train-cloud-micro): rm diagnostic output

---
 demo/app/train-cloud-microphysics.F90 | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90
index 25183ea6a..b268b4a9f 100644
--- a/demo/app/train-cloud-microphysics.F90
+++ b/demo/app/train-cloud-microphysics.F90
@@ -164,11 +164,6 @@ subroutine read_train_write(training_configuration, args, plot_file)
     integer(int64) start_training, finish_training
     logical stop_requested
 
-    type(file_t) file
-
-    file = file_t(training_configuration%to_json())
-    call file%write_lines()
-
     input_names: &
     associate(input_names => training_configuration%input_names())
 

From 2dd7bb4c0e94d6217b8c51a66e2c24fb0ff21a52 Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Sun, 27 Oct 2024 22:02:13 -0700
Subject: [PATCH 20/25] fix(training_configuration.json): reorder outputs

This commit matches the outputs list to the beginning of the inputs list to
ensure correct time derivative calculations.

---
 demo/training_configuration.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/demo/training_configuration.json b/demo/training_configuration.json
index d00c25286..a01576ee5 100644
--- a/demo/training_configuration.json
+++ b/demo/training_configuration.json
@@ -12,7 +12,7 @@
   },
   ,
   "tensor names": {
-    "inputs" : ["pressure", "potential_temperature", "temperature", "qv", "qc", "qr", "qs"],
+    "inputs" : ["potential_temperature","qv", "qc", "qr", "qs", "pressure", "temperature"],
     "outputs" : ["potential_temperature","qv", "qc", "qr", "qs"]
   }
 }

From 027f0724ab50a5d7a80ab162ce0a6f1c1db06cb0 Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Sun, 27 Oct 2024 22:03:30 -0700
Subject: [PATCH 21/25] test(NetCDF_variable):check "-" operand name match

---
 demo/src/NetCDF_variable_s.f90 | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/demo/src/NetCDF_variable_s.f90 b/demo/src/NetCDF_variable_s.f90
index 0e7dec711..6c18e0f98 100644
--- a/demo/src/NetCDF_variable_s.f90
+++ b/demo/src/NetCDF_variable_s.f90
@@ -265,7 +265,12 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds)
   end procedure
 
   module procedure default_real_subtract
+
     call assert(lhs%conformable_with(rhs), "NetCDF_variable_s(default_real_subtract): lhs%conformable_with(rhs)")
+    call assert(lhs%name_==rhs%name_, "NetCDF_variable_s(default_real_subtract): lhs%name_==rhs%name_", lhs%name_//"/="//rhs%name_)
+
+    difference%name_ = lhs%name_
+
     select case(lhs%rank())
       case(1)
         difference%values_1D_ = lhs%values_1D_ - rhs%values_1D_
@@ -281,7 +286,12 @@ pure function double_precision_upper_bounds(NetCDF_variable) result(ubounds)
   end procedure
 
   module procedure double_precision_subtract
+
     call assert(lhs%conformable_with(rhs), "NetCDF_variable_s(double_precision_subtract): lhs%conformable_with(rhs)")
+    call assert(lhs%name_ == rhs%name_, "NetCDF_variable_s(double_precision_subtract): lhs%name_==rhs%name_",lhs%name_//"/="//rhs%name_)
+
+    difference%name_ = lhs%name_
+
     select case(lhs%rank())
       case(1)
         difference%values_1D_ = lhs%values_1D_ - rhs%values_1D_

From 3bf6d0eb7bb51617be3c788ce61e0c85da847fb7 Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Sun, 27 Oct 2024 22:05:22 -0700
Subject: [PATCH 22/25] fix(train-cloud-micro): time difference formula

This commit adds parentheses to the time differencing formula to correctly
account for operator precedence.
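For illustration, a minimal standalone sketch (hypothetical, not part of this
patch series) of the precedence pitfall the fix addresses: Fortran gives "/"
higher precedence than binary "-", so "input - output / dt" parses as
"input - (output / dt)" rather than the intended "(input - output) / dt".

    program precedence_sketch
      implicit none
      real :: input, output, dt
      input = 4.; output = 2.; dt = 2.
      print *, input - output / dt    ! prints 3.0: "/" binds before binary "-"
      print *, (input - output) / dt  ! prints 1.0: the intended time difference
    end program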
--- demo/app/train-cloud-microphysics.F90 | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index b268b4a9f..ee19bda9d 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -75,7 +75,7 @@ function create_or_append_to(plot_file_name) result(plot_file) if (.not. preexisting_plot_file) then open(newunit=plot_unit, file=plot_file_name, status="new", action="write") - write(plot_unit,*) " Epoch Cost (avg)" + write(plot_unit,'(a)') " Epoch Cost (avg)" previous_epoch = 0 else associate(plot_file => file_t(string_t(plot_file_name))) @@ -233,7 +233,7 @@ subroutine read_train_write(training_configuration, args, plot_file) derivative_name: & associate(derivative_name => "d" // output_names(v)%string() // "/dt") print *,"- " // derivative_name - derivative(v) = NetCDF_variable_t( input_variable(v) - output_variable(v) / dt, derivative_name) + derivative(v) = NetCDF_variable_t( (input_variable(v) - output_variable(v)) / dt, derivative_name) call assert(.not. derivative(v)%any_nan(), "train_cloud_microhphysics: non NaN's") end associate derivative_name end do @@ -308,14 +308,16 @@ subroutine read_train_write(training_configuration, args, plot_file) flatten_histogram: & block integer i - !logical occupied(args%num_bins, args%num_bins, args%num_bins, args%num_bins, args%num_bins) logical occupied(args%num_bins, args%num_bins) logical keepers(size(output_tensors)) type(phase_space_bin_t), allocatable :: bin(:) type(occupancy_t) occupancy ! Determine the phase-space bin that holds each output tensor - bin = [(phase_space_bin_t(output_tensors(i), output_map%minima(), output_map%maxima(), args%num_bins), i = 1, size(output_tensors))] + associate(output_minima => output_map%minima(), output_maxima => output_map%maxima()) + bin = [(phase_space_bin_t(output_tensors(i), output_minima, output_maxima, args%num_bins), i = 1, size(output_tensors))] + end associate + call occupancy%vacate( dims = [( args%num_bins, i = 1, size(output_variable))] ) keepers = .false. @@ -325,10 +327,12 @@ subroutine read_train_write(training_configuration, args, plot_file) call occupancy%occupy(bin(i)%loc) keepers(i) = .true. end do + input_output_pairs = input_output_pair_t(pack(input_tensors, keepers), pack(output_tensors, keepers)) + print '(*(a,i))' & ," Keeping " , size(input_output_pairs, kind=int64) & - ," out of " , size(output_tensors, kind=int64) & + ," out of " , size(output_tensors, kind=int64) & ," input/output pairs in ", occupancy%num_occupied() & ," out of " , occupancy%num_bins() & ," bins." @@ -383,8 +387,8 @@ subroutine read_train_write(training_configuration, args, plot_file) image_1_maybe_writes: & if (me==1 .and. 
any([converged, epoch==[first_epoch,last_epoch], mod(epoch,args%report_step)==0])) then
-          print *, epoch, average_cost
-          write(plot_file%plot_unit,*) epoch, average_cost
+          !print '(*(g0,4x))', epoch, average_cost
+          write(plot_file%plot_unit,'(*(g0,4x))') epoch, average_cost
 
           associate(json_file => trainable_network%to_json())
             call json_file%write_lines(string_t(network_file))

From bb8ca86bcd45604050dc2765f4bd45d78d5d6283 Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Mon, 28 Oct 2024 16:19:29 -0700
Subject: [PATCH 23/25] fix(write-read-infer): grammatical fixes in output

---
 example/write-read-infer.F90 | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/example/write-read-infer.F90 b/example/write-read-infer.F90
index 1f0cbed31..704014733 100644
--- a/example/write-read-infer.F90
+++ b/example/write-read-infer.F90
@@ -57,16 +57,16 @@ subroutine write_read_query_infer(output_file_name)
     type(file_t) json_output_file, json_input_file
     type(tensor_t) inputs, outputs
 
-    print *, "Constructing an neural_network_t neural-network object from scratch."
+    print *, "Constructing a neural_network_t neural-network object from scratch."
     network = identity_network()
 
-    print *, "Converting an neural_network_t object to a file_t object."
+    print *, "Converting a neural_network_t object to a file_t object."
     json_output_file = network%to_json()
 
-    print *, "Writing an neural_network_t object to the file '"//output_file_name%string()//"' in JSON format."
+    print *, "Writing a neural_network_t object to the file '"//output_file_name%string()//"' in JSON format."
     call json_output_file%write_lines(output_file_name)
 
-    print *, "Reading an neural_network_t object from the same JSON file '"//output_file_name%string()//"'."
+    print *, "Reading a neural_network_t object from the same JSON file '"//output_file_name%string()//"'."
     json_input_file = file_t(output_file_name)
 
     print *, "Constructing a new neural_network_t object from the parameters read."

From 3f382f3dcf233565e12fbf70196cc3abc97f5fa5 Mon Sep 17 00:00:00 2001
From: Damian Rouson
Date: Mon, 28 Oct 2024 21:15:24 -0700
Subject: [PATCH 24/25] fix(train-cloud-micro): inquire about network file

This commit replaces an `open` statement with an `inquire` statement to
prevent a crash when mistakenly attempting to reopen an already open file.
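For illustration, a minimal standalone sketch (file and variable names here are
hypothetical, not the patch's) of the pattern this fix adopts: inquire tests for
a file's existence without connecting the file to an I/O unit, so it cannot fail
on a file that another part of the program already has open.

    program inquire_sketch
      implicit none
      logical network_file_exists
      ! inquire reports existence without opening the file
      inquire(file="example_network.json", exist=network_file_exists)
      if (network_file_exists) then
        print *, "reading the preexisting network"
      else
        print *, "initializing a new network"
      end if
    end program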
--- demo/app/train-cloud-microphysics.F90 | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index ee19bda9d..3ad745ae6 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -261,10 +261,14 @@ subroutine read_train_write(training_configuration, args, plot_file) ), & network_file => args%base_name // "_network.json" & ) - open(newunit=network_unit, file=network_file, form='formatted', status='old', iostat=io_status, action='read') - + check_for_network_file: & + block + logical preexisting_network_file + + inquire(file=network_file, exist=preexisting_network_file) + read_or_initialize_network: & - if (io_status==0) then + if (preexisting_network_file) then print *,"Reading network from file " // network_file trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) close(network_unit) @@ -303,6 +307,8 @@ subroutine read_train_write(training_configuration, args, plot_file) end if read_or_initialize_network + end block check_for_network_file + print *, "Conditionally sampling for a flat distribution of output values" flatten_histogram: & From 36543133e190f63f2572d48ae8837de2390dc29d Mon Sep 17 00:00:00 2001 From: Damian Rouson Date: Mon, 28 Oct 2024 21:17:22 -0700 Subject: [PATCH 25/25] chore(train-cloud-micro): indent block internals --- demo/app/train-cloud-microphysics.F90 | 78 +++++++++++++-------------- 1 file changed, 39 insertions(+), 39 deletions(-) diff --git a/demo/app/train-cloud-microphysics.F90 b/demo/app/train-cloud-microphysics.F90 index 3ad745ae6..93ad7b79d 100644 --- a/demo/app/train-cloud-microphysics.F90 +++ b/demo/app/train-cloud-microphysics.F90 @@ -267,45 +267,45 @@ subroutine read_train_write(training_configuration, args, plot_file) inquire(file=network_file, exist=preexisting_network_file) - read_or_initialize_network: & - if (preexisting_network_file) then - print *,"Reading network from file " // network_file - trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) - close(network_unit) - else - close(network_unit) - - initialize_network: & - block - character(len=len('YYYYMMDD')) date - - call date_and_time(date) - - print *,"Defining a new network from training_configuration_t and tensor_map_t objects" - - activation: & - associate(activation => training_configuration%activation()) - trainable_network = trainable_network_t( & - training_configuration & - ,perturbation_magnitude = 0.05 & - ,metadata = [ & - string_t("ICAR microphysics" ) & - ,string_t("max-entropy-filter") & - ,string_t(date ) & - ,activation%function_name( ) & - ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) & - ] & - ,input_map = tensor_map_t( & - layer = "inputs" & - ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & - ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & - ) & - ,output_map = output_map & - ) - end associate activation - end block initialize_network - - end if read_or_initialize_network + read_or_initialize_network: & + if (preexisting_network_file) then + print *,"Reading network from file " // network_file + trainable_network = trainable_network_t(neural_network_t(file_t(string_t(network_file)))) + close(network_unit) + else + close(network_unit) + + initialize_network: & + block + character(len=len('YYYYMMDD')) date + + call date_and_time(date) + + print *,"Defining a new network from 
training_configuration_t and tensor_map_t objects" + + activation: & + associate(activation => training_configuration%activation()) + trainable_network = trainable_network_t( & + training_configuration & + ,perturbation_magnitude = 0.05 & + ,metadata = [ & + string_t("ICAR microphysics" ) & + ,string_t("max-entropy-filter") & + ,string_t(date ) & + ,activation%function_name( ) & + ,string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) & + ] & + ,input_map = tensor_map_t( & + layer = "inputs" & + ,minima = [( input_variable(v)%minimum(), v=1, size( input_variable) )] & + ,maxima = [( input_variable(v)%maximum(), v=1, size( input_variable) )] & + ) & + ,output_map = output_map & + ) + end associate activation + end block initialize_network + + end if read_or_initialize_network end block check_for_network_file