Merge pull request #119 from BerkeleyLab/bugfixes-for-nag
Bugfixes for nag
rouson authored Feb 6, 2024
2 parents 22c9c47 + d62f97f commit 057ffe0
Showing 18 changed files with 66 additions and 46 deletions.
10 changes: 6 additions & 4 deletions example/learn-addition.f90
@@ -22,12 +22,12 @@ program learn_addition
 !! This trains a neural network to learn the following six polynomial functions of its eight inputs.
 use inference_engine_m, only : &
 inference_engine_t, trainable_engine_t, mini_batch_t, tensor_t, input_output_pair_t, shuffle, relu_t
-use sourcery_m, only : string_t, file_t, command_line_t, bin_t, csv
+use sourcery_m, only : string_t, file_t, command_line_t, bin_t
 use assert_m, only : assert, intrinsic_array_t
 use addition_m, only : y
 implicit none
 
-type(string_t) intial_network_file, final_network_file
+type(string_t) final_network_file
 type(command_line_t) command_line
 
 final_network_file = string_t(command_line%flag_value("--output-file"))
@@ -47,7 +47,9 @@ program learn_addition
 type(bin_t), allocatable :: bins(:)
 real, allocatable :: cost(:), random_numbers(:)
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 trainable_engine = perturbed_identity_network(perturbation_magnitude=0.05)
 call output(trainable_engine%to_inference_engine(), string_t("initial-network.json"))
@@ -122,7 +124,7 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 real, intent(in) :: perturbation_magnitude
 integer, parameter :: n(*) = [8, 64, 64, 64, 6]
 integer, parameter :: n_max = maxval(n), layers = size(n)
-integer j, k, l
+integer k, l
 real, allocatable :: identity(:,:,:), w_harvest(:,:,:), b_harvest(:,:)
 
 identity = reshape( [( [(e(k,n_max), k=1,n_max)], l = 1, layers-1 )], [n_max, n_max, layers-1])
@@ -144,4 +146,4 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 end associate
 end function
 
-end program
\ No newline at end of file
+end program
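The recurring fix in this commit is the preprocessor guard around `random_init`, a Fortran 2018 intrinsic that nagfor evidently rejected at the time; guarding the call trades reproducible random streams under NAG for a clean build. The other edits in this file drop symbols nagfor flags as unused: the `csv` import, the (misspelled) `intial_network_file` variable, and the loop index `j`. Below is a minimal, self-contained sketch of the guard pattern; the program name is illustrative, and the `NAGFOR` macro is assumed to be supplied by the build (e.g., `-DNAGFOR`) when compiling with nagfor:

! guarded_random_init.F90 -- illustrative sketch, not part of this commit.
! The capital .F90 extension requests preprocessing from most compilers.
program guarded_random_init
  implicit none
  real x
#ifndef NAGFOR
  ! Fortran 2018: repeatable seeding with distinct streams per image
  call random_init(image_distinct=.true., repeatable=.true.)
#endif
  call random_number(x) ! under nagfor this falls back to processor-dependent seeding
  print *, x
end program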
12 changes: 7 additions & 5 deletions example/learn-exponentiation.f90
@@ -22,12 +22,12 @@ program learn_exponentiation
 !! This trains a neural network to learn the following six polynomial functions of its eight inputs.
 use inference_engine_m, only : &
 inference_engine_t, trainable_engine_t, mini_batch_t, tensor_t, input_output_pair_t, shuffle, relu_t
-use sourcery_m, only : string_t, file_t, command_line_t, bin_t, csv
+use sourcery_m, only : string_t, file_t, command_line_t, bin_t
 use assert_m, only : assert, intrinsic_array_t
 use exponentiation_m, only : y
 implicit none
 
-type(string_t) intial_network_file, final_network_file
+type(string_t) final_network_file
 type(command_line_t) command_line
 
 final_network_file = string_t(command_line%flag_value("--output-file"))
@@ -47,7 +47,9 @@ program learn_exponentiation
 type(bin_t), allocatable :: bins(:)
 real, allocatable :: cost(:), random_numbers(:)
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 trainable_engine = perturbed_identity_network(perturbation_magnitude=0.05)
 call output(trainable_engine%to_inference_engine(), string_t("initial-network.json"))
@@ -60,10 +62,10 @@ program learn_exponentiation
 inputs = [(tensor_t(real([(j*i, j = 1,num_inputs)])/(num_inputs*num_pairs)), i = 1, num_pairs)]
 desired_outputs = y(inputs)
 output_sizes = [(size(desired_outputs(i)%values()),i=1,size(desired_outputs))]
-call assert(all([num_outputs==output_sizes]), "fit-polynomials: # outputs", intrinsic_array_t([num_outputs,output_sizes]))
+call assert(all([num_outputs==output_sizes]), "fit-polynomials: # outputs", intrinsic_array_t([num_outputs,output_sizes]))
 end block
 input_output_pairs = input_output_pair_t(inputs, desired_outputs)
-block
+block
 integer b
 bins = [(bin_t(num_items=num_pairs, num_bins=num_mini_batches, bin_number=b), b = 1, num_mini_batches)]
 end block
@@ -122,7 +124,7 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 real, intent(in) :: perturbation_magnitude
 integer, parameter :: n(*) = [8, 64, 64, 64, 6] ! nodes per layer (first layer = input, last layer = output)
 integer, parameter :: n_max = maxval(n), layers = size(n)
-integer j, k, l
+integer k, l
 real, allocatable :: identity(:,:,:), w_harvest(:,:,:), b_harvest(:,:)
 
 identity = reshape( [( [(e(k,n_max), k=1,n_max)], l = 1, layers-1 )], [n_max, n_max, layers-1])
2 changes: 2 additions & 0 deletions example/learn-microphysics-procedures.f90
@@ -39,7 +39,9 @@ program learn_microphysics_procedures
 integer, parameter :: nodes_per_layer(*) = [2, 72, 2]
 real, parameter :: cost_tolerance = 1.E-08
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 open(newunit=network_unit, file=network_file%string(), form='formatted', status='old', iostat=io_status, action='read')
 
10 changes: 6 additions & 4 deletions example/learn-multiplication.f90
@@ -22,12 +22,12 @@ program learn_multiplication
 !! This trains a neural network to learn the following six polynomial functions of its eight inputs.
 use inference_engine_m, only : &
 inference_engine_t, trainable_engine_t, mini_batch_t, tensor_t, input_output_pair_t, shuffle, relu_t
-use sourcery_m, only : string_t, file_t, command_line_t, bin_t, csv
+use sourcery_m, only : string_t, file_t, command_line_t, bin_t
 use assert_m, only : assert, intrinsic_array_t
 use multiply_inputs, only : y
 implicit none
 
-type(string_t) intial_network_file, final_network_file
+type(string_t) final_network_file
 type(command_line_t) command_line
 
 final_network_file = string_t(command_line%flag_value("--output-file"))
@@ -47,7 +47,9 @@ program learn_multiplication
 type(bin_t), allocatable :: bins(:)
 real, allocatable :: cost(:), random_numbers(:)
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 trainable_engine = perturbed_identity_network(perturbation_magnitude=0.05)
 call output(trainable_engine%to_inference_engine(), string_t("initial-network.json"))
@@ -122,7 +124,7 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 real, intent(in) :: perturbation_magnitude
 integer, parameter :: n(*) = [8, 64, 64, 64, 6]
 integer, parameter :: n_max = maxval(n), layers = size(n)
-integer j, k, l
+integer k, l
 real, allocatable :: identity(:,:,:), w_harvest(:,:,:), b_harvest(:,:)
 
 identity = reshape( [( [(e(k,n_max), k=1,n_max)], l = 1, layers-1 )], [n_max, n_max, layers-1])
@@ -144,4 +146,4 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 end associate
 end function
 
-end program
\ No newline at end of file
+end program
10 changes: 6 additions & 4 deletions example/learn-power-series.f90
@@ -22,12 +22,12 @@ program learn_power_series
 !! This trains a neural network to learn the following six polynomial functions of its eight inputs.
 use inference_engine_m, only : &
 inference_engine_t, trainable_engine_t, mini_batch_t, tensor_t, input_output_pair_t, shuffle, relu_t
-use sourcery_m, only : string_t, file_t, command_line_t, bin_t, csv
+use sourcery_m, only : string_t, file_t, command_line_t, bin_t
 use assert_m, only : assert, intrinsic_array_t
 use power_series, only : y
 implicit none
 
-type(string_t) intial_network_file, final_network_file
+type(string_t) final_network_file
 type(command_line_t) command_line
 
 final_network_file = string_t(command_line%flag_value("--output-file"))
@@ -47,7 +47,9 @@ program learn_power_series
 type(bin_t), allocatable :: bins(:)
 real, allocatable :: cost(:), random_numbers(:)
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 trainable_engine = perturbed_identity_network(perturbation_magnitude=0.05)
 call output(trainable_engine%to_inference_engine(), string_t("initial-network.json"))
@@ -124,7 +126,7 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 real, intent(in) :: perturbation_magnitude
 integer, parameter :: n(*) = [8, 196, 196, 196, 196, 6]
 integer, parameter :: n_max = maxval(n), layers = size(n)
-integer j, k, l
+integer k, l
 real, allocatable :: identity(:,:,:), w_harvest(:,:,:), b_harvest(:,:)
 
 identity = reshape( [( [(e(k,n_max), k=1,n_max)], l = 1, layers-1 )], [n_max, n_max, layers-1])
@@ -146,4 +148,4 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 end associate
 end function
 
-end program
\ No newline at end of file
+end program
2 changes: 2 additions & 0 deletions example/learn-saturated-mixing-ratio.f90
@@ -38,7 +38,9 @@ program train_saturated_mixture_ratio
 integer, parameter :: nodes_per_layer(*) = [2, 72, 1]
 real, parameter :: cost_tolerance = 1.E-08
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 open(newunit=network_unit, file=network_file%string(), form='formatted', status='old', iostat=io_status, action='read')
 
6 changes: 4 additions & 2 deletions example/train-and-write.f90
@@ -14,7 +14,7 @@ program train_and_write
 use assert_m, only : assert, intrinsic_array_t
 implicit none
 
-type(string_t) intial_network_file, final_network_file
+type(string_t) final_network_file
 type(command_line_t) command_line
 
 final_network_file = string_t(command_line%flag_value("--output-file"))
@@ -34,7 +34,9 @@ program train_and_write
 type(bin_t), allocatable :: bins(:)
 real, allocatable :: cost(:), random_numbers(:)
 
+#ifndef NAGFOR
 call random_init(image_distinct=.true., repeatable=.true.)
+#endif
 
 trainable_engine = perturbed_identity_network(perturbation_magnitude=0.2)
 call output(trainable_engine%to_inference_engine(), string_t("initial-network.json"))
@@ -124,4 +126,4 @@ function perturbed_identity_network(perturbation_magnitude) result(trainable_eng
 end associate
 end function
 
-end program
\ No newline at end of file
+end program
7 changes: 4 additions & 3 deletions src/inference_engine/activation_strategy_m.f90
@@ -15,7 +15,7 @@ module activation_strategy_m
 type, abstract :: activation_strategy_t
 contains
 procedure(activation_i), nopass, deferred :: activation
-procedure(function_name_i), nopass, deferred :: function_name
+procedure(function_name_i), deferred :: function_name
 end type
 
 abstract interface
@@ -27,9 +27,10 @@ elemental function activation_i(x) result(y)
 real(rkind) y
 end function
 
-elemental function function_name_i() result(string)
-import string_t
+elemental function function_name_i(self) result(string)
+import string_t, activation_strategy_t
 implicit none
+class(activation_strategy_t), intent(in) :: self
 type(string_t) string
 end function
 
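The `function_name` binding loses `nopass` here and in each concrete activation type below, so the deferred interface gains a passed-object dummy. The likely motivation: an elemental function with no dummy arguments has nothing to be elemental over, which nagfor apparently objects to; passing `self` gives the interface a scalar dummy and lets callers write `strategy%function_name()`. A minimal sketch of the revised pattern, with illustrative names (a fixed-length character result stands in for `string_t`, since elemental results cannot be allocatable):

! strategy_demo_m -- illustrative sketch of the passed-object pattern.
module strategy_demo_m
  implicit none

  type, abstract :: strategy_t
  contains
    procedure(name_i), deferred :: name ! passed-object binding: no nopass
  end type

  abstract interface
    elemental function name_i(self) result(name)
      import strategy_t
      implicit none
      class(strategy_t), intent(in) :: self
      character(len=8) name
    end function
  end interface

  type, extends(strategy_t) :: relu_demo_t
  contains
    procedure :: name => relu_name
  end type

contains

  elemental function relu_name(self) result(name)
    class(relu_demo_t), intent(in) :: self
    character(len=8) name
    name = "relu"
  end function

end module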
8 changes: 4 additions & 4 deletions src/inference_engine/inference_engine_s.f90
@@ -12,8 +12,8 @@
 implicit none
 
 interface assert_consistency
-module procedure inference_engine_consistency
-module procedure difference_consistency
+procedure inference_engine_consistency
+procedure difference_consistency
 end interface
 
 contains
@@ -53,7 +53,7 @@
 
 end procedure
 
-pure module subroutine inference_engine_consistency(self)
+pure subroutine inference_engine_consistency(self)
 
 type(inference_engine_t), intent(in) :: self
 
@@ -78,7 +78,7 @@ pure module subroutine inference_engine_consistency(self)
 
 end subroutine
 
-pure module subroutine difference_consistency(self)
+pure subroutine difference_consistency(self)
 
 type(difference_t), intent(in) :: self
 
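Two related fixes here: the consistency checkers lose their `module` prefixes, and the generic interface names them with plain `procedure` statements instead of `module procedure`. A `module subroutine` definition in a submodule asserts that a matching interface was declared in an ancestor module; these two helpers have no such interface, so nagfor presumably rejected the prefix. A minimal sketch of the corrected shape, with illustrative names and an assumed F2018 compiler:

! widget_m / widget_s -- illustrative sketch, not the library's API.
module widget_m
  implicit none
  type widget_t
    integer :: size = 1
  end type
  interface
    module subroutine use_widget(self) ! the only binding declared in the module
      type(widget_t), intent(in) :: self
    end subroutine
  end interface
end module

submodule(widget_m) widget_s
  implicit none
  interface assert_ok ! generic over submodule-local helpers
    procedure widget_ok ! plain `procedure`: no interface in the ancestor module
  end interface
contains
  module procedure use_widget ! implements the interface declared above
    call assert_ok(self)
  end procedure
  pure subroutine widget_ok(self) ! no `module` prefix: local to the submodule
    type(widget_t), intent(in) :: self
    if (self%size <= 0) error stop "invalid widget"
  end subroutine
end submodule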
5 changes: 3 additions & 2 deletions src/inference_engine/relu_m.f90
@@ -13,7 +13,7 @@ module relu_m
 contains
 procedure, nopass :: activation
 procedure, nopass :: activation_derivative
-procedure, nopass :: function_name
+procedure :: function_name
 end type
 
 interface
@@ -30,8 +30,9 @@ elemental module function activation_derivative(x) result(y)
 real(rkind) y
 end function
 
-elemental module function function_name() result(string)
+elemental module function function_name(self) result(string)
 implicit none
+class(relu_t), intent(in) :: self
 type(string_t) string
 end function
 
5 changes: 3 additions & 2 deletions src/inference_engine/sigmoid_m.f90
@@ -13,7 +13,7 @@ module sigmoid_m
 contains
 procedure, nopass :: activation
 procedure, nopass :: activation_derivative
-procedure, nopass :: function_name
+procedure :: function_name
 end type
 
 interface
@@ -30,8 +30,9 @@ elemental module function activation_derivative(x) result(y)
 real(rkind) y
 end function
 
-elemental module function function_name() result(string)
+elemental module function function_name(self) result(string)
 implicit none
+class(sigmoid_t), intent(in) :: self
 type(string_t) string
 end function
 
5 changes: 3 additions & 2 deletions src/inference_engine/step_m.f90
@@ -12,7 +12,7 @@ module step_m
 type, extends(activation_strategy_t) :: step_t
 contains
 procedure, nopass :: activation
-procedure, nopass :: function_name
+procedure :: function_name
 end type
 
 interface
@@ -23,8 +23,9 @@ elemental module function activation(x) result(y)
 real(rkind) y
 end function
 
-elemental module function function_name() result(string)
+elemental module function function_name(self) result(string)
 implicit none
+class(step_t), intent(in) :: self
 type(string_t) string
 end function
 
5 changes: 3 additions & 2 deletions src/inference_engine/swish_m.f90
@@ -13,7 +13,7 @@ module swish_m
 contains
 procedure, nopass :: activation
 procedure, nopass :: activation_derivative
-procedure, nopass :: function_name
+procedure :: function_name
 end type
 
 interface
@@ -30,8 +30,9 @@ elemental module function activation_derivative(x) result(y)
 real(rkind) y
 end function
 
-elemental module function function_name() result(string)
+elemental module function function_name(self) result(string)
 implicit none
+class(swish_t), intent(in) :: self
 type(string_t) string
 end function
 
2 changes: 1 addition & 1 deletion src/inference_engine/trainable_engine_m.f90
@@ -103,7 +103,7 @@ elemental module function num_layers(self) result(n_layers)
 integer n_layers
 end function
 
-pure module function to_inference_engine(self) result(inference_engine)
+module function to_inference_engine(self) result(inference_engine)
 implicit none
 class(trainable_engine_t), intent(in) :: self
 type(inference_engine_t) :: inference_engine
9 changes: 5 additions & 4 deletions src/inference_engine/trainable_engine_s.f90
@@ -3,8 +3,6 @@
 submodule(trainable_engine_m) trainable_engine_s
 use assert_m, only : assert
 use intrinsic_array_m, only : intrinsic_array_t
-use input_output_pair_m, only : input_output_pair_t
-use sigmoid_m, only : sigmoid_t
 use tensor_m, only : tensor_t
 implicit none
 
@@ -186,7 +184,7 @@
 real, parameter :: obeta(*) = [1._rkind - beta(1), 1._rkind - beta(2)]
 real, parameter :: epsilon = real(1.D-08,rkind)
 
-adjust_weights_and_biases: &
+adam_adjust_weights_and_biases: &
 do concurrent(l = 1:output_layer)
 dcdw(1:n(l),1:n(l-1),l) = dcdw(1:n(l),1:n(l-1),l)/(mini_batch_size)
 vdw(1:n(l),1:n(l-1),l) = beta(1)*vdw(1:n(l),1:n(l-1),l) + obeta(1)*dcdw(1:n(l),1:n(l-1),l)
@@ -202,7 +200,7 @@
 vdbc(1:n(l),l) = vdb(1:n(l),l)/(1._rkind - beta(1)**num_mini_batches)
 sdbc(1:n(l),l) = sdb(1:n(l),l)/(1._rkind - beta(2)**num_mini_batches)
 b(1:n(l),l) = b(1:n(l),l) - alpha*vdbc(1:n(l),l)/(sqrt(sdbc(1:n(l),l))+epsilon) ! Adjust weights
-end do adjust_weights_and_biases
+end do adam_adjust_weights_and_biases
 end block
 else
 adjust_weights_and_biases: &
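The rename matters because the `else` branch, partially visible above, opens another construct named `adjust_weights_and_biases`. Construct names are ordinary local identifiers, so two named constructs in one scoping unit cannot share a name even in mutually exclusive branches; giving the Adam branch a distinct name resolves the clash nagfor reported. A minimal standalone illustration (names and loop bodies are ours):

! construct_names.f90 -- illustrative sketch of the naming rule.
program construct_names
  implicit none
  logical, parameter :: use_adam = .true.
  integer i
  if (use_adam) then
    adam_adjust: do i = 1, 3 ! must not reuse the name of the construct below
      print *, "adam update", i
    end do adam_adjust
  else
    plain_adjust: do i = 1, 3
      print *, "plain update", i
    end do plain_adjust
  end if
end program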
@@ -233,6 +231,9 @@
 end procedure
 
 module procedure to_inference_engine
+! assignment-stmt disallows the procedure from being pure because it might
+! deallocate polymorphic allocatable subcomponent `activation_strategy_`
+! TODO: consider how this affects design
 inference_engine = inference_engine_t(metadata = self%metadata_, weights = self%w, biases = self%b, nodes = self%n)
 end procedure
 
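The new comment explains the dropped `pure` prefix on `to_inference_engine` in trainable_engine_m.f90 above: intrinsic assignment to an object with a polymorphic allocatable component may deallocate that component, and anything that might deallocate a polymorphic entity is forbidden in a pure procedure (deallocation could invoke an impure final subroutine of the dynamic type). A minimal sketch of the disallowed pattern, shown only to illustrate the constraint; all names here are ours:

! purity_demo_m -- a conforming compiler must diagnose the marked assignment.
module purity_demo_m
  implicit none
  type holder_t
    class(*), allocatable :: payload ! polymorphic allocatable component
  end type
contains
  pure subroutine copy(lhs, rhs) ! invalid as written; remove `pure` to make it legal
    type(holder_t), intent(inout) :: lhs
    type(holder_t), intent(in) :: rhs
    lhs = rhs ! intrinsic assignment may deallocate the polymorphic lhs%payload
  end subroutine
end module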