Skip to content

Commit

Permalink
Merge pull request #102 from BerkeleyLab/simplify-app
Browse files Browse the repository at this point in the history
  • Loading branch information
rouson authored Nov 30, 2023
2 parents 9673024 + 0ed0b19 commit 456d51e
Show file tree
Hide file tree
Showing 16 changed files with 84 additions and 89 deletions.
2 changes: 0 additions & 2 deletions .github/workflows/deploy-docs.yml
Original file line number Diff line number Diff line change
Expand Up @@ -23,8 +23,6 @@ jobs:
run: |
PATH=/home/linuxbrew/.linuxbrew/bin/:"$PATH"
PATH=`brew --prefix ford`/bin:"$PATH"
echo $PATH
ford --version
ford ford.md
cp ./README.md ./doc/html
- name: Upload Documentation
Expand Down
88 changes: 21 additions & 67 deletions app/train-cloud-microphysics.f90
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ program train_cloud_microphysics
!! Internal dependencies:
use inference_engine_m, only : &
inference_engine_t, mini_batch_t, input_output_pair_t, tensor_t, trainable_engine_t, rkind, NetCDF_file_t, &
training_configuration_t
training_configuration_t, shuffle
use ubounds_m, only : ubounds_t
implicit none

Expand Down Expand Up @@ -235,7 +235,26 @@ subroutine read_train_write(training_configuration, base_name, plot_unit, previo
else
close(network_unit)
print *,"Initializing a new network"
trainable_engine = perturbed_identity_network(training_configuration, perturbation_magnitude=0.05)
block
character(len=len('YYYYMMDD')) date

call date_and_time(date)

associate(activation => training_configuration%differentiable_activation_strategy())
associate( &
model_name => string_t("Thompson microphysics"), &
author => string_t("Inference Engine"), &
date_string => string_t(date), &
activation_name => activation%function_name(), &
residual_network => string_t(trim(merge("true ", "false", training_configuration%skip_connections()))) &
)
trainable_engine = trainable_engine_t( &
training_configuration, metadata = [model_name, author, date_string, activation_name, residual_network], &
perturbation_magnitude=0.05 &
)
end associate
end associate
end block
end if

if (.not. allocated(end_step)) end_step = t_end
Expand Down Expand Up @@ -339,77 +358,12 @@ subroutine read_train_write(training_configuration, base_name, plot_unit, previo

end subroutine read_train_write

subroutine shuffle(pairs)
  !! Shuffle pairs in place using the Durstenfeld variant of the
  !! Fisher-Yates algorithm, driven by a fresh draw of uniform random numbers.
  type(input_output_pair_t), intent(inout) :: pairs(:)
  type(input_output_pair_t) temp
  real harvest(2:size(pairs))  ! one uniform deviate per swap position
  integer i, j

  ! Bug fix: the previous version called
  ! random_init(image_distinct=.true., repeatable=.true.) here, which
  ! re-seeded the generator identically on EVERY call, so every epoch
  ! produced the same permutation and repeated shuffles were no-ops.
  ! Seeding belongs with the caller (once), not inside the shuffle.
  call random_number(harvest)

  durstenfeld_shuffle: &
  do i = size(pairs), 2, -1
    j = 1 + int(harvest(i)*i)  ! uniform index in [1, i]
    temp = pairs(i)
    pairs(i) = pairs(j)
    pairs(j) = temp
  end do durstenfeld_shuffle

end subroutine

pure function normalize(x, x_min, x_max) result(x_normalized)
  !! Linearly map x onto [0,1] given the data range [x_min, x_max].
  real(rkind), intent(in) :: x(:,:,:,:), x_min, x_max
  real(rkind), allocatable :: x_normalized(:,:,:,:)
  ! Guard against division by zero below; the message previously
  ! misspelled the procedure name as "normaliz".
  call assert(x_min/=x_max, "train_cloud_microphysics(normalize): x_min/=x_max")
  x_normalized = (x - x_min)/(x_max - x_min)
end function

pure function e(j,n) result(unit_vector)
  !! Return the j-th Cartesian unit vector in n dimensions:
  !! a real vector of length n with 1.0 at position j and 0.0 elsewhere.
  integer, intent(in) :: j, n
  integer i
  real, allocatable :: unit_vector(:)
  ! Compare the index vector [1..n] against j elementwise; positions
  ! matching j become 1.0, all others 0.0 (all zeros if j is out of range).
  unit_vector = merge(1.0, 0.0, [(i, i = 1, n)] == j)
end function

function perturbed_identity_network(training_configuration, perturbation_magnitude) result(trainable_engine)
  !! Construct a trainable engine whose weights are identity matrices
  !! perturbed by uniform noise in [-perturbation_magnitude, +perturbation_magnitude]
  !! and whose biases are pure noise in the same range.
  type(training_configuration_t), intent(in) :: training_configuration
  real(rkind), intent(in) :: perturbation_magnitude
  type(trainable_engine_t) trainable_engine

  ! local variables:
  integer k, l
  real, allocatable :: identity(:,:,:), w_harvest(:,:,:), b_harvest(:,:)
  ! Bug fix: was len('YYYMMDD') (7 characters); the date_and_time intrinsic
  ! requires a date argument of length >= 8 to receive CCYYMMDD.
  character(len=len('YYYYMMDD')) date

  call date_and_time(date)

  associate(n=>training_configuration%nodes_per_layer(), activation=>training_configuration%differentiable_activation_strategy())
  associate(n_max => maxval(n), layers => size(n))

  ! Stack (layers-1) copies of the n_max x n_max identity matrix.
  identity = reshape( [( [(e(k,n_max), k=1,n_max)], l = 1, layers-1 )], [n_max, n_max, layers-1])
  allocate(w_harvest, mold = identity)
  allocate(b_harvest(size(identity,1), size(identity,3)))
  call random_number(w_harvest)
  call random_number(b_harvest)

  associate( &
    ! Map harvest in [0,1) to perturbations in [-magnitude, +magnitude):
    w => identity + perturbation_magnitude*(w_harvest-0.5)/0.5, &
    b => perturbation_magnitude*(b_harvest-0.5)/0.5, &
    activation_name => activation%function_name(), &
    ! merge requires equal-length strings, hence the padded "true ":
    residual_network => string_t(trim(merge("true ", "false", training_configuration%skip_connections()))), &
    model_name => string_t("Thompson microphysics"), &
    author => string_t("Inference Engine"), &
    date_string => string_t(date) &
  )
    trainable_engine = trainable_engine_t( &
      nodes = n, weights = w, biases = b, differentiable_activation_strategy = activation, &
      metadata = [model_name, author, date_string, activation_name, residual_network] &
    )
  end associate
  end associate
  end associate
end function

end program train_cloud_microphysics
#endif // __INTEL_FORTRAN
2 changes: 1 addition & 1 deletion example/fit-polynomials.f90
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ program train_polynomials
integer e, b
do e = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
print *,sum(cost)/size(cost)
Expand Down
2 changes: 1 addition & 1 deletion example/learn-addition.f90
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ program train_polynomials
integer e, b
do e = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
print *,sum(cost)/size(cost)
Expand Down
2 changes: 1 addition & 1 deletion example/learn-exponentiation.f90
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ program train_polynomials
integer e, b
do e = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
print *,sum(cost)/size(cost)
Expand Down
2 changes: 1 addition & 1 deletion example/learn-microphysics-procedures.f90
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ program learn_microphysics_procedures

do e = previous_epoch + 1, previous_epoch + max_num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
call system_clock(counter_end, clock_rate)
Expand Down
2 changes: 1 addition & 1 deletion example/learn-multiplication.f90
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ program train_polynomials
integer e, b
do e = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
print *,sum(cost)/size(cost)
Expand Down
2 changes: 1 addition & 1 deletion example/learn-power-series.f90
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ program train_polynomials
integer e, b
do e = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
print *,sum(cost)/size(cost)
Expand Down
2 changes: 1 addition & 1 deletion example/learn-saturated-mixing-ratio.f90
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ program train_saturated_mixture_ratio

do e = previous_epoch + 1, previous_epoch + max_num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
call system_clock(counter_end, clock_rate)
Expand Down
2 changes: 1 addition & 1 deletion example/train-and-write.f90
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ program train_and_write
integer e, b
do e = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(b)%first():bins(b)%last())), b = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
print *,sum(cost)/size(cost)
Expand Down
3 changes: 1 addition & 2 deletions src/inference_engine/input_output_pair_m.f90
Original file line number Diff line number Diff line change
Expand Up @@ -41,10 +41,9 @@ elemental module function expected_outputs(self) result(my_expected_outputs)
type(tensor_t) :: my_expected_outputs
end function

pure module subroutine shuffle(pairs, random_numbers)
module subroutine shuffle(pairs)
implicit none
type(input_output_pair_t), intent(inout) :: pairs(:)
real, intent(in) :: random_numbers(2:)
end subroutine

end interface
Expand Down
7 changes: 4 additions & 3 deletions src/inference_engine/input_output_pair_s.f90
Original file line number Diff line number Diff line change
Expand Up @@ -21,14 +21,15 @@

module procedure shuffle
type(input_output_pair_t) temp
real harvest(2:size(pairs))
integer i, j

call assert(size(random_numbers) >= size(pairs)-1, "input_output_pair_s(shuffle): size(random_numbers) >= size(pairs)-1")
call random_number(harvest)

durstenfeld_shuffle: &
do i = size(pairs), 2, -1
j = 1 + int(random_numbers(i)*i)
temp = pairs(i)
j = 1 + int(harvest(i)*i)
temp = pairs(i)
pairs(i) = pairs(j)
pairs(j) = temp
end do durstenfeld_shuffle
Expand Down
9 changes: 9 additions & 0 deletions src/inference_engine/trainable_engine_m.f90
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ module trainable_engine_m
use kind_parameters_m, only : rkind
use tensor_m, only : tensor_t
use mini_batch_m, only : mini_batch_t
use training_configuration_m, only : training_configuration_t
implicit none

private
Expand Down Expand Up @@ -51,6 +52,14 @@ pure module function construct_from_inference_engine(inference_engine) result(tr
type(trainable_engine_t) trainable_engine
end function

module function perturbed_identity_network(training_configuration, perturbation_magnitude, metadata) result(trainable_engine)
implicit none
type(training_configuration_t), intent(in) :: training_configuration
type(string_t), intent(in) :: metadata(:)
real(rkind), intent(in) :: perturbation_magnitude
type(trainable_engine_t) trainable_engine
end function

end interface

interface
Expand Down
37 changes: 37 additions & 0 deletions src/inference_engine/trainable_engine_s.f90
Original file line number Diff line number Diff line change
Expand Up @@ -235,4 +235,41 @@
inference_engine = inference_engine_t(metadata = self%metadata_, weights = self%w, biases = self%b, nodes = self%n)
end procedure

module procedure perturbed_identity_network
  !! Build a trainable engine from noise-perturbed identity weight matrices:
  !! weights = I + noise in [-magnitude, +magnitude), biases = pure noise
  !! in the same range, with the caller-supplied metadata attached.

  integer node, layer
  real, allocatable :: identity(:,:,:), weight_noise(:,:,:), bias_noise(:,:)

  associate(n => training_configuration%nodes_per_layer())
    associate(n_max => maxval(n), layers => size(n))

      ! Stack (layers-1) copies of the n_max x n_max identity matrix,
      ! built column-by-column from unit vectors.
      identity = reshape([( [(e(node,n_max), node = 1, n_max)], layer = 1, layers-1 )], [n_max, n_max, layers-1])

      allocate(weight_noise, mold = identity)
      allocate(bias_noise(size(identity,1), size(identity,3)))
      call random_number(weight_noise)
      call random_number(bias_noise)

      associate( &
        ! Map uniform deviates in [0,1) to [-magnitude, +magnitude):
        w => identity + perturbation_magnitude*(weight_noise-0.5)/0.5, &
        b => perturbation_magnitude*(bias_noise-0.5)/0.5, &
        activation => training_configuration%differentiable_activation_strategy() &
      )
        trainable_engine = trainable_engine_t( &
          nodes = n, weights = w, biases = b, differentiable_activation_strategy = activation, metadata = metadata &
        )
      end associate

    end associate
  end associate

contains

  pure function e(j,n) result(unit_vector)
    !! j-th Cartesian unit vector in n dimensions.
    integer, intent(in) :: j, n
    integer i
    real, allocatable :: unit_vector(:)
    unit_vector = merge(1.0, 0.0, [(i, i = 1, n)] == j)
  end function

end procedure

end submodule trainable_engine_s
4 changes: 2 additions & 2 deletions src/inference_engine/training_configuration_m.f90
Original file line number Diff line number Diff line change
Expand Up @@ -74,13 +74,13 @@ elemental module function learning_rate(self) result(rate)
real(rkind) rate
end function

module function differentiable_activation_strategy(self) result(strategy)
pure module function differentiable_activation_strategy(self) result(strategy)
implicit none
class(training_configuration_t), intent(in) :: self
class(differentiable_activation_strategy_t), allocatable :: strategy
end function

module function nodes_per_layer(self) result(nodes)
pure module function nodes_per_layer(self) result(nodes)
implicit none
class(training_configuration_t), intent(in) :: self
integer, allocatable :: nodes(:)
Expand Down
7 changes: 2 additions & 5 deletions test/trainable_engine_test_m.f90
Original file line number Diff line number Diff line change
Expand Up @@ -390,7 +390,7 @@ function perturbed_identity_converges() result(test_passes)
type(tensor_t), allocatable :: inputs(:)
type(trainable_engine_t) trainable_engine
type(bin_t), allocatable :: bins(:)
real(rkind), allocatable :: cost(:), random_numbers(:)
real(rkind), allocatable :: cost(:)
integer, allocatable :: neurons(:)
integer, parameter :: num_pairs = 6
integer, parameter :: num_epochs = 148
Expand All @@ -409,11 +409,8 @@ function perturbed_identity_converges() result(test_passes)
end associate
bins = [(bin_t(num_items=num_pairs, num_bins=num_bins, bin_number=bin), bin = 1, num_bins)]

allocate(random_numbers(2:size(input_output_pairs)))

do epoch = 1,num_epochs
call random_number(random_numbers)
call shuffle(input_output_pairs, random_numbers)
call shuffle(input_output_pairs)
mini_batches = [(mini_batch_t(input_output_pairs(bins(bin)%first():bins(bin)%last())), bin = 1, size(bins))]
call trainable_engine%train(mini_batches, cost, adam=.true., learning_rate=1.5)
end do
Expand Down

0 comments on commit 456d51e

Please sign in to comment.