diff --git a/doc/specs/stdlib_specialfunctions_activations.md b/doc/specs/stdlib_specialfunctions_activations.md
new file mode 100644
index 000000000..12afe4bc3
--- /dev/null
+++ b/doc/specs/stdlib_specialfunctions_activations.md
@@ -0,0 +1,765 @@
+---
+title: specialfunctions_activations
+---
+
+# Special functions - Neural network activations and their gradients
+
+[TOC]
+
+## `Gaussian` - Gaussian function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Gaussian function:
+$$f(x) = \exp(-x^2)$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):gaussian(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_gaussian.f90!}
+```
+
+## `Gaussian_grad` - Gradient of the Gaussian function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Gaussian function:
+$$f(x) = -2 x \exp(-x^2)$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):gaussian_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Elu` - Exponential Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Exponential Linear Unit function:
+$$
+f(x) =
+\begin{cases}
+x, & \text{if } x \geq 0 \\
+a (\exp(x) - 1), & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):elu(interface)]] ` (x,a)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+`a`: Shall be of the same type and kind as `x`.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_elu.f90!}
+```
+
+## `Elu_grad` - Gradient of the Exponential Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Exponential Linear Unit function:
+$$
+f(x) =
+\begin{cases}
+1, & \text{if } x \geq 0 \\
+a \exp(x), & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):elu_grad(interface)]] ` (x,a)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+`a`: Shall be of the same type and kind as `x`.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Relu` - Rectified Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Rectified Linear Unit function:
+$$f(x) = \max(0, x)$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):relu(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
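+
+Since `relu` and `relu_grad` are elemental, they apply element-wise to arrays, which is the usual pattern for propagating gradients through an activation layer. A minimal sketch of the element-wise chain rule (the names `x`, `dy` and `dx` are illustrative only and assumed to be conforming `real` arrays):
+```fortran
+! forward pass, then element-wise chain rule for the backward pass
+y  = relu(x)
+dx = dy * relu_grad(x)
+```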
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_relu.f90!}
+```
+
+## `Relu_grad` - Gradient of the Rectified Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Rectified Linear Unit function:
+$$
+f(x) =
+\begin{cases}
+1, & \text{if } x > 0 \\
+0, & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):relu_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Leaky_relu` - Leaky Rectified Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Leaky Rectified Linear Unit function:
+$$
+f(x) =
+\begin{cases}
+x, & \text{if } x \geq 0 \\
+a x, & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):leaky_relu(interface)]] ` (x,a)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+`a`: Shall be of the same type and kind as `x`.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_leaky_relu.f90!}
+```
+
+## `Leaky_relu_grad` - Gradient of the Leaky Rectified Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Leaky Rectified Linear Unit function:
+$$
+f(x) =
+\begin{cases}
+1, & \text{if } x \geq 0 \\
+a, & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):leaky_relu_grad(interface)]] ` (x,a)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+`a`: Shall be of the same type and kind as `x`.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Gelu` - Gaussian Error Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Gaussian Error Linear Unit function:
+$$f(x) = \frac{1}{2} x \left( 1 + \text{erf}\left(\frac{x}{\sqrt{2}}\right) \right)$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):gelu(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_gelu.f90!}
+```
+
+## `Gelu_grad` - Gradient of the Gaussian Error Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Gaussian Error Linear Unit function:
+$$
+f(x) = \frac{1}{2} \left( 1 + \text{erf}\left(\frac{x}{\sqrt{2}}\right) \right) + \frac{x}{\sqrt{2 \pi}} \exp\left(-\frac{x^2}{2}\right)
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):gelu_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
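+
+The analytical gradient can be sanity-checked against a centered finite difference. A minimal sketch (a hypothetical driver program; the step size `h` is an assumption):
+```fortran
+program check_gelu_grad
+    use stdlib_kinds, only: dp
+    use stdlib_specialfunctions, only: gelu, gelu_grad
+    implicit none
+    real(dp), parameter :: h = 1.0e-6_dp
+    real(dp) :: x, dfdx, fd
+
+    x    = 0.5_dp
+    dfdx = gelu_grad(x)                          ! analytical gradient
+    fd   = (gelu(x + h) - gelu(x - h)) / (2*h)   ! centered finite difference
+    print *, dfdx, fd                            ! the two values should agree closely
+end program check_gelu_grad
+```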
+
+## `Gelu_approx` - Approximation of the Gaussian Error Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes an approximation of the Gaussian Error Linear Unit function using a fast $\text{erf}$ approximation:
+$$f(x) = \frac{1}{2} x \left( 1 + \text{ferf}\left(\frac{x}{\sqrt{2}}\right) \right)$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):gelu_approx(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Gelu_approx_grad` - Gradient of the Approximated Gaussian Error Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the approximated Gaussian Error Linear Unit function using a fast $\text{erf}$ approximation:
+$$
+f(x) = \frac{1}{2} \left( 1 + \text{ferf}\left(\frac{x}{\sqrt{2}}\right) \right) + \frac{x}{\sqrt{2 \pi}} \exp\left(-\frac{x^2}{2}\right)
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):gelu_approx_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Selu` - Scaled Exponential Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Applies the Scaled Exponential Linear Unit activation function:
+$$
+f(x) =
+\begin{cases}
+scale \, x, & \text{if } x \geq 0 \\
+scale \, (\alpha \exp(x) - \alpha), & \text{otherwise}
+\end{cases}
+$$
+where $scale = 1.0507009873554804934193349852946$ and $\alpha = 1.6732632423543772848170429916717$.
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):selu(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_selu.f90!}
+```
+
+## `Selu_grad` - Gradient of the Scaled Exponential Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Applies the gradient of the Scaled Exponential Linear Unit activation function:
+$$
+f(x) =
+\begin{cases}
+scale, & \text{if } x \geq 0 \\
+scale \, \alpha \exp(x), & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):selu_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Sigmoid` - Sigmoid function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Sigmoid function:
+$$f(x) = \frac{1}{1+\exp(-x)}$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):sigmoid(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Sigmoid_grad` - Gradient of the Sigmoid function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Sigmoid function:
+$$f(x) = \frac{\exp(x)}{(1+\exp(x))^2}$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):sigmoid_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `SiLU` - Sigmoid Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Sigmoid Linear Unit function:
+$$f(x) = \frac{x}{1+\exp(-x)}$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):silu(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_silu.f90!}
+```
+
+## `Silu_grad` - Gradient of the Sigmoid Linear Unit function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Sigmoid Linear Unit function:
+$$f(x) = \frac{\exp(x) \, (1 + x + \exp(x))}{(1+\exp(x))^2}$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):silu_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Step` - Step function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Step function:
+$$
+f(x) =
+\begin{cases}
+1, & \text{if } x > 0 \\
+0, & \text{otherwise}
+\end{cases}
+$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):step(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_step.f90!}
+```
+
+## `Step_grad` - Gradient of the Step function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Step function:
+$$f(x) = 0$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):step_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+## `Softmax` - Softmax function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Softmax function:
+$$f(x_i) = \frac{\exp(x_i - \max(x))}{\sum_j \exp(x_j - \max(x))}$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):softmax(interface)]] ` (x,dim)`
+
+### Class
+
+Pure function for ranks 1 to 4.
+
+### Arguments
+
+`x`: Shall be an array of rank 1 to 4 of any `real` kind.
+`dim`: integer scalar indicating along which dimension to apply the normalization. This argument is optional and defaults to `1`.
+
+### Return value
+
+The function returns an array with the same rank and kind as the input argument `x`.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_softmax.f90!}
+```
+
+## `Softmax_grad` - Gradient of the Softmax function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Softmax function:
+$$f(x,dim) = \text{Softmax}(x,dim) (1 - \text{Softmax}(x,dim))$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):softmax_grad(interface)]] ` (x,dim)`
+
+### Class
+
+Pure function for ranks 1 to 4.
+
+### Arguments
+
+`x`: Shall be an array of rank 1 to 4 of any `real` kind.
+`dim`: integer scalar indicating along which dimension to apply the normalization. This argument is optional and defaults to `1`.
+
+### Return value
+
+The function returns an array with the same rank and kind as the input argument `x`.
+
+## `Softplus` - Softplus function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the Softplus function:
+$$f(x) = \log(\exp(x)+1)$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):softplus(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
+
+### Example
+```fortran
+{!example/specialfunctions_activations/example_softplus.f90!}
+```
+
+## `Softplus_grad` - Gradient of the Softplus function
+
+### Status
+
+Experimental
+
+### Description
+
+Computes the gradient of the Softplus function:
+$$f(x) = \frac{\exp(x)}{\exp(x)+1}$$
+
+### Syntax
+
+`result = ` [[stdlib_specialfunctions(module):softplus_grad(interface)]] ` (x)`
+
+### Class
+
+Elemental function
+
+### Arguments
+
+`x`: Shall be a scalar or array of any `real` kind.
+
+### Return value
+
+The function returns a value with the same type and kind as the input argument.
\ No newline at end of file
diff --git a/example/specialfunctions_activations/CMakeLists.txt b/example/specialfunctions_activations/CMakeLists.txt
new file mode 100644
index 000000000..d2be7c802
--- /dev/null
+++ b/example/specialfunctions_activations/CMakeLists.txt
@@ -0,0 +1,11 @@
+ADD_EXAMPLE(elu)
+ADD_EXAMPLE(gaussian)
+ADD_EXAMPLE(gelu)
+ADD_EXAMPLE(leaky_relu)
+ADD_EXAMPLE(relu)
+ADD_EXAMPLE(selu)
+ADD_EXAMPLE(silu)
+ADD_EXAMPLE(softmax)
+ADD_EXAMPLE(logsoftmax)
+ADD_EXAMPLE(softplus)
+ADD_EXAMPLE(step)
diff --git a/example/specialfunctions_activations/example_elu.f90 b/example/specialfunctions_activations/example_elu.f90
new file mode 100644
index 000000000..50919c821
--- /dev/null
+++ b/example/specialfunctions_activations/example_elu.f90
@@ -0,0 +1,13 @@
+program example_elu
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: elu
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = elu( x , 1.0_sp )
+    print *, y
+end program example_elu
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_gaussian.f90 b/example/specialfunctions_activations/example_gaussian.f90
new file mode 100644
index 000000000..45076c2e0
--- /dev/null
+++ b/example/specialfunctions_activations/example_gaussian.f90
@@ -0,0 +1,13 @@
+program example_gaussian
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: gaussian
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = gaussian( x )
+    print *, y
+end program example_gaussian
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_gelu.f90 b/example/specialfunctions_activations/example_gelu.f90
new file mode 100644
index 000000000..8748eba3b
--- /dev/null
+++ b/example/specialfunctions_activations/example_gelu.f90
@@ -0,0 +1,13 @@
+program example_gelu
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: gelu
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = gelu( x )
+    print *, y
+end program example_gelu
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_leaky_relu.f90 b/example/specialfunctions_activations/example_leaky_relu.f90
new file mode 100644
index 000000000..894c81b94
--- /dev/null
+++ b/example/specialfunctions_activations/example_leaky_relu.f90
@@ -0,0 +1,13 @@
+program example_leaky_relu
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: leaky_relu
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = leaky_relu( x , 0.1_sp )
+    print *, y
+end program example_leaky_relu
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_logsoftmax.f90 b/example/specialfunctions_activations/example_logsoftmax.f90
new file mode 100644
index 000000000..1d868fbb3
--- /dev/null
+++ b/example/specialfunctions_activations/example_logsoftmax.f90
@@ -0,0 +1,13 @@
+program example_logsoftmax
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: logsoftmax
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = logsoftmax( x )
+    print *, y
+end program example_logsoftmax
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_relu.f90 b/example/specialfunctions_activations/example_relu.f90
new file mode 100644
index 000000000..8e23ea457
--- /dev/null
+++ b/example/specialfunctions_activations/example_relu.f90
@@ -0,0 +1,13 @@
+program example_relu
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: relu
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = relu( x )
+    print *, y
+end program example_relu
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_selu.f90 b/example/specialfunctions_activations/example_selu.f90
new file mode 100644
index 000000000..b51ed2b7f
--- /dev/null
+++ b/example/specialfunctions_activations/example_selu.f90
@@ -0,0 +1,13 @@
+program example_selu
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: selu
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = selu( x )
+    print *, y
+end program example_selu
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_silu.f90 b/example/specialfunctions_activations/example_silu.f90
new file mode 100644
index 000000000..46534c938
--- /dev/null
+++ b/example/specialfunctions_activations/example_silu.f90
@@ -0,0 +1,13 @@
+program example_silu
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: silu
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = silu( x )
+    print *, y
+end program example_silu
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_softmax.f90 b/example/specialfunctions_activations/example_softmax.f90
new file mode 100644
index 000000000..accb9e9ec
--- /dev/null
+++ b/example/specialfunctions_activations/example_softmax.f90
@@ -0,0 +1,13 @@
+program example_softmax
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: softmax
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = softmax( x )
+    print *, y
+end program example_softmax
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_softplus.f90 b/example/specialfunctions_activations/example_softplus.f90
new file mode 100644
index 000000000..30f1e928e
--- /dev/null
+++ b/example/specialfunctions_activations/example_softplus.f90
@@ -0,0 +1,13 @@
+program example_softplus
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: softplus
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = softplus( x )
+    print *, y
+end program example_softplus
+
\ No newline at end of file
diff --git a/example/specialfunctions_activations/example_step.f90 b/example/specialfunctions_activations/example_step.f90
new file mode 100644
index 000000000..77b4747d3
--- /dev/null
+++ b/example/specialfunctions_activations/example_step.f90
@@ -0,0 +1,13 @@
+program example_step
+    use stdlib_kinds, only: sp
+    use stdlib_math, only: linspace
+    use stdlib_specialfunctions, only: step
+    implicit none
+    integer, parameter :: n = 10
+    real(sp) :: x(n), y(n)
+
+    x = linspace(-2._sp, 2._sp, n)
+    y = step( x )
+    print *, y
+end program example_step
+
\ No newline at end of file
diff --git a/include/common.fypp b/include/common.fypp
index de0a7b911..59c395e81 100644
--- a/include/common.fypp
+++ b/include/common.fypp
@@ -230,6 +230,29 @@ ${prefix + joinstr.join([line.strip() for line in txt.split("\n")]) + suffix}$
 #:endif
 #:enddef
 
+#! Brace enclosed, comma separated Fortran expressions for a shape.
+#!
+#! It defines an output variable with the same shape as the input variable.
+#!
+#! Args:
+#!     varname (str): Name of the variable to be used as origin
+#!     origrank (int): Rank of the original variable
+#!
+#! Returns:
+#!     Shape expression enclosed in braces, so that it can be used as suffix to
+#!     define array shapes in declarations.
+#!
+#:def shape(varname, origrank)
+  #:assert origrank > 0
+  #:if origrank > 1
+    #:call join_lines(joinstr=", ", prefix="(", suffix=")")
+      #:for i in range(1, origrank+1)
+        size(${varname}$, ${i}$)
+      #:endfor
+    #:endcall
+  #:endif
+#:enddef
+
 #! Generates a routine name from a generic name, rank, type and kind
 #!
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 75989dbc9..dc12651cc 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -48,7 +48,9 @@ set(fppFiles
     stdlib_sparse_conversion.fypp
     stdlib_sparse_kinds.fypp
     stdlib_sparse_spmv.fypp
+    stdlib_specialfunctions_activations.fypp
     stdlib_specialfunctions_gamma.fypp
+    stdlib_specialfunctions.fypp
     stdlib_stats.fypp
     stdlib_stats_corr.fypp
     stdlib_stats_cov.fypp
@@ -121,7 +123,6 @@ set(SRC
     stdlib_sorting_radix_sort.f90
     stdlib_system.F90
     stdlib_sparse.f90
-    stdlib_specialfunctions.f90
     stdlib_specialfunctions_legendre.f90
     stdlib_quadrature_gauss.f90
     stdlib_stringlist_type.f90
diff --git a/src/stdlib_specialfunctions.f90 b/src/stdlib_specialfunctions.f90
deleted file mode 100644
index a8f37bfac..000000000
--- a/src/stdlib_specialfunctions.f90
+++ /dev/null
@@ -1,34 +0,0 @@
-module stdlib_specialfunctions
-    use stdlib_kinds, only: sp, dp, xdp, qp
-
-    implicit none
-
-    private
-
-    public :: legendre
-    public :: dlegendre
-
-
-    interface legendre
-        !! version: experimental
-        !!
-        !! Legendre polynomial
-        pure elemental module function legendre_fp64(n,x) result(leg)
-            integer, intent(in) :: n
-            real(dp), intent(in) :: x
-            real(dp) :: leg
-        end function
-    end interface
-
-    interface dlegendre
-        !! version: experimental
-        !!
-        !! First derivative Legendre polynomial
-        pure elemental module function dlegendre_fp64(n,x) result(dleg)
-            integer, intent(in) :: n
-            real(dp), intent(in) :: x
-            real(dp) :: dleg
-        end function
-    end interface
-
-end module stdlib_specialfunctions
diff --git a/src/stdlib_specialfunctions.fypp b/src/stdlib_specialfunctions.fypp
new file mode 100644
index 000000000..f493657d0
--- /dev/null
+++ b/src/stdlib_specialfunctions.fypp
@@ -0,0 +1,466 @@
+#:include "common.fypp"
+#:set RANKS = range(2, MAXRANK + 1)
+module stdlib_specialfunctions
+    use stdlib_kinds, only: int8, int16, int32, int64, sp, dp, xdp, qp
+
+    implicit none
+
+    private
+
+    interface legendre
+        !! version: experimental
+        !!
+        !! Legendre polynomial
+        pure elemental module function legendre_fp64(n,x) result(leg)
+            integer, intent(in) :: n
+            real(dp), intent(in) :: x
+            real(dp) :: leg
+        end function
+    end interface
+    public :: legendre
+
+    interface dlegendre
+        !! version: experimental
+        !!
+        !! First derivative Legendre polynomial
+        pure elemental module function dlegendre_fp64(n,x) result(dleg)
+            integer, intent(in) :: n
+            real(dp), intent(in) :: x
+            real(dp) :: dleg
+        end function
+    end interface
+    public :: dlegendre
+
+    interface gaussian
+        !! Version: experimental
+        !!
+        !! Gaussian function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#gaussian))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function gaussian_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: gaussian
+
+    interface gaussian_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Gaussian function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#gaussian_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function gaussian_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: gaussian_grad
+
+    interface elu
+        !! Version: experimental
+        !!
+        !! Exponential Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#elu))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function elu_${rk}$( x , a ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$, intent(in) :: a
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: elu
+
+    interface elu_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Exponential Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#elu_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function elu_grad_${rk}$( x , a ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$, intent(in) :: a
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: elu_grad
+
+    interface relu
+        !! Version: experimental
+        !!
+        !! Rectified Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#relu))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function relu_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: relu
+
+    interface relu_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Rectified Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#relu_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function relu_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: relu_grad
+
+    interface leaky_relu
+        !! Version: experimental
+        !!
+        !! Leaky Rectified Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#leaky_relu))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function leaky_relu_${rk}$( x , a ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$, intent(in) :: a
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: leaky_relu
+
+    interface leaky_relu_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Leaky Rectified Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#leaky_relu_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function leaky_relu_grad_${rk}$( x , a ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$, intent(in) :: a
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: leaky_relu_grad
+
+    interface gelu
+        !! Version: experimental
+        !!
+        !! Gaussian Error Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#gelu))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function gelu_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: gelu
+
+    interface gelu_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Gaussian Error Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#gelu_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function gelu_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: gelu_grad
+
+    interface gelu_approx
+        !! Version: experimental
+        !!
+        !! Approximated Gaussian Error Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#gelu_approx))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function gelu_approx_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: gelu_approx
+
+    interface gelu_approx_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the approximated Gaussian Error Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#gelu_approx_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function gelu_approx_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: gelu_approx_grad
+
+    interface selu
+        !! Version: experimental
+        !!
+        !! Scaled Exponential Linear Unit
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#selu))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function selu_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: selu
+
+    interface selu_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Scaled Exponential Linear Unit
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#selu_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function selu_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: selu_grad
+
+    interface sigmoid
+        !! Version: experimental
+        !!
+        !! Sigmoid function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#sigmoid))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function sigmoid_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: sigmoid
+
+    interface sigmoid_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Sigmoid function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#sigmoid_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function sigmoid_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: sigmoid_grad
+
+    interface silu
+        !! Version: experimental
+        !!
+        !! Sigmoid Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#silu))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function silu_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: silu
+
+    interface silu_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Sigmoid Linear Unit function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#silu_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function silu_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: silu_grad
+
+    interface step
+        !! Version: experimental
+        !!
+        !! Step function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#step))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function step_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: step
+
+    interface step_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the step function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#step_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function step_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: step_grad
+
+    interface tanh
+        !! Version: experimental
+        !!
+        !! Hyperbolic tangent function
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function tanh_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: tanh
+
+    interface tanh_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the hyperbolic tangent function
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function tanh_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: tanh_grad
+
+    interface Softmax
+        !! Version: experimental
+        !!
+        !! Softmax function. Available for ranks 1 to 4
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#softmax))
+        #:for rk, rt in REAL_KINDS_TYPES
+        pure module function Softmax_r1_${rk}$( x , dim ) result( y )
+            ${rt}$, intent(in) :: x(:)
+            ${rt}$ :: y(size(x))
+            integer, intent(in), optional :: dim
+        end function
+        #:for rank in RANKS
+        pure module function Softmax_r${rank}$_${rk}$( x , dim ) result( y )
+            ${rt}$, intent(in) :: x${ranksuffix(rank)}$
+            ${rt}$ :: y${shape('x', rank)}$
+            integer, intent(in), optional :: dim
+        end function
+        #:endfor
+        #:endfor
+    end interface
+    public :: softmax
+
+    interface Softmax_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the Softmax function. Available for ranks 1 to 4
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#softmax_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        pure module function Softmax_grad_r1_${rk}$( x , dim ) result( y )
+            ${rt}$, intent(in) :: x(:)
+            ${rt}$ :: y(size(x))
+            integer, intent(in), optional :: dim
+        end function
+        #:for rank in RANKS
+        pure module function Softmax_grad_r${rank}$_${rk}$( x , dim ) result( y )
+            ${rt}$, intent(in) :: x${ranksuffix(rank)}$
+            ${rt}$ :: y${shape('x', rank)}$
+            integer, intent(in), optional :: dim
+        end function
+        #:endfor
+        #:endfor
+    end interface
+    public :: Softmax_grad
+
+    interface LogSoftmax
+        !! Version: experimental
+        !!
+        !! LogSoftmax function. Available for ranks 1 to 4
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#logsoftmax))
+        #:for rk, rt in REAL_KINDS_TYPES
+        pure module function LogSoftmax_r1_${rk}$( x, dim ) result( y )
+            ${rt}$, intent(in) :: x(:)
+            ${rt}$ :: y(size(x))
+            integer, intent(in), optional :: dim
+        end function
+        #:for rank in RANKS
+        pure module function LogSoftmax_r${rank}$_${rk}$( x , dim ) result( y )
+            ${rt}$, intent(in) :: x${ranksuffix(rank)}$
+            ${rt}$ :: y${shape('x', rank)}$
+            integer, intent(in), optional :: dim
+        end function
+        #:endfor
+        #:endfor
+    end interface
+    public :: LogSoftmax
+
+    interface Softplus
+        !! Version: experimental
+        !!
+        !! Softplus function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#softplus))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function Softplus_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: Softplus
+
+    interface Softplus_grad
+        !! Version: experimental
+        !!
+        !! Gradient of the softplus function
+        !> ([Specification](../page/specs/stdlib_specialfunctions_activations.html#softplus_grad))
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function Softplus_grad_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: Softplus_grad
+
+    interface ftanh
+        !! Version: experimental
+        !!
+        !! Fast approximation of the tanh function
+        !! Source: https://fortran-lang.discourse.group/t/fastgpt-faster-than-pytorch-in-300-lines-of-fortran/5385/31
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function ftanh_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: ftanh
+
+    interface ferf
+        !! Version: experimental
+        !!
+        !! Fast approximation of the erf function
+        !! Source: https://fortran-lang.discourse.group/t/fastgpt-faster-than-pytorch-in-300-lines-of-fortran/5385/31
+        #:for rk, rt in REAL_KINDS_TYPES
+        elemental module function ferf_${rk}$( x ) result( y )
+            ${rt}$, intent(in) :: x
+            ${rt}$ :: y
+        end function
+        #:endfor
+    end interface
+    public :: ferf
+
+end module stdlib_specialfunctions
diff --git a/src/stdlib_specialfunctions_activations.fypp b/src/stdlib_specialfunctions_activations.fypp
new file mode 100644
index 000000000..ab4e37126
--- /dev/null
+++ b/src/stdlib_specialfunctions_activations.fypp
@@ -0,0 +1,378 @@
+#:include "common.fypp"
+#:set RANKS = range(2, MAXRANK + 1)
+submodule(stdlib_specialfunctions) stdlib_specialfunctions_activations
+    implicit none
+
+    #:for rk, rt in REAL_KINDS_TYPES
+    ${rt}$, parameter :: isqrt2_${rk}$ = 1._${rk}$ / sqrt(2._${rk}$)
+    ! 1/sqrt(2*pi), needed by the gelu gradients
+    ${rt}$, parameter :: isqrt2pi_${rk}$ = 1._${rk}$ / sqrt(2._${rk}$ * acos(-1._${rk}$))
+    #:endfor
+
+contains
+
+!==================================================
+! Gaussian
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function gaussian_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = exp(-x**2)
+end function
+
+elemental module function gaussian_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = -2._${rk}$ * x * exp(-x**2)
+end function
+
+#:endfor
+
+!==================================================
+! Exponential Linear Unit
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function elu_${rk}$( x , a ) result ( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$, intent(in) :: a
+    ${rt}$ :: y
+    y = merge( x , a * (exp(x) - 1._${rk}$), x >= 0._${rk}$)
+end function
+
+elemental module function elu_grad_${rk}$( x , a ) result ( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$, intent(in) :: a
+    ${rt}$ :: y
+    y = merge( 1._${rk}$ , a * exp(x), x >= 0._${rk}$)
+end function
+
+#:endfor
+
+!==================================================
+! Rectified Linear Unit
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function relu_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = max(0._${rk}$, x)
+end function
+
+elemental module function relu_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = merge( 1._${rk}$ , 0._${rk}$, x > 0._${rk}$)
+end function
+
+#:endfor
+
+!==================================================
+! Leaky Rectified Linear Unit
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function Leaky_relu_${rk}$( x , a ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$, intent(in) :: a
+    ${rt}$ :: y
+    y = merge( x, a * x , x >= 0._${rk}$)
+end function
+
+elemental module function Leaky_relu_grad_${rk}$( x , a ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$, intent(in) :: a
+    ${rt}$ :: y
+    y = merge( 1._${rk}$ , a , x >= 0._${rk}$)
+end function
+
+#:endfor
+
+!==================================================
+! GELU: Gaussian Error Linear Units function
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function gelu_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = 0.5_${rk}$ * x * (1._${rk}$ + erf(x * isqrt2_${rk}$))
+end function
+
+elemental module function gelu_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    ! d/dx [x * Phi(x)] = Phi(x) + x * phi(x), with phi the standard normal pdf
+    y = 0.5_${rk}$ * (1._${rk}$ + erf(x * isqrt2_${rk}$) )
+    y = y + x * isqrt2pi_${rk}$ * exp( - 0.5_${rk}$ * x**2 )
+end function
+
+#:endfor
+
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function gelu_approx_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = 0.5_${rk}$ * x * (1._${rk}$ + ferf(x * isqrt2_${rk}$))
+end function
+
+elemental module function gelu_approx_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = 0.5_${rk}$ * (1._${rk}$ + ferf(x * isqrt2_${rk}$) )
+    y = y + x * isqrt2pi_${rk}$ * exp( - 0.5_${rk}$ * x**2 )
+end function
+
+#:endfor
+
+!==================================================
+! Scaled Exponential Linear Unit (SELU)
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function selu_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    ${rt}$, parameter :: scale = 1.0507009873554804934193349852946_${rk}$
+    ${rt}$, parameter :: alpha = 1.6732632423543772848170429916717_${rk}$
+    y = merge( x , alpha * exp(x) - alpha, x >= 0._${rk}$)
+    y = scale * y
+end function
+
+elemental module function selu_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    ${rt}$, parameter :: scale = 1.0507009873554804934193349852946_${rk}$
+    ${rt}$, parameter :: alpha = 1.6732632423543772848170429916717_${rk}$
+    y = merge( scale , scale * alpha * exp(x), x >= 0._${rk}$)
+end function
+
+#:endfor
+
+!==================================================
+! Sigmoid
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function sigmoid_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = 1._${rk}$ / (1._${rk}$ + exp(-x))
+end function
+
+elemental module function sigmoid_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = exp(x) / (1._${rk}$ + exp(x))**2
+end function
+
+#:endfor
+
+!==================================================
+! SiLU: Sigmoid Linear Unit
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function silu_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = x / (1._${rk}$ + exp(-x))
+end function
+
+elemental module function silu_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    ! silu'(x) = exp(x) * (x + 1 + exp(x)) / (1 + exp(x))**2
+    y = 1._${rk}$ + exp(x)
+    y = exp(x) * ( x + y ) / y**2
+end function
+
+#:endfor
+
+!==================================================
+! Step
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function Step_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = merge( 1._${rk}$ , 0._${rk}$, x > 0._${rk}$)
+end function
+
+elemental module function Step_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = 0._${rk}$
+end function
+
+#:endfor
+
+!==================================================
+! tanh
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function tanh_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = ftanh(x)
+end function
+
+elemental module function tanh_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = 1._${rk}$ - ftanh(x)**2
+end function
+
+#:endfor
+
+!==================================================
+! Softmax
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+pure module function Softmax_r1_${rk}$( x , dim ) result( y )
+    ${rt}$, intent(in) :: x(:)
+    ${rt}$ :: y(size(x))
+    integer, intent(in), optional :: dim
+
+    y = exp(x - maxval(x))
+    y = y / sum(y)
+end function
+
+#:for rank in RANKS
+pure module function Softmax_r${rank}$_${rk}$( x , dim ) result( y )
+    ${rt}$, intent(in) :: x${ranksuffix(rank)}$
+    ${rt}$ :: y${shape('x', rank)}$
+
+    integer, intent(in), optional :: dim
+    integer :: dim_, j
+
+    dim_ = 1; if(present(dim)) dim_ = dim
+
+    if(dim_<${rank}$)then
+        do j = 1, size(x,dim=${rank}$)
+            #:if rank == 2
+            y${select_subarray(rank, [(rank, 'j')])}$ = Softmax( x${select_subarray(rank, [(rank, 'j')])}$ )
+            #:else
+            y${select_subarray(rank, [(rank, 'j')])}$ = Softmax( x${select_subarray(rank, [(rank, 'j')])}$, dim=dim_ )
+            #:endif
+        end do
+    else
+        do j = 1, size(x,dim=1)
+            #:if rank == 2
+            y${select_subarray(rank, [(1, 'j')])}$ = Softmax( x${select_subarray(rank, [(1, 'j')])}$ )
+            #:else
+            y${select_subarray(rank, [(1, 'j')])}$ = Softmax( x${select_subarray(rank, [(1, 'j')])}$, dim=${rank-1}$ )
+            #:endif
+        end do
+    end if
+end function
+#:endfor
+
+pure module function Softmax_grad_r1_${rk}$( x , dim ) result( y )
+    ${rt}$, intent(in) :: x(:)
+    ${rt}$ :: y(size(x))
+    integer, intent(in), optional :: dim
+
+    y = Softmax(x)
+    y = y * (1._${rk}$ - y)
+end function
+
+#:for rank in RANKS
+pure module function Softmax_grad_r${rank}$_${rk}$( x , dim ) result( y )
+    ${rt}$, intent(in) :: x${ranksuffix(rank)}$
+    ${rt}$ :: y${shape('x', rank)}$
+
+    integer, intent(in), optional :: dim
+    integer :: dim_
+
+    dim_ = 1; if(present(dim)) dim_ = dim
+
+    y = Softmax(x,dim_)
+    y = y * (1._${rk}$ - y)
+end function
+#:endfor
+
+#:endfor
+
+!==================================================
+! LogSoftmax
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+pure module function LogSoftmax_r1_${rk}$( x, dim ) result( y )
+    ${rt}$, intent(in) :: x(:)
+    ${rt}$ :: y(size(x))
+    integer, intent(in), optional :: dim
+    y = x - maxval(x)
+    y = y - log( sum(exp(y)) )
+end function
+
+#:for rank in RANKS
+pure module function LogSoftmax_r${rank}$_${rk}$( x , dim ) result( y )
+    ${rt}$, intent(in) :: x${ranksuffix(rank)}$
+    ${rt}$ :: y${shape('x', rank)}$
+
+    integer, intent(in), optional :: dim
+    integer :: dim_, j
+
+    dim_ = 1; if(present(dim)) dim_ = dim
+
+    if(dim_<${rank}$)then
+        do j = 1, size(x,dim=${rank}$)
+            #:if rank == 2
+            y${select_subarray(rank, [(rank, 'j')])}$ = LogSoftmax( x${select_subarray(rank, [(rank, 'j')])}$ )
+            #:else
+            y${select_subarray(rank, [(rank, 'j')])}$ = LogSoftmax( x${select_subarray(rank, [(rank, 'j')])}$, dim=dim_ )
+            #:endif
+        end do
+    else
+        do j = 1, size(x,dim=1)
+            #:if rank == 2
+            y${select_subarray(rank, [(1, 'j')])}$ = LogSoftmax( x${select_subarray(rank, [(1, 'j')])}$ )
+            #:else
+            y${select_subarray(rank, [(1, 'j')])}$ = LogSoftmax( x${select_subarray(rank, [(1, 'j')])}$, dim=${rank-1}$ )
+            #:endif
+        end do
+    end if
+end function
+#:endfor
+
+#:endfor
+
+!==================================================
+! Softplus
+!==================================================
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function Softplus_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = log(exp(x) + 1._${rk}$)
+end function
+
+elemental module function Softplus_grad_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    y = exp(x) / (exp(x) + 1._${rk}$)
+end function
+
+#:endfor
+
+!==================================================
+! Fast intrinsics for accelerated activations
+!==================================================
+
+#:for rk, rt in REAL_KINDS_TYPES
+elemental module function ftanh_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    ${rt}$ :: x2, a, b
+
+    x2 = x*x
+    a = x * (135135.0_${rk}$ + x2 * (17325.0_${rk}$ + x2 * (378.0_${rk}$ + x2)))
+    b = 135135.0_${rk}$ + x2 * (62370.0_${rk}$ + x2 * (3150.0_${rk}$ + x2 * 28.0_${rk}$))
+    y = merge( a / b , sign(1._${rk}$,x) , x2 <= 25._${rk}$ )
+end function
+
+elemental module function ferf_${rk}$( x ) result( y )
+    ${rt}$, intent(in) :: x
+    ${rt}$ :: y
+    ${rt}$ :: abs_x
+
+    abs_x = abs(x)
+    y = 1._${rk}$ - 1._${rk}$ / (1._${rk}$+ 0.278393_${rk}$*abs_x + 0.230389_${rk}$*abs_x**2 + 0.000972_${rk}$*abs_x**3 + 0.078108_${rk}$*abs_x**4)**4
+    y = y * sign(1.0_${rk}$,x)
+end function
+
+#:endfor
+
+end submodule
\ No newline at end of file
diff --git a/test/specialfunctions/CMakeLists.txt b/test/specialfunctions/CMakeLists.txt
index caa3a96b5..536c36e12 100644
--- a/test/specialfunctions/CMakeLists.txt
+++ b/test/specialfunctions/CMakeLists.txt
@@ -2,9 +2,11 @@
 
 # Create a list of the files to be preprocessed
 set(fppFiles
+    test_specialfunctions_activations.fypp
     test_specialfunctions_gamma.fypp
 )
 
 fypp_f90("${fyppFlags}" "${fppFiles}" outFiles)
 
 ADDTEST(specialfunctions_gamma)
+ADDTEST(specialfunctions_activations)
\ No newline at end of file
diff --git a/test/specialfunctions/test_specialfunctions_activations.fypp b/test/specialfunctions/test_specialfunctions_activations.fypp
new file mode 100644
index 000000000..c9a898495
--- /dev/null
+++ b/test/specialfunctions/test_specialfunctions_activations.fypp
@@ -0,0 +1,372 @@
+#:include "common.fypp"
+#:set R_KINDS_TYPES = [KT for KT in REAL_KINDS_TYPES if KT[0] in ["sp","dp"]]
+
+module test_specialfunctions_activation
+    use testdrive, only : new_unittest, unittest_type, error_type, check
+    use stdlib_kinds
+    use stdlib_specialfunctions
+    use stdlib_math, only: linspace
+    implicit none
+    private
+
+    public :: collect_specialfunctions_activation
+
+    #:for k1, t1 in R_KINDS_TYPES
+    ${t1}$, parameter :: tol_${k1}$ = 1000 * epsilon(1.0_${k1}$)
+    #:endfor
+
+contains
+
+    subroutine collect_specialfunctions_activation(testsuite)
+        type(unittest_type), allocatable, intent(out) :: testsuite(:)
+
+        testsuite = [ &
+            new_unittest("gaussian", test_gaussian), &
+            new_unittest("elu", test_elu), &
+            new_unittest("relu", test_relu), &
+            new_unittest("leaky_relu", test_leaky_relu), &
+            new_unittest("gelu" , test_gelu), &
+            new_unittest("selu" , test_selu), &
+            new_unittest("sigmoid", test_sigmoid), &
+            new_unittest("silu" , test_silu), &
+            new_unittest("softmax", test_softmax), &
+            new_unittest("logsoftmax", test_logsoftmax) &
+            ]
+    end subroutine collect_specialfunctions_activation
+
+    subroutine test_gaussian(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n)
+
+        x = linspace(-2._sp, 2._sp, n)
+        y_ref = exp(-x**2)
+        y = gaussian( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+
+        ! Derivative
+        y_ref = -2.0 * x * exp(-x**2)
+        y = gaussian_grad( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_elu(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n), a
+
+        x = linspace(-2._sp, 2._sp, n)
+        a = 1.0_sp
+        where(x >= 0._sp)
+            y_ref = x
+        elsewhere
+            y_ref = a * (exp(x) - 1._sp)
+        end where
+        y = elu( x , a )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+
+        ! Derivative
+        where(x >= 0._sp)
+            y_ref = 1.0_sp
+        elsewhere
+            y_ref = a * exp(x)
+        end where
+        y = elu_grad( x , a )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_relu(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n)
+
+        x = linspace(-2._sp, 2._sp, n)
+        y_ref = max(0._sp, x)
+        y = relu( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+
+        ! Derivative
+        where(x > 0._sp)
+            y_ref = 1.0_sp
+        elsewhere
+            y_ref = 0.0_sp
+        end where
+        y = relu_grad( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_selu(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp), parameter :: scale = 1.0507009873554804934193349852946_sp
+        real(sp), parameter :: alpha = 1.6732632423543772848170429916717_sp
+        real(sp) :: x(n), y(n), y_ref(n)
+
+        x = linspace(-2._sp, 2._sp, n)
+        where(x >= 0._sp)
+            y_ref = scale * x
+        elsewhere
+            y_ref = scale * (alpha * exp(x) - alpha)
+        end where
+        y = selu( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+
+        ! Derivative
+        where(x >= 0._sp)
+            y_ref = scale
+        elsewhere
+            y_ref = scale * alpha * exp(x)
+        end where
+        y = selu_grad( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_gelu(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n)
+
+        y_ref = [-0.0455002784729  , -0.093188509345055, -0.148066952824593,&
+                 -0.168328359723091, -0.0915712043643  ,  0.130650997161865,&
+                  0.498338282108307,  0.963044226169586,  1.462367057800293,&
+                  1.9544997215271 ]
+        x = linspace(-2._sp, 2._sp, n)
+        y = gelu( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+
+        y = gelu_approx( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_leaky_relu(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n), a
+
+        x = linspace(-2._sp, 2._sp, n)
+        a = 0.1_sp
+        where(x>=0._sp)
+            y_ref = x
+        elsewhere
+            y_ref = a * x
+        end where
+        y = Leaky_relu( x , a )
+
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_sigmoid(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n)
+
+        y_ref = [0.119202919304371, 0.174285307526588, 0.247663781046867,&
+                 0.339243650436401, 0.444671928882599, 0.555328071117401,&
+                 0.660756349563599, 0.752336204051971, 0.825714707374573,&
+                 0.880797028541565]
+        x = linspace(-2._sp, 2._sp, n)
+        y = sigmoid( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_silu(error)
+        type(error_type), allocatable, intent(out) :: error
+        integer, parameter :: n = 10
+        real(sp) :: x(n), y(n), y_ref(n)
+
+        x = linspace(-2._sp, 2._sp, n)
+        y_ref = x / (1._sp + exp(-x))
+        y = silu( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+
+        ! Derivative
+        ! silu'(x) = exp(x) * (x + 1 + exp(x)) / (1 + exp(x))**2
+        y_ref = 1._sp + exp(x)
+        y_ref = exp(x) * ( x + y_ref ) / y_ref**2
+        y = silu_grad( x )
+        call check(error, norm2(y-y_ref) < n*tol_sp )
+        if (allocated(error)) return
+    end subroutine
+
+    subroutine test_softmax(error)
+        type(error_type), allocatable, intent(out) :: error
+
+        real(sp) :: x(3,3,3), y(3,3,3), y_ref(3,3,3)
+
+        x = reshape( [ 0.82192878, 0.76998032, 0.98611263,&
+                       0.8621334 , 0.65358045, 0.26387113,&
+                       0.12743663, 0.35237132, 0.23801647,&
+
+                       0.69773567, 0.40568874, 0.44789482,&
+                       0.42930753, 0.49579193, 0.53139985,&
+                       0.03035799, 0.65293157, 0.47613957,&
+
+                       0.21088634, 0.9356926 , 0.0991312 ,&
+                       0.46070181, 0.02943479, 0.17557538,&
+                       0.10541313, 0.33946349, 0.34804323 ] ,[3,3,3] )
+
+        !> Softmax on dim = 1
+        y = Softmax(x,dim=1)
+
+        y_ref = reshape( [ 0.319712639, 0.303528070, 0.376759291,&
+                           0.423455358, 0.343743294, 0.232801422,&
+                           0.296809316, 0.371676773, 0.331513911,&
+
+                           0.395936400, 0.295658976, 0.308404684,&
+                           0.314838648, 0.336482018, 0.348679334,&
+                           0.225966826, 0.421138495, 0.352894694,&
+
+                           0.252614945, 0.521480858, 0.225904226,&
+                           0.416388273, 0.270521373, 0.313090324,&
+                           0.282621205, 0.357150704, 0.360228121 ] ,[3,3,3] )
+
+        call check(error, norm2(y-y_ref) < tol_sp )
+        if (allocated(error)) return
+
+        !> Softmax on dim = 2
+        y = Softmax(x,dim=2)
+
+        y_ref = reshape( [ 0.393646270, 0.392350882, 0.510482967,&
+                           0.409795105, 0.349239051, 0.247922391,&
+                           0.196558580, 0.258410037, 0.241594598,&
+
+                           0.439052343, 0.296315849, 0.320951223,&
+                           0.335690796, 0.324254662, 0.348903090,&
+                           0.225256786, 0.379429489, 0.330145657,&
+
+                           0.314101219, 0.511530280, 0.297435701,&
+                           0.403239518, 0.206675291, 0.321064562,&
+                           0.282659233, 0.281794399, 0.381499708 ] ,[3,3,3] )
+
+        call check(error, norm2(y-y_ref) < tol_sp )
+        if (allocated(error)) return
+
+        !> Softmax on dim = 3
+        y = Softmax(x,dim=3)
+
+        y_ref = reshape( [ 0.412202179, 0.347835541, 0.501081109,&
+                           0.431399941, 0.418453932, 0.310344934,&
+                           0.346536130, 0.299599379, 0.295405835,&
+
+                           0.364060789, 0.241637364, 0.292525023,&
+                           0.279837668, 0.357372403, 0.405537367,&
+                           0.314476222, 0.404643506, 0.374830246,&
+
+                           0.223737061, 0.410527140, 0.206393898,&
+                           0.288762331, 0.224173695, 0.284117699,&
+                           0.338987619, 0.295757085, 0.329763889 ] ,[3,3,3] )
+
+        call check(error, norm2(y-y_ref) < tol_sp )
+        if (allocated(error)) return
+
+    end subroutine test_softmax
+
+    subroutine test_logsoftmax(error)
+        type(error_type), allocatable, intent(out) :: error
+
+        real(sp) :: x(3,3,3), y(3,3,3), y_ref(3,3,3)
+
+        x = reshape( [ 0.755308866500854,-0.789980888366699, 0.88806813955307 ,&
+                      -1.210636496543884, 0.746919095516205, 0.177668794989586,&
+                       0.540819883346558, 0.291532933712006,-0.324642956256866,&
+
+                       1.94184136390686 , 0.951070547103882,-2.303410291671753,&
+                       0.59752631187439 , 1.189722180366516, 1.401878595352173,&
+                      -0.262732744216919, 0.421907186508179,-0.200457707047462,&
+
+                      -0.702468574047089, 0.153426378965378, 0.330110251903534,&
+                      -1.16956090927124 ,-0.845042765140533,-1.364316940307617,&
+                      -1.679381608963013,-1.497506022453308,-1.194215059280396 ] ,[3,3,3] )
+
+        !> LogSoftmax on dim = 1
+        y = LogSoftmax(x,dim=1)
+
+        y_ref = reshape( [ -0.856636286,-2.40192604 ,-0.723877013,&
+                           -2.49238253 ,-0.534826934,-1.10407722 ,&
+                           -0.788554132,-1.03784108 ,-1.65401697 ,&
+
+                           -0.326149583,-1.31692040 ,-4.57140112 ,&
+                           -1.61804128 ,-1.02584541 ,-0.813688993,&
+                           -1.39805317 ,-0.713413179,-1.33577800 ,&
+
+                           -1.81836534 ,-0.962470412,-0.785786569,&
+                           -1.16514850 ,-0.840630412,-1.35990453 ,&
+                           -1.34127355 ,-1.15939808 ,-0.856107056 ],[3,3,3] )
+
+        call check(error, norm2(y-y_ref) < tol_sp )
+        if (allocated(error)) return
+
+        !> LogSoftmax on dim = 2
+        y = LogSoftmax(x,dim=2)
+
+        y_ref = reshape( [ -0.666278005,-2.15167999 ,-0.581566215,&
+                           -2.63222337 ,-0.614779949,-1.29196548 ,&
+                           -0.880766988,-1.07016611 ,-1.79427731 ,&
+
+                           -0.315551817,-1.05034387 ,-3.90906072 ,&
+                           -1.65986681 ,-0.811692238,-0.203771874,&
+                           -2.52012587 ,-1.57950723 ,-1.80610812 ,&
+
+                           -0.694792688,-0.444887042,-0.337523341,&
+                           -1.16188502 ,-1.44335616 ,-2.03195047 ,&
+                           -1.67170572 ,-2.09581947 ,-1.86184871 ],[3,3,3] )
+
+        call check(error, norm2(y-y_ref) < tol_sp )
+        if (allocated(error)) return
+
+        !> LogSoftmax on dim = 3
+        y = LogSoftmax(x,dim=3)
+
+        y_ref = reshape( [ -1.50595474 , -2.22700500 ,-0.478398114,&
+                           -2.09693313 , -1.01544499 ,-1.52940571 ,&
+                           -0.442325860, -0.835677147,-0.936625183,&
+
+                           -0.319422185, -0.485953659,-3.66987658 ,&
+                           -0.288770229, -0.572641909,-0.305195898,&
+                           -1.24587846 , -0.705302894,-0.812439919,&
+
+                           -2.96373224 , -1.28359783 ,-1.03635597 ,&
+                           -2.05585742 , -2.60740685 ,-3.07139134 ,&
+                           -2.66252732 , -2.62471604 ,-1.80619729 ],[3,3,3] )
+
+        call check(error, norm2(y-y_ref) < tol_sp )
+        if (allocated(error)) return
+
+    end subroutine test_logsoftmax
+
+
+end module test_specialfunctions_activation
+
+program tester
+    use, intrinsic :: iso_fortran_env, only : error_unit
+    use testdrive, only : run_testsuite, new_testsuite, testsuite_type
+    use test_specialfunctions_activation, only : collect_specialfunctions_activation
+    implicit none
+    integer :: stat, is
+    type(testsuite_type), allocatable :: testsuites(:)
+    character(len=*), parameter :: fmt = '("#", *(1x, a))'
+
+    stat = 0
+
+    testsuites = [new_testsuite("activation functions", &
+                  collect_specialfunctions_activation)]
+
+    do is = 1, size(testsuites)
+        write(error_unit, fmt) "Testing:", testsuites(is)%name
+        call run_testsuite(testsuites(is)%collect, error_unit, stat)
+    end do
+
+    if (stat > 0) then
+        write(error_unit, '(i0, 1x, a)') stat, "test(s) failed!"
+        error stop
+    end if
+end program tester
\ No newline at end of file