GCC Code Coverage Report


Directory: src/lib/
File: src/lib/mod_base_layer.f90
Date: 2024-06-28 12:51:18
            Exec  Total  Coverage
Lines:         0      1      0.0%
Functions:     0      0        -%
Branches:      0     95      0.0%

Line Branch Exec Source
1 !!!#############################################################################
2 !!! Code written by Ned Thaddeus Taylor
3 !!! Code part of the ATHENA library - a feedforward neural network library
4 !!!#############################################################################
5 !!! definition of the abstract base layer type, from which all other layers ...
6 !!! ... are derived
7 !!! module includes the following public abstract derived types:
8 !!! - base_layer_type - abstract type for all layers
9 !!! - input_layer_type - abstract type for input layers
10 !!! - flatten_layer_type - abstract type for flatten (reshape) layers
11 !!! - pool_layer_type - abstract type for spatial pooling layers
12 !!! - drop_layer_type - abstract type for dropout layers
13 !!! - learnable_layer_type - abstract type for layers with learnable parameters
14 !!! - conv_layer_type - abstract type for spatial convolutional layers
15 !!! - batch_layer_type - abstract type for batch normalisation layers
16 !!!##################
17 !!! base_layer_type includes the following procedures:
18 !!! - set_shape - set the input shape of the layer
19 !!! - get_num_params - get the number of parameters in the layer
20 !!! - print - print the layer to a file
21 !!! - get_output - get the output of the layer
22 !!! - init - initialise the layer
23 !!! - set_batch_size - set the batch size of the layer
24 !!! - forward - forward pass of layer
25 !!! - backward - backward pass of layer
26 !!!##################
27 !!! input_layer_type includes the following unique procedures:
28 !!! - set - set the input of the layer
29 !!!##################
30 !!! learnable_layer_type includes the following unique procedures:
31 !!! - layer_reduction - reduce two layers into one
32 !!! - layer_merge - merge the layer with another layer
33 !!! - get_params - get the learnable parameters of the layer
34 !!! - set_params - set the learnable parameters of the layer
35 !!! - get_gradients - get the gradients of the layer
36 !!! - set_gradients - set the gradients of the layer
37 !!!#############################################################################
38 !!! Attribution statement:
39 !!! The following procedures are based on code from the neural-fortran library
40 !!! https://github.com/modern-fortran/neural-fortran/blob/main/src/nf/nf_layer.f90
41 !!! procedures:
42 !!! - get_num_params*
43 !!! - get_params*
44 !!! - set_params*
45 !!! - get_gradients*
46 !!! - set_gradients*
47 !!!#############################################################################
48 module base_layer
49 use constants, only: real12
50 use clipper, only: clip_type
51 use custom_types, only: activation_type
52 implicit none
53
54 private
55
56 public :: base_layer_type
57 public :: input_layer_type
58 public :: flatten_layer_type
59 public :: pool_layer_type
60 public :: drop_layer_type
61 public :: learnable_layer_type
62 public :: conv_layer_type
63 public :: batch_layer_type
64
65 !!!------------------------------------------------------------------------
66 !!! layer abstract type
67 !!!------------------------------------------------------------------------
68 type, abstract :: base_layer_type !! give it parameterised values?
69 integer :: batch_size = 0
70 integer :: input_rank = 0
71 logical :: inference = .false.
72 character(:), allocatable :: name
73 integer, allocatable, dimension(:) :: input_shape, output_shape
74 contains
75 procedure, pass(this) :: set_shape => set_shape_base
76 procedure, pass(this) :: get_num_params => get_num_params_base
77 procedure, pass(this) :: print => print_base
78 procedure(get_output), deferred, pass(this) :: get_output
79 procedure(initialise), deferred, pass(this) :: init
80 procedure(set_batch_size), deferred, pass(this) :: set_batch_size
81 procedure(forward), deferred, pass(this) :: forward
82 procedure(backward), deferred, pass(this) :: backward
83 !! NO NEED FOR DEFERRED PROCEDURES
84 !! instead, make this a generic type that just has a set of interfaces for (module) procedures that call 1D, 3D, and 4D forms
85 !! Use subroutines because output data is trickier for functions to handle
86 !! Use a general train subroutine that is called by the main model, which internally goes through forward and backward passes
87 !! Input sizes have to be 1D, 3D, or 4D (any 2D data is simply 3D with num_channels=1)
88 !! Output sizes defined by user
89 !! For every forward, just pass in the whole previous layer container
90 !! ... reverse for backward
91 !! In each layer container, you know what size you are expecting for the input, so just take that based on a select type (or from the previous layer?)
92 end type base_layer_type
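!! Illustrative sketch (hypothetical, not part of the ATHENA source): a
!! concrete layer extends base_layer_type and overrides every deferred
!! binding; the my_* names below are placeholders.
   type, extends(base_layer_type) :: my_layer_type
    contains
      procedure, pass(this) :: get_output => get_output_my
      procedure, pass(this) :: init => init_my
      procedure, pass(this) :: set_batch_size => set_batch_size_my
      procedure, pass(this) :: forward => forward_my
      procedure, pass(this) :: backward => backward_my
   end type my_layer_type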
93
94 interface
95 !!--------------------------------------------------------------------------
96 !! print layer to file (do nothing for a base layer)
97 !!--------------------------------------------------------------------------
98 !! this = (T, in) base_layer_type
99 !! file = (I, in) file name
100 module subroutine print_base(this, file)
101 class(base_layer_type), intent(in) :: this
102 character(*), intent(in) :: file
103 end subroutine print_base
104 end interface
105
106 interface
107 !!--------------------------------------------------------------------------
108 !! setup input layer shape
109 !!--------------------------------------------------------------------------
110 !! this = (T, inout) base_layer_type
111 !! input_shape = (I, in) input shape
112 module subroutine set_shape_base(this, input_shape)
113 class(base_layer_type), intent(inout) :: this
114 integer, dimension(:), intent(in) :: input_shape
115 end subroutine set_shape_base
116 end interface
117
118
119 interface
120 !!--------------------------------------------------------------------------
121 !! initialise layer
122 !!--------------------------------------------------------------------------
123 !! this = (T, inout) base_layer_type
124 !! input_shape = (I, in) input shape
125 !! batch_size = (I, in) batch size
126 !! verbose = (I, in) verbosity level
127 module subroutine initialise(this, input_shape, batch_size, verbose)
128 class(base_layer_type), intent(inout) :: this
129 integer, dimension(:), intent(in) :: input_shape
130 integer, optional, intent(in) :: batch_size
131 integer, optional, intent(in) :: verbose
132 end subroutine initialise
133
134 !!--------------------------------------------------------------------------
135 !! set batch size
136 !!--------------------------------------------------------------------------
137 !! this = (T, inout) base_layer_type
138 !! batch_size = (I, in) batch size
139 !! verbose = (I, in) verbosity level
140 module subroutine set_batch_size(this, batch_size, verbose)
141 class(base_layer_type), intent(inout) :: this
142 integer, intent(in) :: batch_size
143 integer, optional, intent(in) :: verbose
144 end subroutine set_batch_size
145 end interface
146
147 interface
148 !!--------------------------------------------------------------------------
149 !! get number of parameters in layer
150 !! procedure modified from neural-fortran library
151 !!--------------------------------------------------------------------------
152 !! this = (T, in) layer_type
153 pure module function get_num_params(this) result(num_params)
154 class(base_layer_type), intent(in) :: this
155 integer :: num_params
156 end function get_num_params
157
158 !!--------------------------------------------------------------------------
159 !! get output of layer
160 !!--------------------------------------------------------------------------
161 !! this = (T, in) layer_type
162 !! output = (R, out) layer output
163 pure module subroutine get_output(this, output)
164 class(base_layer_type), intent(in) :: this
165 real(real12), allocatable, dimension(..), intent(out) :: output
166 end subroutine get_output
167
168 !!--------------------------------------------------------------------------
169 !! forward pass of layer
170 !!--------------------------------------------------------------------------
171 !! this = (T, io) layer_type
172 !! input = (R, in) input data
173 pure module subroutine forward(this, input)
174 class(base_layer_type), intent(inout) :: this
175 real(real12), dimension(..), intent(in) :: input
176 end subroutine forward
177
178 !!--------------------------------------------------------------------------
179 !! backward pass of layer
180 !!--------------------------------------------------------------------------
181 !! this = (T, io) layer_type
182 !! input = (R, in) input data
183 !! gradient = (R, in) gradient data
184 pure module subroutine backward(this, input, gradient)
185 class(base_layer_type), intent(inout) :: this
186 real(real12), dimension(..), intent(in) :: input
187 real(real12), dimension(..), intent(in) :: gradient
188 end subroutine backward
189 end interface
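!! Illustrative sketch (hypothetical driver; the routine name and 2D input
!! shape are placeholders, not ATHENA API): how calling code might drive a
!! layer through the deferred bindings above for one batch.
   subroutine example_step(layer, input, gradient)
     class(base_layer_type), intent(inout) :: layer
     real(real12), dimension(:,:), intent(in) :: input    !! (features, batch)
     real(real12), dimension(:,:), intent(in) :: gradient !! from the next layer
     real(real12), allocatable, dimension(:,:) :: output

     call layer%init(input_shape = [size(input, 1)], batch_size = size(input, 2))
     call layer%forward(input)   !! assumed-rank dummy: 1D/3D/4D data also allowed
     call layer%get_output(output)
     call layer%backward(input, gradient)
   end subroutine example_step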
190
191
192 !!!-----------------------------------------------------------------------------
193 !!! input derived extended type
194 !!!-----------------------------------------------------------------------------
195 type, abstract, extends(base_layer_type) :: input_layer_type
196 integer :: num_outputs
197 contains
198 procedure(set), deferred, pass(this) :: set
199 end type input_layer_type
200
201 abstract interface
202 pure subroutine set(this, input)
203 import :: input_layer_type, real12
204 class(input_layer_type), intent(inout) :: this
205 real(real12), dimension(..), intent(in) :: input
206 end subroutine set
207 end interface
208
209
210 !!!-----------------------------------------------------------------------------
211 !!! flatten derived extended type
212 !!!-----------------------------------------------------------------------------
213 type, abstract, extends(base_layer_type) :: flatten_layer_type
214 integer :: num_outputs, num_addit_outputs = 0
215 real(real12), allocatable, dimension(:,:) :: output
216 contains
217 procedure, pass(this) :: get_output => get_output_flatten
218 end type flatten_layer_type
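!! Illustrative sketch (hypothetical helper, not ATHENA API): the reshape a
!! flatten layer performs on one sample, collapsing spatial dimensions and
!! channels into a single vector.
   pure subroutine flatten_sample_sketch(input, output)
     real(real12), dimension(:,:,:), intent(in) :: input !! (height, width, channels)
     real(real12), dimension(:), intent(out) :: output   !! length = size(input)
     output = reshape(input, [size(input)])
   end subroutine flatten_sample_sketch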
219
220
221 !!!-----------------------------------------------------------------------------
222 !!! pooling derived extended type
223 !!!-----------------------------------------------------------------------------
224 type, abstract, extends(base_layer_type) :: pool_layer_type
225 !! strd = stride (step)
226 !! pool = pooling window
227 integer, allocatable, dimension(:) :: pool, strd
228 integer :: num_channels
229 end type pool_layer_type
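!! Illustrative sketch (assumption: standard non-padded pooling arithmetic,
!! not taken from the ATHENA source): the output extent along one spatial
!! dimension for a given pool window and stride.
   pure function pool_output_extent(input_extent, pool, strd) result(output_extent)
     integer, intent(in) :: input_extent, pool, strd
     integer :: output_extent
     output_extent = (input_extent - pool) / strd + 1 !! e.g. (28 - 2)/2 + 1 = 14
   end function pool_output_extent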
230
231
232 !!!-----------------------------------------------------------------------------
233 !!! dropout derived extended type
234 !!!-----------------------------------------------------------------------------
235 type, abstract, extends(base_layer_type) :: drop_layer_type
236 !! rate = 1 - keep_prob -- typical = 0.05-0.25
237 real(real12) :: rate = 0.1_real12
238 contains
239 procedure(generate_mask), deferred, pass(this) :: generate_mask
240 end type drop_layer_type
241
242 abstract interface
243 !!--------------------------------------------------------------------------
244 !! generate dropout mask for the layer
245 !!--------------------------------------------------------------------------
246 !! this = (T, in) drop_layer_type
247 subroutine generate_mask(this)
248 import :: drop_layer_type
249 class(drop_layer_type), intent(inout) :: this
250 end subroutine generate_mask
251 end interface
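!! Illustrative sketch (hypothetical; the mask layout is an assumption, not
!! ATHENA's implementation): a generate_mask override could draw a Bernoulli
!! keep-mask with keep probability 1 - rate, matching the rate note above.
   subroutine generate_mask_sketch(mask, rate)
     logical, dimension(:), intent(out) :: mask
     real(real12), intent(in) :: rate
     real(real12), dimension(size(mask)) :: r
     call random_number(r)
     mask = r .ge. rate !! each unit kept with probability keep_prob = 1 - rate
   end subroutine generate_mask_sketch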
252
253
254 !!!-----------------------------------------------------------------------------
255 !!! learnable derived extended type
256 !!!-----------------------------------------------------------------------------
257 type, abstract, extends(base_layer_type) :: learnable_layer_type
258 character(len=14) :: kernel_initialiser='', bias_initialiser=''
259 class(activation_type), allocatable :: transfer
260 contains
261 procedure(layer_reduction), deferred, pass(this) :: reduce
262 procedure(layer_merge), deferred, pass(this) :: merge
263 procedure(get_params), deferred, pass(this) :: get_params
264 procedure(set_params), deferred, pass(this) :: set_params
265 procedure(get_gradients), deferred, pass(this) :: get_gradients
266 procedure(set_gradients), deferred, pass(this) :: set_gradients
267 end type learnable_layer_type
268
269 abstract interface
270 !!--------------------------------------------------------------------------
271 !! reduce two layers into one
272 !!--------------------------------------------------------------------------
273 !! this = (T, io) layer_type
274 !! rhs = (T, in) layer_type
275 subroutine layer_reduction(this, rhs)
276 import :: learnable_layer_type
277 class(learnable_layer_type), intent(inout) :: this
278 class(learnable_layer_type), intent(in) :: rhs
279 end subroutine layer_reduction
280
281 !!--------------------------------------------------------------------------
282 !! merge two layers
283 !!--------------------------------------------------------------------------
284 !! this = (T, io) layer_type
285 !! input = (T, in) layer_type
286 subroutine layer_merge(this, input)
287 import :: learnable_layer_type
288 class(learnable_layer_type), intent(inout) :: this
289 class(learnable_layer_type), intent(in) :: input
290 end subroutine layer_merge
291
292 !!--------------------------------------------------------------------------
293 !! get learnable parameters of layer
294 !! procedure modified from neural-fortran library
295 !!--------------------------------------------------------------------------
296 !! this = (T, in) layer_type
297 !! param = (R, out) learnable parameters
298 pure function get_params(this) result(params)
299 import :: learnable_layer_type, real12
300 class(learnable_layer_type), intent(in) :: this
301 real(real12), allocatable, dimension(:) :: params
302 end function get_params
303
304 !!--------------------------------------------------------------------------
305 !! set learnable parameters of layer
306 !! procedure modified from neural-fortran library
307 !!--------------------------------------------------------------------------
308 !! this = (T, io) layer_type
309 !! param = (R, in) learnable parameters
310 subroutine set_params(this, params)
311 import :: learnable_layer_type, real12
312 class(learnable_layer_type), intent(inout) :: this
313 real(real12), dimension(:), intent(in) :: params
314 end subroutine set_params
315
316 !!--------------------------------------------------------------------------
317 !! get parameter gradients of layer
318 !! procedure modified from neural-fortran library
319 !!--------------------------------------------------------------------------
320 !! this = (T, in) layer_type
321 !! clip_method = (T, in) clip method
322 !! gradients = (R, out) parameter gradients
323 pure function get_gradients(this, clip_method) result(gradients)
324 import :: learnable_layer_type, real12, clip_type
325 class(learnable_layer_type), intent(in) :: this
326 type(clip_type), optional, intent(in) :: clip_method
327 real(real12), allocatable, dimension(:) :: gradients
328 end function get_gradients
329
330 !!--------------------------------------------------------------------------
331 !! set parameter gradients of layer
332 !! procedure modified from neural-fortran library
333 !!--------------------------------------------------------------------------
334 !! this = (T, io) layer_type
335 !! gradients = (R, in) parameter gradients
336 subroutine set_gradients(this, gradients)
337 import :: learnable_layer_type, real12
338 class(learnable_layer_type), intent(inout) :: this
339 real(real12), dimension(..), intent(in) :: gradients
340 end subroutine set_gradients
341 end interface
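!! Illustrative sketch (hypothetical driver, not ATHENA API): the get/set
!! pairs above are enough to express a plain gradient-descent update.
   subroutine sgd_step_sketch(layer, learning_rate)
     class(learnable_layer_type), intent(inout) :: layer
     real(real12), intent(in) :: learning_rate
     call layer%set_params( &
          layer%get_params() - learning_rate * layer%get_gradients() )
   end subroutine sgd_step_sketch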
342
343 !!!-----------------------------------------------------------------------------
344 !!! convolution extended derived type
345 !!!-----------------------------------------------------------------------------
346 type, abstract, extends(learnable_layer_type) :: conv_layer_type
347 !! knl = kernel
348 !! stp = stride (step)
349 !! hlf = half
350 !! pad = padding
351 !! cen = centre
352 !! output_shape = dimension (height, width, depth)
353 logical :: calc_input_gradients = .true.
354 integer :: num_channels
355 integer :: num_filters
356 integer, allocatable, dimension(:) :: knl, stp, hlf, pad, cen
357 real(real12), allocatable, dimension(:) :: bias
358 real(real12), allocatable, dimension(:,:) :: db ! bias gradient
359 contains
360 procedure, pass(this) :: get_num_params => get_num_params_conv
361 end type conv_layer_type
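!! Illustrative sketch (assumption: standard strided-convolution arithmetic,
!! not taken from the ATHENA source): output extent along one spatial
!! dimension from kernel size (knl), stride (stp), and padding (pad).
   pure function conv_output_extent(input_extent, knl, stp, pad) result(output_extent)
     integer, intent(in) :: input_extent, knl, stp, pad
     integer :: output_extent
     output_extent = (input_extent + 2 * pad - knl) / stp + 1
   end function conv_output_extent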
362
363
364 !!!-----------------------------------------------------------------------------
365 !!! batch extended derived type
366 !!!-----------------------------------------------------------------------------
367 type, abstract, extends(learnable_layer_type) :: batch_layer_type
368 !! gamma = scale factor (learnable)
369 !! beta = shift factor (learnable)
370 !! dg = gradient of gamma
371 !! db = gradient of beta
372 !! mean = mean of each feature (not learnable)
373 !! variance = variance of each feature (not learnable)
374 !! NOTE: if momentum = 0, mean and variance are batch-dependent values
375 !! NOTE: if momentum > 0, mean and variance are running averages
376 !! NED: NEED TO KEEP TRACK OF EXPONENTIAL MOVING AVERAGE (EMA)
377 !! ... FOR INFERENCE
378 integer :: num_channels
379 real(real12) :: norm
380 real(real12) :: momentum = 0.99_real12
381 real(real12) :: epsilon = 0.001_real12
382 real(real12) :: gamma_init_mean = 1._real12, gamma_init_std = 0.01_real12
383 real(real12) :: beta_init_mean = 0._real12, beta_init_std = 0.01_real12
384 character(len=14) :: moving_mean_initialiser='', &
385 moving_variance_initialiser=''
386 real(real12), allocatable, dimension(:) :: mean, variance !! not learnable
387 real(real12), allocatable, dimension(:) :: gamma, beta !! learnable
388 real(real12), allocatable, dimension(:) :: dg, db !! learnable
389 contains
390 procedure, pass(this) :: get_num_params => get_num_params_batch
391 procedure, pass(this) :: get_params => get_params_batch
392 procedure, pass(this) :: set_params => set_params_batch
393 procedure, pass(this) :: get_gradients => get_gradients_batch
394 procedure, pass(this) :: set_gradients => set_gradients_batch
395 end type batch_layer_type
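!! Illustrative sketch (variable names are placeholders; formulas follow the
!! momentum/epsilon notes above, not verified against the ATHENA source):
!! per-channel normalisation using the stored statistics,
   elemental function batch_norm_sketch(x, mean, variance, gamma, beta, epsilon) result(y)
     real(real12), intent(in) :: x, mean, variance, gamma, beta, epsilon
     real(real12) :: y
     y = gamma * (x - mean) / sqrt(variance + epsilon) + beta
   end function batch_norm_sketch
!! with, for momentum > 0, the running averages updated per training batch:
!!   mean     = momentum * mean     + (1 - momentum) * batch_mean
!!   variance = momentum * variance + (1 - momentum) * batch_variance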
396
397
398
399 interface
400 pure module function get_num_params_base(this) result(num_params)
401 class(base_layer_type), intent(in) :: this
402 integer :: num_params
403 end function get_num_params_base
404 pure module function get_gradients_batch(this, clip_method) result(gradients)
405 class(batch_layer_type), intent(in) :: this
406 type(clip_type), optional, intent(in) :: clip_method
407 real(real12), allocatable, dimension(:) :: gradients
408 end function get_gradients_batch
409 pure module function get_num_params_batch(this) result(num_params)
410 class(batch_layer_type), intent(in) :: this
411 integer :: num_params
412 end function get_num_params_batch
413 module subroutine set_gradients_batch(this, gradients)
414 class(batch_layer_type), intent(inout) :: this
415 real(real12), dimension(..), intent(in) :: gradients
416 end subroutine set_gradients_batch
417 pure module function get_params_batch(this) result(params)
418 class(batch_layer_type), intent(in) :: this
419 real(real12), allocatable, dimension(:) :: params
420 end function get_params_batch
421 module subroutine set_params_batch(this, params)
422 class(batch_layer_type), intent(inout) :: this
423 real(real12), dimension(:), intent(in) :: params
424 end subroutine set_params_batch
425 pure module function get_num_params_conv(this) result(num_params)
426 class(conv_layer_type), intent(in) :: this
427 integer :: num_params
428 end function get_num_params_conv
429 pure module subroutine get_output_flatten(this, output)
430 class(flatten_layer_type), intent(in) :: this
431 real(real12), allocatable, dimension(..), intent(out) :: output
432 end subroutine get_output_flatten
433 end interface
434
435
436 end module base_layer
437 !!!#############################################################################
438