Line | Branch | Exec | Source |
---|---|---|---|
1 | !!!############################################################################# | ||
2 | !!! Code written by Ned Thaddeus Taylor | ||
3 | !!! Code part of the ATHENA library - a feedforward neural network library | ||
4 | !!!############################################################################# | ||
5 | !!! definition of the abstract base layer type, from which all other layers ... | ||
6 | !!! ... are derived | ||
7 | !!! module includes the following public abstract types: | ||
8 | !!! base_layer_type - abstract type for all layers | ||
9 | !!! input_layer_type - abstract type for input layers | ||
10 | !!! flatten_layer_type - abstract type for flatten (reshape) layers | ||
11 | !!! pool_layer_type - abstract type for spatial pooling layers | ||
12 | !!! drop_layer_type - abstract type for dropout layers | ||
13 | !!! learnable_layer_type - abstract type for layers with learnable parameters | ||
14 | !!! conv_layer_type - abstract type for spatial convolutional layers | ||
15 | !!! batch_layer_type - abstract type for batch normalisation layers | ||
16 | !!!################## | ||
17 | !!! base_layer_type includes the following procedures: | ||
18 | !!! set_shape - set the input shape of the layer | ||
19 | !!! get_num_params - get the number of parameters in the layer | ||
20 | !!! print - print the layer to a file | ||
21 | !!! get_output - get the output of the layer | ||
22 | !!! init - initialise the layer | ||
23 | !!! set_batch_size - set the batch size of the layer | ||
24 | !!! forward - forward pass of layer | ||
25 | !!! backward - backward pass of layer | ||
26 | !!!################## | ||
27 | !!! input_layer_type includes the following unique procedures: | ||
28 | !!! set - set the input of the layer | ||
29 | !!!################## | ||
30 | !!! learnable_layer_type includes the following unique procedures: | ||
31 | !!! layer_reduction - reduce the layer to a single value | ||
32 | !!! layer_merge - merge the layer with another layer | ||
33 | !!! get_params - get the learnable parameters of the layer | ||
34 | !!! set_params - set the learnable parameters of the layer | ||
35 | !!! get_gradients - get the gradients of the layer | ||
36 | !!! set_gradients - set the gradients of the layer | ||
37 | !!!############################################################################# | ||
38 | !!! Attribution statement: | ||
39 | !!! The following procedures are based on code from the neural-fortran library | ||
40 | !!! https://github.com/modern-fortran/neural-fortran/blob/main/src/nf/nf_layer.f90 | ||
41 | !!! procedures: | ||
42 | !!! - get_num_params* | ||
43 | !!! - get_params* | ||
44 | !!! - set_params* | ||
45 | !!! - get_gradients* | ||
46 | !!! - set_gradients* | ||
47 | !!!############################################################################# | ||
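The hierarchy listed above follows the standard Fortran pattern of an abstract base type with deferred bindings that each concrete layer extends and overrides. The sketch below illustrates that pattern only; the type names and the deferred interface are invented for the example and are not the actual ATHENA declarations.

```fortran
!! Illustrative sketch only: NOT the actual ATHENA types or interfaces.
module demo_layer_mod
  implicit none
  private
  public :: demo_base_layer_type, demo_flatten_layer_type

  !! abstract base: stores the input shape, defers initialisation to extensions
  type, abstract :: demo_base_layer_type
     integer, allocatable, dimension(:) :: input_shape
   contains
     procedure(init_if), deferred, pass(this) :: init
  end type demo_base_layer_type

  abstract interface
     subroutine init_if(this, input_shape)
       import :: demo_base_layer_type
       class(demo_base_layer_type), intent(inout) :: this
       integer, dimension(:), intent(in) :: input_shape
     end subroutine init_if
  end interface

  !! concrete layer extending the abstract base and overriding the deferred binding
  type, extends(demo_base_layer_type) :: demo_flatten_layer_type
   contains
     procedure, pass(this) :: init => init_flatten
  end type demo_flatten_layer_type

contains

  subroutine init_flatten(this, input_shape)
    class(demo_flatten_layer_type), intent(inout) :: this
    integer, dimension(:), intent(in) :: input_shape
    this%input_shape = input_shape
  end subroutine init_flatten

end module demo_layer_mod
```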
48 | submodule(base_layer) base_layer_submodule | ||
49 | implicit none | ||
50 | |||
51 | contains | ||
52 | |||
53 | !!!############################################################################# | ||
54 | !!! print layer to file (do nothing for a base layer) | ||
55 | !!!############################################################################# | ||
56 | !!! this = (T, in) base_layer_type | ||
57 | !!! file = (I, in) file name | ||
58 | ✗ | module subroutine print_base(this, file) | |
59 | implicit none | ||
60 | class(base_layer_type), intent(in) :: this | ||
61 | character(*), intent(in) :: file | ||
62 | |||
63 | !! NO NEED TO WRITE ANYTHING FOR A DEFAULT LAYER | ||
64 | ✗ | return | |
65 | ✗ | end subroutine print_base | |
66 | !!!############################################################################# | ||
67 | |||
68 | |||
69 | !!!############################################################################# | ||
70 | !!! setup input layer shape | ||
71 | !!!############################################################################# | ||
72 | !!! this = (T, inout) base_layer_type | ||
73 | !!! input_shape = (I, in) input shape | ||
74 | 1/2 | 104 | module subroutine set_shape_base(this, input_shape) |
75 | implicit none | ||
76 | class(base_layer_type), intent(inout) :: this | ||
77 | integer, dimension(:), intent(in) :: input_shape | ||
78 | character(len=100) :: err_msg | ||
79 | |||
80 | !!-------------------------------------------------------------------------- | ||
81 | !! initialise input shape | ||
82 | !!-------------------------------------------------------------------------- | ||
83 | 5/8 | 104 | if(size(input_shape,dim=1).eq.this%input_rank)then |
84 | 7/14 | 417 | this%input_shape = input_shape |
85 | else | ||
86 | write(err_msg,'("ERROR: invalid size of input_shape in ",A,& | ||
87 | &" expected (",I0,"), got (",I0")")') & | ||
88 | 4/8 | 2 | trim(this%name), this%input_rank, size(input_shape,dim=1) |
89 | 2 | stop trim(err_msg) | |
90 | end if | ||
91 | |||
92 | 102 | end subroutine set_shape_base | |
93 | !!!############################################################################# | ||
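set_shape_base stores input_shape only after checking that its number of entries matches the layer's expected input rank; otherwise it stops with a descriptive error. A minimal standalone sketch of the same check, with made-up values:

```fortran
!! Standalone sketch of the rank check in set_shape_base, assuming a
!! hypothetical layer that expects a rank-3 input shape (height x width x channels).
program demo_set_shape
  implicit none
  integer, parameter :: input_rank = 3
  integer, allocatable :: input_shape(:)
  character(len=100) :: err_msg

  input_shape = [28, 28, 1]          ! try [28, 28] to trigger the error branch
  if(size(input_shape, dim=1) .eq. input_rank) then
     print *, "shape accepted:", input_shape
  else
     write(err_msg,'("ERROR: expected size ",I0,", got ",I0)') &
          input_rank, size(input_shape, dim=1)
     stop trim(err_msg)
  end if
end program demo_set_shape
```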
94 | |||
95 | |||
96 | !!!############################################################################# | ||
97 | !!! get number of parameters in layer | ||
98 | !!! procedure modified from neural-fortran library | ||
99 | !!!############################################################################# | ||
100 | !!! this = (T, in) layer_type | ||
101 | !!! num_params = (I, out) number of parameters | ||
102 | 1026 | pure module function get_num_params_base(this) result(num_params) | |
103 | implicit none | ||
104 | class(base_layer_type), intent(in) :: this | ||
105 | integer :: num_params | ||
106 | |||
107 | !! NO PARAMETERS IN A BASE LAYER | ||
108 | 1026 | num_params = 0 | |
109 | |||
110 | 1026 | end function get_num_params_base | |
111 | !!!----------------------------------------------------------------------------- | ||
112 | 6 | pure module function get_num_params_conv(this) result(num_params) | |
113 | implicit none | ||
114 | class(conv_layer_type), intent(in) :: this | ||
115 | integer :: num_params | ||
116 | |||
117 | !! num_filters x num_channels x kernel_size + num_biases | ||
118 | !! num_biases = num_filters | ||
119 | 24 | num_params = this%num_filters * this%num_channels * product(this%knl) + & | |
120 | 6/10 | 21 | this%num_filters |
121 | |||
122 | 6 | end function get_num_params_conv | |
123 | !!!----------------------------------------------------------------------------- | ||
124 | 3 | pure module function get_num_params_batch(this) result(num_params) | |
125 | implicit none | ||
126 | class(batch_layer_type), intent(in) :: this | ||
127 | integer :: num_params | ||
128 | |||
129 | !!! one gamma and one beta per channel | ||
130 | !!! num_params = 2 x num_channels | ||
131 | 3 | num_params = 2 * this%num_channels | |
132 | |||
133 | 3 | end function get_num_params_batch | |
134 | !!!############################################################################# | ||
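To make the two counting rules concrete: a convolutional layer holds one weight per filter, per channel, per kernel element, plus one bias per filter, while a batch normalisation layer holds one gamma and one beta per channel. A worked example with invented sizes:

```fortran
!! Worked example of the parameter counts above, with made-up sizes:
!! a conv layer with 8 filters, 3 input channels and a 3x3 kernel, and a
!! batch normalisation layer over 3 channels.
program demo_num_params
  implicit none
  integer :: num_filters, num_channels, knl(2)

  num_filters  = 8
  num_channels = 3
  knl          = [3, 3]

  !! conv: weights (filters x channels x kernel elements) plus one bias per filter
  print *, num_filters * num_channels * product(knl) + num_filters   ! 8*3*9 + 8 = 224

  !! batch norm: one gamma and one beta per channel
  print *, 2 * num_channels                                          ! 6
end program demo_num_params
```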
135 | |||
136 | |||
137 | !!!############################################################################# | ||
138 | !!! get learnable parameters of layer | ||
139 | !!! procedure modified from neural-fortran library | ||
140 | !!!############################################################################# | ||
141 | 3 | pure module function get_params_batch(this) result(params) | |
142 | implicit none | ||
143 | class(batch_layer_type), intent(in) :: this | ||
144 | real(real12), allocatable, dimension(:) :: params | ||
145 | |||
146 | 18/32 | 59 | params = [this%gamma, this%beta] |
147 | |||
148 | 3 | end function get_params_batch | |
149 | !!!############################################################################# | ||
150 | |||
151 | |||
152 | !!!############################################################################# | ||
153 | !!! set learnable parameters of layer | ||
154 | !!! procedure modified from neural-fortran library | ||
155 | !!!############################################################################# | ||
156 | 1/2 | 3 | module subroutine set_params_batch(this, params) |
157 | implicit none | ||
158 | class(batch_layer_type), intent(inout) :: this | ||
159 | real(real12), dimension(:), intent(in) :: params | ||
160 | |||
161 | 7/14 | 17 | this%gamma = params(1:this%num_channels) |
162 | 8/16 | 17 | this%beta = params(this%num_channels+1:2*this%num_channels) |
163 | |||
164 | 3 | end subroutine set_params_batch | |
165 | !!!############################################################################# | ||
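get_params_batch packs gamma followed by beta into a single flat vector, and set_params_batch reverses the split using the channel count. A self-contained round-trip sketch with an illustrative channel count:

```fortran
!! Standalone sketch of the [gamma, beta] packing and unpacking used by
!! get_params_batch / set_params_batch; the channel count is invented.
program demo_batch_params
  implicit none
  integer, parameter :: num_channels = 4
  real :: gamma(num_channels), beta(num_channels)
  real, allocatable :: params(:)

  gamma = 1.0
  beta  = 0.0

  !! pack: gamma first, then beta (as in get_params_batch)
  params = [gamma, beta]

  !! unpack: first num_channels entries are gamma, the rest are beta
  gamma = params(1:num_channels)
  beta  = params(num_channels+1:2*num_channels)

  print *, size(params), gamma(1), beta(1)   ! 8  1.0  0.0
end program demo_batch_params
```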
166 | |||
167 | |||
168 | !!!############################################################################# | ||
169 | !!! get gradients of layer | ||
170 | !!! procedure modified from neural-fortran library | ||
171 | !!!############################################################################# | ||
172 | 6 | pure module function get_gradients_batch(this, clip_method) result(gradients) | |
173 | use clipper, only: clip_type | ||
174 | implicit none | ||
175 | class(batch_layer_type), intent(in) :: this | ||
176 | type(clip_type), optional, intent(in) :: clip_method | ||
177 | real(real12), allocatable, dimension(:) :: gradients | ||
178 | |||
179 | 18/32 | 118 | gradients = [this%dg/this%batch_size, this%db/this%batch_size] |
180 | |||
181 | 1/2 | 6 | if(present(clip_method)) call clip_method%apply(size(gradients),gradients) |
182 | |||
183 | 6 | end function get_gradients_batch | |
184 | !!!############################################################################# | ||
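get_gradients_batch divides the accumulated gamma and beta gradients by the batch size before packing them into one vector, optionally applying gradient clipping afterwards. A standalone sketch of the averaging and packing step (clipping omitted, values invented):

```fortran
!! Standalone sketch of the batch averaging in get_gradients_batch.
program demo_get_gradients
  implicit none
  integer, parameter :: num_channels = 4, batch_size = 2
  real :: dg(num_channels), db(num_channels)
  real, allocatable :: gradients(:)

  dg = 8.0   ! accumulated d(loss)/d(gamma) over the batch
  db = 2.0   ! accumulated d(loss)/d(beta)  over the batch

  !! average over the batch, then pack gamma gradients followed by beta gradients
  gradients = [dg/batch_size, db/batch_size]
  print *, gradients   ! four 4.0 values, then four 1.0 values
end program demo_get_gradients
```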
185 | |||
186 | |||
187 | !!!############################################################################# | ||
188 | !!! set gradients of layer | ||
189 | !!! procedure modified from neural-fortran library | ||
190 | !!!############################################################################# | ||
191 | 6 | module subroutine set_gradients_batch(this, gradients) | |
192 | implicit none | ||
193 | class(batch_layer_type), intent(inout) :: this | ||
194 | real(real12), dimension(..), intent(in) :: gradients | ||
195 | |||
196 | select rank(gradients) | ||
197 | rank(0) | ||
198 | 2/2 | 10 | this%dg = gradients * this%batch_size |
199 | 2/2 | 27 | this%db = gradients * this%batch_size |
200 | rank(1) | ||
201 | 8/14 | 10 | this%dg = gradients(:this%batch_size) * this%batch_size |
202 | 10/18 | 27 | this%db = gradients(this%batch_size+1:) * this%batch_size |
203 | end select | ||
204 | |||
205 | 6 | end subroutine set_gradients_batch | |
206 | !!!############################################################################# | ||
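set_gradients_batch accepts an assumed-rank argument and dispatches with select rank: a scalar is broadcast to both gradient arrays, while a rank-1 vector is split into its two parts and rescaled by the batch size. A standalone sketch of that dispatch; the split bound here is taken from the array sizes for clarity, and all values are invented:

```fortran
!! Standalone sketch of the assumed-rank dispatch in set_gradients_batch.
program demo_set_gradients
  implicit none
  integer, parameter :: n = 4, batch_size = 2
  real :: dg(n), db(n)

  call set_gradients(dg, db, 0.5)                                        ! rank-0: broadcast
  call set_gradients(dg, db, [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0])  ! rank-1: split

  print *, dg(1), db(1)

contains

  subroutine set_gradients(dg, db, gradients)
    real, dimension(:), intent(out) :: dg, db
    real, dimension(..), intent(in) :: gradients

    select rank(gradients)
    rank(0)
       !! scalar gradient broadcast to both arrays, undoing the batch averaging
       dg = gradients * batch_size
       db = gradients * batch_size
    rank(1)
       !! vector gradient split into its gamma and beta parts
       dg = gradients(:size(dg)) * batch_size
       db = gradients(size(dg)+1:) * batch_size
    end select
  end subroutine set_gradients

end program demo_set_gradients
```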
207 | |||
208 | |||
209 | !!!############################################################################# | ||
210 | !!! get layer outputs | ||
211 | !!!############################################################################# | ||
212 | 4 | pure module subroutine get_output_flatten(this, output) | |
213 | implicit none | ||
214 | class(flatten_layer_type), intent(in) :: this | ||
215 | real(real12), allocatable, dimension(..), intent(out) :: output | ||
216 | |||
217 | select rank(output) | ||
218 | rank(1) | ||
219 | ✗ | output = reshape(this%output, [size(this%output)]) | |
220 | rank(2) | ||
221 | 18/36 | 14048 | output = this%output |
222 | end select | ||
223 | |||
224 | 4 | end subroutine get_output_flatten | |
225 | !!!############################################################################# | ||
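get_output_flatten likewise dispatches on the rank of the requested output: a rank-1 request receives the stored output flattened into one vector, while a rank-2 request receives a copy with its original shape. A standalone sketch with invented shapes:

```fortran
!! Standalone sketch of the select rank dispatch used by get_output_flatten.
program demo_get_output
  implicit none
  real :: stored(6, 2)                 ! e.g. flattened features x batch samples
  real, allocatable :: out1(:), out2(:, :)

  stored = 1.0
  call get_output(stored, out1)        ! rank-1: flattened copy, size 12
  call get_output(stored, out2)        ! rank-2: copied with shape [6, 2]
  print *, size(out1), shape(out2)

contains

  pure subroutine get_output(source, output)
    real, dimension(:, :), intent(in) :: source
    real, allocatable, dimension(..), intent(out) :: output

    select rank(output)
    rank(1)
       output = reshape(source, [size(source)])
    rank(2)
       output = source
    end select
  end subroutine get_output

end program demo_get_output
```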
226 | |||
227 | 8/12 | 10 | end submodule base_layer_submodule |
228 | !!!############################################################################# | ||
229 |