GCC Code Coverage Report


Directory: src/athena/
File: athena_conv1d_layer.f90
Date: 2025-12-10 07:37:07
Exec Total Coverage
Lines: 0 0 100.0%
Functions: 0 0 -%
Branches: 0 0 -%

Line Branch Exec Source
1 module athena__conv1d_layer
2 !! Module containing implementation of a 1D convolutional layer
3 !!
4 !! This module implements 1D convolution for processing sequential data
    5 !! such as time series or text. It applies learnable filters along the sequence dimension.
6 !!
7 !! Mathematical operation:
8 !! \[
9 !! y_{i,k}
10 !! =
11 !! \sigma\left(
12 !! \sum_{c=1}^{C_{in}}
13 !! \sum_{m=0}^{K_w-1}
14 !! x_{i+m,\,c}\; w_{m,\,c,\,k}
15 !! + b_k
16 !! \right)
17 !! \]
18 !!
19 !! where:
20 !! - \(i\) is the spatial coordinate in the output
21 !! - \(k\) is the output channel (filter) index
22 !! - \(m\) is the kernel offset
23 !! - \(c\) is the input channel index
24 !! - \(K_w\) is the kernel width
25 !! - \(\sigma\) is the activation function
26 !!
27 !! Shape: \((W, C_{in}) \rightarrow (W', C_{out})\)
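   !!
   !! Output width (an illustrative note, assuming the standard strided and
   !! dilated convolution relation, with stride \(S\), dilation \(D\), and
   !! per-side padding \(P\)):
   !! \[
   !! W' = \left\lfloor \frac{W + 2P - D\,(K_w - 1) - 1}{S} \right\rfloor + 1
   !! \]
   !!
   !! Example construction (a sketch only; assumes the `activation` and
   !! `padding` arguments accept character names, as the setup routines
   !! below suggest):
   !!
   !!     layer = conv1d_layer_type( num_filters = 16, kernel_size = 3, &
   !!          stride = 1, padding = "valid", activation = "relu" )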
28 use coreutils, only: real32, stop_program
29 use athena__base_layer, only: conv_layer_type, base_layer_type
30 use athena__pad1d_layer, only: pad1d_layer_type
31 use athena__misc_types, only: base_actv_type, base_init_type, &
32 onnx_node_type, onnx_initialiser_type, onnx_tensor_type
33 use diffstruc, only: array_type
34 use athena__diffstruc_extd, only: conv1d, add_bias
35 implicit none
36
37
38 private
39
40 public :: conv1d_layer_type
41 public :: read_conv1d_layer
42
43
44 type, extends(conv_layer_type) :: conv1d_layer_type
45 !! Type for 1D convolutional layer with overloaded procedures
46 type(array_type), dimension(2) :: z
47 !! Temporary arrays for forward propagation
48 contains
49 procedure, pass(this) :: set_hyperparams => set_hyperparams_conv1d
50 !! Set hyperparameters for 1D convolutional layer
51 procedure, pass(this) :: read => read_conv1d
52 !! Read 1D convolutional layer from file
53
54 procedure, pass(this) :: forward => forward_conv1d
55 !! Forward propagation derived type handler
56
57 final :: finalise_conv1d
58 !! Finalise 1D convolutional layer
59 end type conv1d_layer_type
60
61 interface conv1d_layer_type
62 !! Interface for setting up the 1D convolutional layer
63 module function layer_setup( &
64 input_shape, &
65 num_filters, kernel_size, stride, dilation, padding, &
66 use_bias, &
67 activation, &
68 kernel_initialiser, bias_initialiser, &
69 verbose ) result(layer)
70 !! Set up the 1D convolutional layer
71 integer, dimension(:), optional, intent(in) :: input_shape
72 !! Input shape
73 integer, optional, intent(in) :: num_filters
74 !! Number of filters
75 integer, dimension(..), optional, intent(in) :: kernel_size
76 !! Kernel size
77 integer, dimension(..), optional, intent(in) :: stride
78 !! Stride
79 integer, dimension(..), optional, intent(in) :: dilation
80 !! Dilation
81 logical, optional, intent(in) :: use_bias
82 !! Use bias
83 class(*), optional, intent(in) :: activation, &
84 kernel_initialiser, bias_initialiser
85 !! Activation function, kernel initialiser, bias initialiser
86 character(*), optional, intent(in) :: padding
87 !! Padding method
88 integer, optional, intent(in) :: verbose
89 !! Verbosity level
90 type(conv1d_layer_type) :: layer
91 !! Instance of the 1D convolutional layer
92 end function layer_setup
93 end interface conv1d_layer_type
94
95
96
97 contains
98
99 !###############################################################################
100 subroutine finalise_conv1d(this)
101 !! Finalise 1D convolutional layer
102 implicit none
103
104 ! Arguments
105 type(conv1d_layer_type), intent(inout) :: this
106 !! Instance of the 1D convolutional layer
107
108 if(allocated(this%dil)) deallocate(this%dil)
109 if(allocated(this%knl)) deallocate(this%knl)
110 if(allocated(this%stp)) deallocate(this%stp)
111
112 if(allocated(this%input_shape)) deallocate(this%input_shape)
113 if(allocated(this%output)) deallocate(this%output)
114 if(allocated(this%pad_layer)) deallocate(this%pad_layer)
115
116 end subroutine finalise_conv1d
117 !###############################################################################
118
119
120 !##############################################################################!
121 ! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * !
122 !##############################################################################!
123
124
125 !###############################################################################
126 module function layer_setup( &
127 input_shape, &
128 num_filters, kernel_size, stride, dilation, padding, &
129 use_bias, &
130 activation, &
131 kernel_initialiser, bias_initialiser, &
132 verbose ) result(layer)
133 !! Set up the 1D convolutional layer
134 use athena__activation, only: activation_setup
135 use athena__initialiser, only: initialiser_setup
136 implicit none
137
138 ! Arguments
139 integer, dimension(:), optional, intent(in) :: input_shape
140 !! Input shape
141 integer, optional, intent(in) :: num_filters
142 !! Number of filters
143 integer, dimension(..), optional, intent(in) :: kernel_size
144 !! Kernel size
145 integer, dimension(..), optional, intent(in) :: stride
146 !! Stride
147 integer, dimension(..), optional, intent(in) :: dilation
148 !! Dilation
149 logical, optional, intent(in) :: use_bias
150 !! Use bias
151 class(*), optional, intent(in) :: activation
152 !! Activation function
153 class(*), optional, intent(in) :: kernel_initialiser, bias_initialiser
  154       !! Kernel and bias initialisers
155 character(*), optional, intent(in) :: padding
156 !! Padding method
157 integer, optional, intent(in) :: verbose
158 !! Verbosity level
159
160 type(conv1d_layer_type) :: layer
161 !! Instance of the 1D convolutional layer
162
163 ! Local variables
164 integer :: verbose_ = 0
165 !! Verbosity level
166 integer :: num_filters_
167 !! Number of filters
168 logical :: use_bias_ = .true.
169 !! Use bias
170 character(len=10) :: activation_function_ = "none"
171 !! Activation function
172 character(len=20) :: padding_
173 !! Padding
174 integer, dimension(1) :: kernel_size_, stride_, dilation_
  175       !! Kernel size, stride, and dilation
176 class(base_actv_type), allocatable :: activation_
177 !! Activation function
178 class(base_init_type), allocatable :: kernel_initialiser_, bias_initialiser_
179 !! Kernel and bias initialisers
180
181 if(present(verbose)) verbose_ = verbose
182
183
184 !---------------------------------------------------------------------------
185 ! Set use_bias
186 !---------------------------------------------------------------------------
187 if(present(use_bias)) use_bias_ = use_bias
188
189
190 !---------------------------------------------------------------------------
191 ! Set activation functions based on input name
192 !---------------------------------------------------------------------------
193 if(present(activation))then
194 activation_ = activation_setup(activation)
195 else
196 activation_ = activation_setup("none")
197 end if
198
199
200 !---------------------------------------------------------------------------
201 ! Define weights (kernels) and biases initialisers
202 !---------------------------------------------------------------------------
203 if(present(kernel_initialiser))then
204 kernel_initialiser_ = initialiser_setup(kernel_initialiser)
205 end if
206 if(present(bias_initialiser))then
207 bias_initialiser_ = initialiser_setup(bias_initialiser)
208 end if
209
210
211 !---------------------------------------------------------------------------
212 ! Set up number of filters
213 !---------------------------------------------------------------------------
214 if(present(num_filters))then
215 num_filters_ = num_filters
216 else
217 num_filters_ = 32
218 end if
219
220
221 !---------------------------------------------------------------------------
222 ! Set up kernel size
223 !---------------------------------------------------------------------------
224 if(present(kernel_size))then
225 select rank(kernel_size)
226 rank(0)
227 kernel_size_ = kernel_size
228 rank(1)
229 kernel_size_ = kernel_size
230 end select
231 else
232 kernel_size_ = 3
233 end if
234
235
236 !---------------------------------------------------------------------------
237 ! Set up dilation
238 !---------------------------------------------------------------------------
239 if(present(dilation))then
240 select rank(dilation)
241 rank(0)
242 dilation_ = dilation
243 rank(1)
244 dilation_ = dilation(1)
245 end select
246 else
247 dilation_ = 1
248 end if
249
250
251 !---------------------------------------------------------------------------
252 ! Set up padding name
253 !---------------------------------------------------------------------------
254 if(present(padding))then
255 padding_ = padding
256 else
257 padding_ = "valid"
258 end if
259
260
261 !---------------------------------------------------------------------------
262 ! Set up stride
263 !---------------------------------------------------------------------------
264 if(present(stride))then
265 select rank(stride)
266 rank(0)
267 stride_ = stride
268 rank(1)
269 stride_ = stride(1)
270 end select
271 else
272 stride_ = 1
273 end if
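       ! Note: kernel_size, stride, and dilation are assumed-rank dummies, so
       ! a caller may pass either a scalar or a length-1 array, e.g.
       ! stride = 1 or stride = [1] (illustrative values only).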
274
275
276 !---------------------------------------------------------------------------
277 ! Set hyperparameters
278 !---------------------------------------------------------------------------
279 call layer%set_hyperparams( &
280 num_filters = num_filters_, &
281 kernel_size = kernel_size_, stride = stride_, dilation = dilation_, &
282 padding = padding_, &
283 use_bias = use_bias_, &
284 activation = activation_, &
285 kernel_initialiser = kernel_initialiser_, &
286 bias_initialiser = bias_initialiser_, &
287 verbose = verbose_ &
288 )
289
290
291 !---------------------------------------------------------------------------
292 ! Initialise layer shape
293 !---------------------------------------------------------------------------
294 if(present(input_shape)) call layer%init(input_shape=input_shape)
295
296 end function layer_setup
297 !###############################################################################
298
299
300 !###############################################################################
301 subroutine set_hyperparams_conv1d( &
302 this, &
303 num_filters, &
304 kernel_size, stride, dilation, &
305 padding, &
306 use_bias, &
307 activation, &
308 kernel_initialiser, bias_initialiser, &
309 verbose &
310 )
311 !! Set hyperparameters for 1D convolutional layer
312 use athena__activation, only: activation_setup
313 use athena__initialiser, only: get_default_initialiser, initialiser_setup
314 use coreutils, only: to_lower
315 implicit none
316
317 ! Arguments
318 class(conv1d_layer_type), intent(inout) :: this
319 !! Instance of the 1D convolutional layer
320 integer, intent(in) :: num_filters
321 !! Number of filters
322 integer, dimension(1), intent(in) :: kernel_size, stride, dilation
323 !! Kernel size, stride, dilation
324 character(*), intent(in) :: padding
325 !! Padding
326 logical, intent(in) :: use_bias
327 !! Use bias
328 class(base_actv_type), allocatable, intent(in) :: activation
329 !! Activation function
330 class(base_init_type), allocatable, intent(in) :: &
331 kernel_initialiser, bias_initialiser
332 !! Kernel and bias initialisers
333 integer, optional, intent(in) :: verbose
334 !! Verbosity level
335
336 ! Local variables
337 character(len=20) :: padding_
338 character(len=256) :: buffer
339
340 this%name = "conv1d"
341 this%type = "conv"
342 this%input_rank = 2
343 this%output_rank = 2
344 this%use_bias = use_bias
345 if(allocated(this%dil)) deallocate(this%dil)
346 if(allocated(this%knl)) deallocate(this%knl)
347 if(allocated(this%stp)) deallocate(this%stp)
348 allocate( &
349 this%dil(this%input_rank-1), &
350 this%knl(this%input_rank-1), &
351 this%stp(this%input_rank-1) &
352 )
353 this%dil = dilation
354 this%knl = kernel_size
355 this%stp = stride
356 this%num_filters = num_filters
357 padding_ = trim(adjustl(padding))
358
359 select case(trim(adjustl(to_lower(padding_))))
360 case("valid", "none", "")
361 case default
362 this%pad_layer = pad1d_layer_type( &
363 padding = [ (this%knl-1)/2 ], &
364 method = padding_ &
365 )
366 end select
367 if(allocated(this%activation)) deallocate(this%activation)
368 if(.not.allocated(activation))then
369 this%activation = activation_setup("none")
370 else
371 allocate(this%activation, source=activation)
372 end if
373 if(allocated(this%kernel_init)) deallocate(this%kernel_init)
374 if(.not.allocated(kernel_initialiser))then
375 buffer = get_default_initialiser(this%activation%name)
376 this%kernel_init = initialiser_setup(buffer)
377 else
378 allocate(this%kernel_init, source=kernel_initialiser)
379 end if
380 if(allocated(this%bias_init)) deallocate(this%bias_init)
381 if(.not.allocated(bias_initialiser))then
382 buffer = get_default_initialiser( &
383 this%activation%name, &
384 is_bias=.true. &
385 )
386 this%bias_init = initialiser_setup(buffer)
387 else
388 allocate(this%bias_init, source=bias_initialiser)
389 end if
390 if(present(verbose))then
391 if(abs(verbose).gt.0)then
392 write(*,'("CONV1D activation function: ",A)') &
393 trim(this%activation%name)
394 write(*,'("CONV1D kernel initialiser: ",A)') &
395 trim(this%kernel_init%name)
396 write(*,'("CONV1D bias initialiser: ",A)') &
397 trim(this%bias_init%name)
398 end if
399 end if
400
401 end subroutine set_hyperparams_conv1d
402 !###############################################################################
403
404
405 !##############################################################################!
406 ! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * !
407 !##############################################################################!
408
409
410 !###############################################################################
411 subroutine read_conv1d(this, unit, verbose)
412 !! Read 1D convolutional layer from file
413 use athena__tools_infile, only: assign_val, assign_vec, move
414 use coreutils, only: to_lower, to_upper, icount
415 use athena__activation, only: read_activation
416 use athena__initialiser, only: initialiser_setup
417 implicit none
418
419 ! Arguments
420 class(conv1d_layer_type), intent(inout) :: this
421 !! Instance of the 1D convolutional layer
422 integer, intent(in) :: unit
423 !! Unit number
424 integer, optional, intent(in) :: verbose
425 !! Verbosity level
426
427 ! Local variables
428 integer :: stat
429 !! Status of read
430 integer :: verbose_ = 0
431 !! Verbosity level
432 integer :: j, k, l, c, itmp1, iline, num_params
433 !! Loop variables and temporary integer
434 integer :: num_filters
435 !! Number of filters
436 logical :: use_bias = .true.
437 !! Whether to use bias
438 character(14) :: kernel_initialiser_name='', bias_initialiser_name=''
  439       !! Kernel and bias initialiser names
440 character(20) :: padding='', activation_name=''
441 !! Padding and activation function
442 class(base_actv_type), allocatable :: activation
443 !! Activation function
444 class(base_init_type), allocatable :: kernel_initialiser, bias_initialiser
445 !! Initialisers
446 character(256) :: buffer, tag, err_msg
447 !! Buffer, tag, and error message
448 integer, dimension(1) :: kernel_size, stride, dilation
  449       !! Kernel size, stride, and dilation
450 integer, dimension(2) :: input_shape
451 !! Input shape
452 real(real32), allocatable, dimension(:) :: data_list
453 !! Data list
454 integer :: param_line, final_line
  455       !! Positions of the WEIGHTS card and the END line within the layer card
456
457
458 ! Initialise optional arguments
459 !---------------------------------------------------------------------------
460 if(present(verbose)) verbose_ = verbose
461
462
463 ! Loop over tags in layer card
464 !---------------------------------------------------------------------------
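       ! Sketch of the layer card this parser expects, inferred from the tags
       ! handled below (values are illustrative; an ACTIVATION sub-card may
       ! also appear):
       !   INPUT_SHAPE = 128 3
       !   NUM_FILTERS = 16
       !   KERNEL_SIZE = 3
       !   STRIDE = 1
       !   DILATION = 1
       !   USE_BIAS = T
       !   PADDING = valid
       !   WEIGHTS
       !     ... kernel values, then bias values ...
       !   END WEIGHTS
       !   END CONV1D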
465 iline = 0
466 param_line = 0
467 final_line = 0
468 tag_loop: do
469
470 ! Check for end of file
471 !------------------------------------------------------------------------
472 read(unit,'(A)',iostat=stat) buffer
473 if(stat.ne.0)then
474 write(err_msg,'("file encountered error (EoF?) before END ",A)') &
475 to_upper(this%name)
476 call stop_program(err_msg)
477 return
478 end if
479 if(trim(adjustl(buffer)).eq."") cycle tag_loop
480
481 ! Check for end of layer card
482 !------------------------------------------------------------------------
483 if(trim(adjustl(buffer)).eq."END "//to_upper(trim(this%name)))then
484 final_line = iline
485 backspace(unit)
486 exit tag_loop
487 end if
488 iline = iline + 1
489
490 tag=trim(adjustl(buffer))
491 if(scan(buffer,"=").ne.0) tag=trim(tag(:scan(tag,"=")-1))
492
493 ! Read parameters from save file
494 !------------------------------------------------------------------------
495 select case(trim(tag))
496 case("INPUT_SHAPE")
497 call assign_vec(buffer, input_shape, itmp1)
498 case("NUM_FILTERS")
499 call assign_val(buffer, num_filters, itmp1)
500 case("KERNEL_SIZE")
501 call assign_vec(buffer, kernel_size, itmp1)
502 case("STRIDE")
503 call assign_vec(buffer, stride, itmp1)
504 case("DILATION")
505 call assign_vec(buffer, dilation, itmp1)
506 case("USE_BIAS")
507 call assign_val(buffer, use_bias, itmp1)
508 case("PADDING")
509 call assign_val(buffer, padding, itmp1)
510 padding = to_lower(padding)
511 case("ACTIVATION")
512 iline = iline - 1
513 backspace(unit)
514 activation = read_activation(unit, iline)
515 case("KERNEL_INITIALISER", "KERNEL_INIT", "KERNEL_INITIALIZER")
516 call assign_val(buffer, kernel_initialiser_name, itmp1)
517 case("BIAS_INITIALISER", "BIAS_INIT", "BIAS_INITIALIZER")
518 call assign_val(buffer, bias_initialiser_name, itmp1)
519 case("WEIGHTS")
520 kernel_initialiser_name = 'zeros'
521 bias_initialiser_name = 'zeros'
522 param_line = iline
523 case default
  524          ! Don't include "e" in the alphabetic scan, since it appears in the
  525          ! scientific-notation exponent of numeric data (e.g. E+00)
526 if(scan(to_lower(trim(adjustl(buffer))),&
527 'abcdfghijklmnopqrstuvwxyz').eq.0)then
528 cycle tag_loop
529 elseif(tag(:3).eq.'END')then
530 cycle tag_loop
531 end if
532 write(err_msg,'("Unrecognised line in input file: ",A)') &
533 trim(adjustl(buffer))
534 call stop_program(err_msg)
535 return
536 end select
537 end do tag_loop
538 kernel_initialiser = initialiser_setup(kernel_initialiser_name)
539 bias_initialiser = initialiser_setup(bias_initialiser_name)
540
541
542 ! Set hyperparameters and initialise layer
543 !---------------------------------------------------------------------------
544 call this%set_hyperparams( &
545 num_filters = num_filters, &
546 kernel_size = kernel_size, stride = stride, dilation = dilation, &
547 padding = padding, &
548 use_bias = use_bias, &
549 activation = activation, &
550 kernel_initialiser = kernel_initialiser, &
551 bias_initialiser = bias_initialiser, &
552 verbose = verbose_ &
553 )
554 call this%init(input_shape = input_shape)
555
556
557 ! Check if WEIGHTS card was found
558 !---------------------------------------------------------------------------
559 if(param_line.eq.0)then
560 write(0,*) "WARNING: WEIGHTS card in "//to_upper(trim(this%name))//" not found"
561 else
562 call move(unit, param_line - iline, iostat=stat)
563 num_params = product(this%knl) * input_shape(2) * num_filters
564 allocate(data_list(num_params), source=0._real32)
565 c = 1
566 k = 1
567 data_concat_loop1: do while(c.le.num_params)
568 read(unit,'(A)',iostat=stat) buffer
569 if(stat.ne.0) exit data_concat_loop1
570 k = icount(buffer)
571 read(buffer,*,iostat=stat) (data_list(j),j=c,c+k-1)
572 c = c + k
573 end do data_concat_loop1
574 this%params(1)%val(:,1) = data_list
575 deallocate(data_list)
576 allocate(data_list(num_filters), source=0._real32)
577 c = 1
578 k = 1
579 data_concat_loop2: do while(c.le.num_filters)
580 read(unit,'(A)',iostat=stat) buffer
581 if(stat.ne.0) exit data_concat_loop2
582 k = icount(buffer)
583 read(buffer,*,iostat=stat) (data_list(j),j=c,c+k-1)
584 c = c + k
585 end do data_concat_loop2
586 this%params(2)%val(:,1) = data_list
587 deallocate(data_list)
588
589 ! Check for end of weights card
590 !------------------------------------------------------------------------
591 read(unit,'(A)') buffer
592 if(trim(adjustl(buffer)).ne."END WEIGHTS")then
593 write(0,*) trim(adjustl(buffer))
594 call stop_program("END WEIGHTS not where expected")
595 return
596 end if
597 end if
598
599
600 ! Check for end of layer card
601 !---------------------------------------------------------------------------
602 call move(unit, final_line - iline, iostat=stat)
603 read(unit,'(A)') buffer
604 if(trim(adjustl(buffer)).ne."END "//to_upper(trim(this%name)))then
605 write(0,*) trim(adjustl(buffer))
606 write(err_msg,'("END ",A," not where expected")') to_upper(this%name)
607 call stop_program(err_msg)
608 return
609 end if
610
611 end subroutine read_conv1d
612 !###############################################################################
613
614
615 !###############################################################################
616 function read_conv1d_layer(unit, verbose) result(layer)
617 !! Read 1D convolutional layer from file and return layer
618 implicit none
619
620 ! Arguments
621 integer, intent(in) :: unit
622 !! Unit number
623 integer, optional, intent(in) :: verbose
624 !! Verbosity level
625 class(base_layer_type), allocatable :: layer
626 !! Instance of the 1D convolutional layer
627
628 ! Local variables
629 integer :: verbose_ = 0
630 !! Verbosity level
631
632 if(present(verbose)) verbose_ = verbose
633 allocate(layer, source=conv1d_layer_type())
634 call layer%read(unit, verbose=verbose_)
635
636 end function read_conv1d_layer
637 !###############################################################################
638
639
640 !###############################################################################
641 subroutine build_from_onnx_conv1d( &
642 this, node, initialisers, value_info, verbose &
643 )
644 !! Read ONNX attributes for 1D convolutional layer
645 use athena__activation, only: activation_setup
646 use athena__initialiser_data, only: data_init_type
647 implicit none
648
649 ! Arguments
650 class(conv1d_layer_type), intent(inout) :: this
651 !! Instance of the 1D convolutional layer
652 type(onnx_node_type), intent(in) :: node
653 !! ONNX node information
654 type(onnx_initialiser_type), dimension(:), intent(in) :: initialisers
655 !! ONNX initialiser information
656 type(onnx_tensor_type), dimension(:), intent(in) :: value_info
657 !! ONNX value info information
658 integer, intent(in) :: verbose
659 !! Verbosity level
660
661 ! Local variables
662 integer :: i, weight_idx, bias_idx
663 !! Loop index and temporary integer
664 integer :: num_filters
665 !! Number of filters
666 logical :: use_bias = .true.
667 !! Whether to use bias
668 integer, dimension(1) :: padding, stride, kernel_size, dilation
669 !! Padding, stride, kernel size, and dilation
670 integer, dimension(:), allocatable :: dims
671 !! Dimensions
672 character(256) :: val
673 !! Attribute value
674 class(base_actv_type), allocatable :: activation
675 !! Activation function
676 class(base_init_type), allocatable :: kernel_initialiser, bias_initialiser
677
678 ! Set default values
679 padding = 0
680 stride = 1
681 kernel_size = 3
682 dilation = 1
683
684 do i = 1, size(node%attributes)
685 val = node%attributes(i)%val
686 select case(trim(adjustl(node%attributes(i)%name)))
687 case("pads")
688 read(val,*) padding
689 case("strides")
690 read(val,*) stride
691 case("kernel_shape")
692 read(val,*) kernel_size
693 case("dilations")
694 read(val,*) dilation
695 case default
  696          ! Unrecognised attribute: warn and continue
697 write(0,*) "WARNING: Unrecognised attribute in ONNX CONV1D layer: ", &
698 trim(adjustl(node%attributes(i)%name))
699 end select
700 end do
701
702 weight_idx = -1
703 bias_idx = -1
704 allocate(dims(0))
705 if(size(initialisers).lt.1)then
706 call stop_program("ONNX CONV1D layer requires at least 1 initialiser")
707 return
708 else
709 ! check which initialiser has weights and which has biases
710 do i = 1, size(initialisers)
711 if(allocated(initialisers(i)%dims))then
712 dims = [ dims, product(initialisers(i)%dims) ]
713 end if
714 end do
715 end if
716
717 select case(size(dims))
718 case(1)
719 if(mod(dims(1), kernel_size(1)).eq.0)then
720 weight_idx = 1
721 else
722 call stop_program("ONNX CONV1D layer initialiser dimensions do not &
723 &match kernel size")
724 return
725 end if
726 use_bias = .false.
727 case(2)
728 ! check which is weight and which is bias
729 if(mod(dims(1), kernel_size(1)).eq.0 .and. &
730 dims(1)/kernel_size(1).eq.dims(2))then
731 weight_idx = 1
732 bias_idx = 2
733 elseif(mod(dims(2), kernel_size(1)).eq.0 .and. &
734 dims(2)/kernel_size(1).eq.dims(1))then
735 weight_idx = 2
736 bias_idx = 1
737 else
738 call stop_program("ONNX CONV1D layer initialiser dimensions do not &
739 &match kernel size")
740 return
741 end if
742 case default
743 call stop_program("ONNX CONV1D layer number of initialisers not supported")
744 return
745 end select
746
747 num_filters = dims(weight_idx) / kernel_size(1)
748 if(num_filters .ne. value_info(1)%dims(2))then
749 call stop_program("ONNX CONV1D layer number of filters does not match &
750 &value info")
751 return
752 end if
753
754 kernel_initialiser = data_init_type( data = initialisers(weight_idx)%data )
755 if(use_bias)then
756 bias_initialiser = data_init_type( data = initialisers(bias_idx)%data )
757 end if
758
759 activation = activation_setup("none")
760 call this%set_hyperparams( &
761 num_filters = num_filters, &
762 kernel_size = kernel_size, stride = stride, &
763 dilation = dilation, &
764 padding = "valid", &
765 use_bias = use_bias, &
766 activation = activation, &
767 verbose = verbose, &
768 kernel_initialiser = kernel_initialiser, &
769 bias_initialiser = bias_initialiser &
770 )
771
772 end subroutine build_from_onnx_conv1d
773 !###############################################################################
774
775
776 !##############################################################################!
777 ! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * !
778 !##############################################################################!
779
780
781 !###############################################################################
782 subroutine forward_conv1d(this, input)
783 !! Forward propagation
784 implicit none
785
786 ! Arguments
787 class(conv1d_layer_type), intent(inout) :: this
788 !! Instance of the 1D convolutional layer
789 class(array_type), dimension(:,:), intent(in) :: input
790 !! Input values
791
792 ! Local variables
793 type(array_type), pointer :: ptr
794 !! Pointer array
795
796
797 ! Generate outputs from weights, biases, and inputs
798 !---------------------------------------------------------------------------
799 select case(allocated(this%pad_layer))
800 case(.true.)
801 call this%pad_layer%forward(input)
802 ptr => conv1d(this%pad_layer%output(1,1), this%params(1), &
803 this%stp(1), this%dil(1) &
804 )
805 case default
806 ptr => conv1d(input(1,1), this%params(1), this%stp(1), this%dil(1))
807 end select
808 ptr => add_bias(ptr, this%params(2), dim=2, dim_act_on_shape=.true.)
809
  810    ! Apply the activation function to the pre-activation output
811 !---------------------------------------------------------------------------
812 if(trim(this%activation%name) .eq. "none") then
813 call this%output(1,1)%assign_and_deallocate_source(ptr)
814 else
815 ptr => this%activation%apply(ptr)
816 call this%output(1,1)%assign_and_deallocate_source(ptr)
817 end if
818 this%output(1,1)%is_temporary = .false.
819
820 end subroutine forward_conv1d
821 !###############################################################################
822
823 end module athena__conv1d_layer
824