Coverage for mlair/keras_legacy/conv_utils.py: 18%

76 statements  

« prev     ^ index     » next       coverage.py v6.4.2, created at 2023-06-01 13:03 +0000

1"""Utilities used in convolutional layers. 

2""" 

3from __future__ import absolute_import 

4from __future__ import division 

5from __future__ import print_function 

6 

7import numpy as np 

8from tensorflow.keras import backend as K 

9 

10 

def normalize_tuple(value, n, name):
    """Transforms a single int or iterable of ints into an int tuple.

    # Arguments
        value: The value to validate and convert. Could be an int, or any iterable
            of ints.
        n: The size of the tuple to be returned.
        name: The name of the argument being validated, e.g. `strides` or
            `kernel_size`. This is only used to format error messages.

    # Returns
        A tuple of n integers.

    # Raises
        ValueError: If something else than an int/long or iterable thereof was
            passed.
    """
    if isinstance(value, int):
        return (value,) * n
    else:
        try:
            value_tuple = tuple(value)
        except TypeError:
            raise ValueError('The `' + name + '` argument must be a tuple of ' +
                             str(n) + ' integers. Received: ' + str(value))
        if len(value_tuple) != n:
            raise ValueError('The `' + name + '` argument must be a tuple of ' +
                             str(n) + ' integers. Received: ' + str(value))
        for single_value in value_tuple:
            try:
                int(single_value)
            # int() raises TypeError (not ValueError) for e.g. None or lists;
            # catch both so callers always get the descriptive ValueError below.
            except (ValueError, TypeError):
                raise ValueError('The `' + name + '` argument must be a tuple of ' +
                                 str(n) + ' integers. Received: ' + str(value) + ' '
                                 'including element ' + str(single_value) + ' of '
                                 'type ' + str(type(single_value)))
    return value_tuple

48 

49 

def normalize_padding(value):
    """Lowercase a padding mode string and validate it.

    # Arguments
        value: Padding mode string (case-insensitive).

    # Returns
        The lowercased padding string.

    # Raises
        ValueError: If the padding mode is not supported by the backend.
    """
    padding = value.lower()
    valid_modes = {'valid', 'same', 'causal'}
    # The Theano backend additionally supports 'full' padding.
    if K.backend() == 'theano':
        valid_modes.add('full')
    if padding in valid_modes:
        return padding
    raise ValueError('The `padding` argument must be one of "valid", "same" '
                     '(or "causal" for Conv1D). Received: ' + str(padding))

59 

60 

def convert_kernel(kernel):
    """Converts a Numpy kernel matrix from Theano format to TensorFlow format.

    Also works reciprocally, since the transformation is its own inverse.

    # Arguments
        kernel: Numpy array (3D, 4D or 5D).

    # Returns
        The converted kernel.

    # Raises
        ValueError: in case of invalid kernel shape or invalid data_format.
    """
    kernel = np.asarray(kernel)
    if not 3 <= kernel.ndim <= 5:
        raise ValueError('Invalid kernel shape:', kernel.shape)
    # Flip every spatial axis; the last two axes (in/out channels) are untouched.
    slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
    no_flip = (slice(None, None), slice(None, None))
    slices[-2:] = no_flip
    # Index with a tuple: indexing with a *list* of slices was deprecated in
    # NumPy 1.15 and is an error in modern NumPy releases.
    return np.copy(kernel[tuple(slices)])

82 

83 

def conv_output_length(input_length, filter_size,
                       padding, stride, dilation=1):
    """Determines output length of a convolution given input length.

    # Arguments
        input_length: integer.
        filter_size: integer.
        padding: one of `"same"`, `"valid"`, `"full"`.
        stride: integer.
        dilation: dilation rate, integer.

    # Returns
        The output length (integer).
    """
    if input_length is None:
        return None
    assert padding in {'same', 'valid', 'full', 'causal'}
    # Effective kernel extent once dilation gaps are accounted for.
    dilated_size = (filter_size - 1) * dilation + 1
    if padding in ('same', 'causal'):
        length = input_length
    elif padding == 'valid':
        length = input_length - dilated_size + 1
    else:  # 'full'
        length = input_length + dilated_size - 1
    # Ceiling division by the stride.
    return (length + stride - 1) // stride

111 

112 

def conv_input_length(output_length, filter_size, padding, stride):
    """Determines input length of a convolution given output length.

    # Arguments
        output_length: integer.
        filter_size: integer.
        padding: one of `"same"`, `"valid"`, `"full"`.
        stride: integer.

    # Returns
        The input length (integer).
    """
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    # Implicit padding applied on each side for the given mode.
    pad = {
        'same': filter_size // 2,
        'valid': 0,
        'full': filter_size - 1,
    }[padding]
    return (output_length - 1) * stride - 2 * pad + filter_size

135 

136 

def deconv_length(dim_size, stride_size, kernel_size, padding,
                  output_padding, dilation=1):
    """Determines output length of a transposed convolution given input length.

    # Arguments
        dim_size: Integer, the input length.
        stride_size: Integer, the stride along the dimension of `dim_size`.
        kernel_size: Integer, the kernel size along the dimension of
            `dim_size`.
        padding: One of `"same"`, `"valid"`, `"full"`.
        output_padding: Integer, amount of padding along the output dimension,
            Can be set to `None` in which case the output length is inferred.
        dilation: dilation rate, integer.

    # Returns
        The output length (integer).
    """
    assert padding in {'same', 'valid', 'full'}
    if dim_size is None:
        return None

    # Effective kernel extent once dilation gaps are accounted for.
    kernel_size = (kernel_size - 1) * dilation + 1

    if output_padding is None:
        # Infer the output length from the padding mode alone.
        scaled = dim_size * stride_size
        if padding == 'valid':
            return scaled + max(kernel_size - stride_size, 0)
        if padding == 'full':
            return scaled - (stride_size + kernel_size - 2)
        return scaled  # 'same'

    # Exact length: invert the forward-convolution length formula.
    pad = {
        'same': kernel_size // 2,
        'valid': 0,
        'full': kernel_size - 1,
    }[padding]
    return (dim_size - 1) * stride_size + kernel_size - 2 * pad + output_padding