 
The CBAM channel attention below is implemented in TensorFlow 2.x; reference: https://blog.csdn.net/weixin_39122088/article/details/10719197
import tensorflow as tf
from tensorflow.keras import layers, regularizers

# Implementation 1
class ChannelAttention(layers.Layer):
    def __init__(self, in_planes, ratio=8):
        super(ChannelAttention, self).__init__()

        self.avg_out = layers.GlobalAveragePooling2D()
        self.max_out = layers.GlobalMaxPooling2D()

        # Shared MLP: squeeze channels by `ratio`, then restore them
        self.fc1 = layers.Dense(in_planes // ratio, kernel_initializer='he_normal',
                                kernel_regularizer=regularizers.l2(5e-4),
                                activation=tf.nn.relu,
                                use_bias=True, bias_initializer='zeros')
        self.fc2 = layers.Dense(in_planes, kernel_initializer='he_normal',
                                kernel_regularizer=regularizers.l2(5e-4),
                                use_bias=True, bias_initializer='zeros')

    def call(self, inputs):
        avg_out = self.avg_out(inputs)
        max_out = self.max_out(inputs)
        out = tf.stack([avg_out, max_out], axis=1)  # shape=(None, 2, fea_num)
        out = self.fc2(self.fc1(out))               # shared MLP on both pooled descriptors
        out = tf.reduce_sum(out, axis=1)            # shape=(None, fea_num), e.g. (256, 512)
        out = tf.nn.sigmoid(out)
        out = layers.Reshape((1, 1, out.shape[1]))(out)  # (None, 1, 1, fea_num)
        return out
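A minimal usage sketch to sanity-check the shapes (the example tensor size is my own assumption, not from the referenced post): the layer returns per-channel weights of shape (batch, 1, 1, C) that broadcast over the spatial dimensions.

x = tf.random.normal([4, 32, 32, 64])         # NHWC feature map
ca = ChannelAttention(in_planes=64, ratio=8)
w = ca(x)                                      # (4, 1, 1, 64) channel weights
refined = x * w                                # broadcasts over H and W
print(w.shape, refined.shape)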
 
# Implementation 2
class ChannelAttention(layers.Layer):
    def __init__(self, in_planes):
        super(ChannelAttention, self).__init__()

        self.avg = layers.GlobalAveragePooling2D()
        self.max = layers.GlobalMaxPooling2D()

        self.fc1 = layers.Dense(in_planes // 16, kernel_initializer='he_normal', activation='relu',
                                use_bias=True, bias_initializer='zeros')
        self.fc2 = layers.Dense(in_planes, kernel_initializer='he_normal', use_bias=True,
                                bias_initializer='zeros')

    def call(self, inputs):
        # Shared MLP on the average-pooled and max-pooled descriptors, then element-wise sum
        avg_out = self.fc2(self.fc1(self.avg(inputs)))
        max_out = self.fc2(self.fc1(self.max(inputs)))
        out = avg_out + max_out
        out = tf.nn.sigmoid(out)
        # Use -1 for the batch dimension so the reshape also works when the batch size is None
        out = tf.reshape(out, [-1, 1, 1, out.shape[1]])
        out = tf.tile(out, [1, inputs.shape[1], inputs.shape[2], 1])
        return out
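Implementation 2 keeps the reduction ratio hard-coded at 16 and tiles the weights to the full spatial size instead of relying on broadcasting, so its output has the same shape as the input. A minimal usage sketch (the tensor size is my own assumption):

x = tf.random.normal([4, 32, 32, 64])
ca = ChannelAttention(in_planes=64)
w = ca(x)            # tiled to (4, 32, 32, 64)
refined = x * w      # element-wise channel re-weighting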
 
Spatial attention; reference: https://blog.csdn.net/mqdlff_python/article/details/135119610
  
class SpatialAttention(layers.Layer):
    def __init__(self):
        super(SpatialAttention, self).__init__()
        self.conv = layers.Conv2D(filters=1, kernel_size=3, padding='same', activation='sigmoid')

    def call(self, inputs):
        # Max-pool over the channel axis, then a sigmoid conv yields a (None, H, W, 1) mask
        max_pool = tf.reduce_max(inputs, axis=-1, keepdims=True)
        spatial_attention = self.conv(max_pool)
        return spatial_attention
 
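Note that this spatial attention only uses channel-wise max pooling with a 3x3 convolution, whereas the CBAM paper concatenates the average- and max-pooled maps and uses a 7x7 convolution. Below is a minimal sketch of how the two layers are typically chained into a CBAM-style block; the wiring is my own, not taken from the referenced posts.

class CBAMBlock(layers.Layer):
    def __init__(self, in_planes):
        super(CBAMBlock, self).__init__()
        self.ca = ChannelAttention(in_planes)
        self.sa = SpatialAttention()

    def call(self, inputs):
        x = inputs * self.ca(inputs)   # channel re-weighting
        x = x * self.sa(x)             # spatial re-weighting
        return x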