Concatenating parallel layers in tensorflow

I am going to implement the neural network shown below (a neural network with parallel layers) in TensorFlow.

I wrote the code below for it:

# Defining model input
input_ = Input(shape=(224, 224, 3))

# Defining first parallel layer
# NOTE(review): `activation=relu` passes a bare name instead of the string
# 'relu' -- this only works if `relu` happens to be imported; use 'relu'.
in_1 = Conv2D(filters=16, kernel_size=(3, 3), activation=relu)(input_)
conv_1 = BatchNormalization()(in_1)
conv_1 = AveragePooling2D(pool_size=(2, 2), strides=(3, 3))(conv_1)

# Defining second parallel layer
in_2 = Conv2D(filters=16, kernel_size=(5, 5), activation=relu)(input_)
conv_2 = BatchNormalization()(in_2)
conv_2 = AveragePooling2D(pool_size=(2, 2), strides=(3, 3))(conv_2)

# Defining third parallel layer
in_3 = Conv2D(filters=16, kernel_size=(5, 5), activation=relu)(input_)
conv_3 = BatchNormalization()(in_3)
conv_3 = MaxPooling2D(pool_size=(2, 2), strides=(3, 3))(conv_3)

# Defining fourth parallel layer
in_4 = Conv2D(filters=16, kernel_size=(9, 9), activation=relu)(input_)
conv_4 = BatchNormalization()(in_4)
conv_4 = MaxPooling2D(pool_size=(2, 2), strides=(3, 3))(conv_4)

# Concatenating layers
# NOTE(review): this passes the tensor list to the Concatenate CONSTRUCTOR,
# producing a layer object, not a tensor -- the cause of the TypeError below.
# It must be called as Concatenate()([...]) (or use concatenate([...])).
concat = Concatenate([conv_1, conv_2, conv_3, conv_4])
flat = Flatten()(concat)
out = Dense(units=4, activation=softmax)(flat)

# NOTE(review): `inputs` must be the model's Input tensor(s) (`input_`),
# not the intermediate Conv2D outputs in_1..in_4.
model = Model(inputs=[in_1, in_2, in_3, in_4], outputs=[out])
model.summary()

After running the code i got error below:

TypeError: Inputs to a layer should be tensors.
Got: <tensorflow.python.keras.layers.merge.Concatenate object at 0x7febd46f6ac0>

Answer

There were several errors in your code: no padding, the `Concatenate` layer was never called on the tensors, the `Model` inputs were wrong, and the activations were passed as bare names instead of strings (which is not reproducible). This works:

# Parallel-branch CNN: four Conv2D branches off a shared input, each
# normalized and pooled, then concatenated along the channel axis.
# Fixes vs. the question: string activations, 'same' padding so all
# branches keep compatible spatial shapes, concatenate() called on the
# tensors, and Model wired to the actual Input tensor.
# NOTE: BatchNormalization was missing from the original answer's imports,
# and `keras.layers.merge` is a deprecated module path -- `concatenate`
# is importable directly from `keras.layers`.
from keras.layers import (
    AveragePooling2D,
    BatchNormalization,
    Concatenate,
    Conv2D,
    Dense,
    Flatten,
    Input,
    MaxPooling2D,
    concatenate,
)
from keras import Model

# Defining model input
input_ = Input(shape=(224, 224, 3))

# Defining first parallel layer
in_1 = Conv2D(filters=16, kernel_size=(3, 3), activation='relu', padding='same')(input_)
conv_1 = BatchNormalization()(in_1)
conv_1 = AveragePooling2D(pool_size=(2, 2), strides=(3, 3))(conv_1)

# Defining second parallel layer
in_2 = Conv2D(filters=16, kernel_size=(5, 5), activation='relu', padding='same')(input_)
conv_2 = BatchNormalization()(in_2)
conv_2 = AveragePooling2D(pool_size=(2, 2), strides=(3, 3))(conv_2)

# Defining third parallel layer
in_3 = Conv2D(filters=16, kernel_size=(5, 5), activation='relu', padding='same')(input_)
conv_3 = BatchNormalization()(in_3)
conv_3 = MaxPooling2D(pool_size=(2, 2), strides=(3, 3))(conv_3)

# Defining fourth parallel layer
in_4 = Conv2D(filters=16, kernel_size=(9, 9), activation='relu', padding='same')(input_)
conv_4 = BatchNormalization()(in_4)
conv_4 = MaxPooling2D(pool_size=(2, 2), strides=(3, 3))(conv_4)

# Concatenating layers -- concatenate() is called on tensors and returns a tensor
concat = concatenate([conv_1, conv_2, conv_3, conv_4])
flat = Flatten()(concat)
out = Dense(units=4, activation='softmax')(flat)

# The model's inputs must be the Input tensor, not intermediate layer outputs
model = Model(inputs=[input_], outputs=[out])
model.summary()

So, for the concatenation, you either do:

concat = Concatenate()([conv_1, conv_2, conv_3, conv_4])

or:

concat = concatenate([conv_1, conv_2, conv_3, conv_4])