Machine Learning – Use CNN with two inputs for prediction

I have records like this:

    q1   q2   label
    ccc  ddd  1
    zzz  yyy  0
    ...  ...  ...

where q1 and q2 are sentences and the label indicates whether they are duplicates or not.
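
I read the records into a pandas DataFrame (the file name below is just a placeholder):

    import pandas as pd

    # columns: q1, q2, label; the file name is a placeholder
    data = pd.read_csv("question_pairs.csv")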

Now I am confused because I have two inputs, q1 and q2, and need to combine both for the prediction. I have built a CNN model for each column and want to concatenate them.

My CNN function:

    from keras.layers import Input, Embedding, Conv1D, MaxPooling1D, \
        Flatten, Dropout, Dense, concatenate
    from keras.models import Model
    from keras.regularizers import l2

    def cnn_model(FILTER_SIZES,                 # filter sizes as a list
                  MAX_NB_WORDS,                 # total number of words
                  MAX_DOC_LEN,                  # max words in a document
                  EMBEDDING_DIM=200,            # word vector dimension
                  NUM_FILTERS=64,               # number of filters for all sizes
                  DROP_OUT=0.5,                 # dropout rate
                  NUM_OUTPUT_UNITS=1,           # number of output units
                  NUM_DENSE_UNITS=100,          # number of units in the dense layer
                  PRETRAINED_WORD_VECTOR=None,  # pretrained word vectors, if any
                  LAM=0.0):                     # regularization coefficient

        main_input = Input(shape=(MAX_DOC_LEN,),
                           dtype='int32', name='main_input')

        if PRETRAINED_WORD_VECTOR is not None:
            embed_1 = Embedding(input_dim=MAX_NB_WORDS + 1,
                                output_dim=EMBEDDING_DIM,
                                input_length=MAX_DOC_LEN,
                                # use the pretrained word vectors
                                weights=[PRETRAINED_WORD_VECTOR],
                                # word vectors can be fine-tuned further;
                                # set to False when using static word vectors
                                trainable=True,
                                name='embedding')(main_input)
        else:
            embed_1 = Embedding(input_dim=MAX_NB_WORDS + 1,
                                output_dim=EMBEDDING_DIM,
                                input_length=MAX_DOC_LEN,
                                name='embedding')(main_input)

        # add one convolution-pooling-flatten block per filter size
        conv_blocks = []
        for f in FILTER_SIZES:
            conv = Conv1D(filters=NUM_FILTERS, kernel_size=f,
                          activation='relu', name='conv_' + str(f))(embed_1)
            conv = MaxPooling1D(MAX_DOC_LEN - f + 1, name='max_' + str(f))(conv)
            conv = Flatten(name='flat_' + str(f))(conv)
            conv_blocks.append(conv)

        if len(conv_blocks) > 1:
            z = concatenate(conv_blocks, name='concatenate')
        else:
            z = conv_blocks[0]

        drop = Dropout(rate=DROP_OUT, name='dropout')(z)

        dense = Dense(NUM_DENSE_UNITS, activation='relu',
                      kernel_regularizer=l2(LAM), name='dense')(drop)

        model = Model(inputs=main_input, outputs=dense)

        model.compile(loss="binary_crossentropy",
                      optimizer="adam", metrics=["accuracy"])

        return model

First I tokenize the two columns and pad the sequences:

    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences

    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    # fit on both columns so that the words of q2 are in the vocabulary too
    tokenizer.fit_on_texts(data["q1"].tolist() + data["q2"].tolist())

    # set the number of dense units
    dense_units_num = num_filters * len(FILTER_SIZES)

    BATCH_SIZE = 32
    NUM_EPOCHS = 100

    sequence_1 = tokenizer.texts_to_sequences(data["q1"])
    # print(sequence_1)

    sequence_2 = tokenizer.texts_to_sequences(data["q2"])

    sequences = sequence_1 + sequence_2

    output_units_num = 1

    # pad all sequences to the same length:
    # if a sentence is shorter than maxlen, pad it on the right;
    # if a sentence is longer than maxlen, truncate it on the right
    padded_sequences = pad_sequences(sequences,
                                     maxlen=MAX_DOC_LEN,
                                     padding='post',
                                     truncating='post')
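
Since each CNN branch will need its own input matrix, I also pad the two columns separately (padded_1 and padded_2 are names I introduce here):

    # one padded matrix per question column, for the two model inputs
    padded_1 = pad_sequences(sequence_1, maxlen=MAX_DOC_LEN,
                             padding='post', truncating='post')
    padded_2 = pad_sequences(sequence_2, maxlen=MAX_DOC_LEN,
                             padding='post', truncating='post')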

Then I created two models like this, one for each column:

    left_cnn = cnn_model(FILTER_SIZES, MAX_NB_WORDS,
                         MAX_DOC_LEN,
                         NUM_FILTERS=num_filters,
                         NUM_OUTPUT_UNITS=output_units_num,
                         NUM_DENSE_UNITS=dense_units_num,
                         PRETRAINED_WORD_VECTOR=None)

    right_cnn = cnn_model(FILTER_SIZES, MAX_NB_WORDS,
                          MAX_DOC_LEN,
                          NUM_FILTERS=num_filters,
                          NUM_OUTPUT_UNITS=output_units_num,
                          NUM_DENSE_UNITS=dense_units_num,
                          PRETRAINED_WORD_VECTOR=None)

Now I do not know how to link these two models, or what to do next.
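
What I imagine is something like the sketch below, but I am not sure it is right. cnn_branch is a helper I am sketching here: it rebuilds each branch with a unique name prefix (Keras rejects two layers with the same name, such as two main_input layers, in one model), then the two feature vectors are merged with concatenate and a single sigmoid unit on top predicts duplicate or not. padded_1 and padded_2 are the per-column matrices from above.

    from keras.layers import Input, Embedding, Conv1D, MaxPooling1D, \
        Flatten, Dropout, Dense, concatenate
    from keras.models import Model

    def cnn_branch(prefix):
        # one convolution branch; 'prefix' keeps the layer names unique
        inp = Input(shape=(MAX_DOC_LEN,), dtype='int32',
                    name=prefix + '_input')
        x = Embedding(MAX_NB_WORDS + 1, 200,  # 200 = EMBEDDING_DIM
                      name=prefix + '_embedding')(inp)
        pools = []
        for f in FILTER_SIZES:
            c = Conv1D(num_filters, f, activation='relu',
                       name=prefix + '_conv_' + str(f))(x)
            c = MaxPooling1D(MAX_DOC_LEN - f + 1,
                             name=prefix + '_max_' + str(f))(c)
            pools.append(Flatten(name=prefix + '_flat_' + str(f))(c))
        if len(pools) > 1:
            out = concatenate(pools, name=prefix + '_concat')
        else:
            out = pools[0]
        return inp, out

    left_input, left_features = cnn_branch('left')
    right_input, right_features = cnn_branch('right')

    # merge the two feature vectors and predict duplicate / not duplicate
    merged = concatenate([left_features, right_features], name='merge')
    merged = Dropout(0.5, name='dropout')(merged)
    pred = Dense(output_units_num, activation='sigmoid',
                 name='prediction')(merged)

    model = Model(inputs=[left_input, right_input], outputs=pred)
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    # two inputs: one padded matrix per question column
    model.fit([padded_1, padded_2], data['label'].values,
              batch_size=BATCH_SIZE, epochs=NUM_EPOCHS)

Is this the right way to combine the two branches, or is there a better approach?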