I have a sequence-to-sequence learning model that works fine and is able to predict some outputs. The problem is that I have no idea how to convert the output back to a text sequence.
This is my code.
from keras.preprocessing.text import Tokenizer, base_filter
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
txt1="""What makes this problem difficult is that the sequences can vary in length,
be comprised of a very large vocabulary of input symbols and may require the model
to learn the long term context or dependencies between symbols in the input sequence."""
#txt1 is used for fitting
tk = Tokenizer(nb_words=2000, filters=base_filter(), lower=True, split=" ")
tk.fit_on_texts([txt1])  #fit_on_texts expects a list of texts; a bare string is iterated character by character
#convert text to sequence
t = tk.texts_to_sequences([txt1])  #likewise expects a list of texts
#pad the sequences so they can be fed to the Keras model
t = pad_sequences(t, maxlen=10)  #t now has shape (1, 10): one sequence truncated/padded to 10 tokens
model = Sequential()
model.add(Dense(10, input_dim=10))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
#predict on the new sequence
pred = model.predict(t)
#convert the predicted sequence back to text
pred = ??
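
My rough idea so far (I am not sure this is the right approach): tk.word_index maps each word to its integer index, so I could invert that dict and look up the argmax of each softmax row. Below is a sketch of what I mean, with index_word as a reverse map I build myself, since I don't think the Tokenizer exposes one in this version:

import numpy as np

#invert word_index (word -> index) into index -> word;
#indices start at 1, so 0 (the padding value) has no entry
index_word = {i: w for w, i in tk.word_index.items()}

#each row of pred is a softmax over 10 classes;
#argmax picks the most likely index per row
pred_indices = np.argmax(pred, axis=1)

#map indices back to words, skipping anything (e.g. the padding
#index 0) that has no word in the reverse map
pred_words = [index_word[i] for i in pred_indices if i in index_word]
print(" ".join(pred_words))

Is something like this the intended way to get the text back, or is there a built-in method I am missing?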