Mr-Vicky-01 committed on
Commit
5dee96a
1 Parent(s): 3362a93

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -34
app.py CHANGED
@@ -8,38 +8,7 @@ from tensorflow.keras.preprocessing.image import load_img, img_to_array
8
  from tensorflow.keras.preprocessing.text import Tokenizer
9
  from tensorflow.keras.preprocessing.sequence import pad_sequences
10
  from tensorflow.keras.models import Model
11
- ###############################################
12
 
13
- # from keras.layers import Input, Dense, Dropout, Embedding, LSTM, Concatenate, Bidirectional
14
- # from keras.models import Model
15
-
16
- # max_length = 35
17
- # vocab_size = 8485
18
-
19
- # # Encoder Model
20
- # inputs1 = Input(shape=(2560,))
21
- # fe1 = Dropout(0.5)(inputs1)
22
- # fe2 = Dense(512, activation='relu')(fe1) # Increased units
23
- # fe3 = Dense(256, activation='relu')(fe2) # Increased units
24
-
25
- # # Sequence Feature Layer
26
- # inputs2 = Input(shape=(max_length,))
27
- # se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)
28
- # se2 = Dropout(0.5)(se1)
29
- # se3 = Bidirectional(LSTM(512))(se2) # Increased units
30
-
31
- # # Decoder Model
32
- # decoder1 = Concatenate()([fe3, se3])
33
- # decoder2 = Dense(512, activation='relu')(decoder1) # Increased units
34
- # decoder3 = Dropout(0.5)(decoder2)
35
- # outputs = Dense(vocab_size, activation='softmax')(decoder3)
36
-
37
- # model = Model(inputs=[inputs1, inputs2], outputs=outputs)
38
- # # optimizer = Adam(lr=0.001) # Adjusted learning rate
39
- # model.compile(optimizer="adam", loss='categorical_crossentropy')
40
-
41
- # model.load_weights("Modified_Image_Captioner_model.h5")
42
- ##############################################################################
43
 
44
  # load vgg16 model
45
  pre_trained = EfficientNetB7(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
@@ -52,9 +21,9 @@ pre_trained_model = Model(inputs=pre_trained.input, outputs=x)
52
 
53
  # model = tf.keras.models.load_model("Modified_Image_Captioner_model.h5")
54
 
55
- # tokenizer = Tokenizer()
56
- # with open("Image_Captioner_tokenizer.pkl", "rb") as f:
57
- # tokenizer = pickle.load(f)
58
 
59
  def idx_to_word(integer, tokenizer):
60
  for word, index in tokenizer.word_index.items():
 
8
  from tensorflow.keras.preprocessing.text import Tokenizer
9
  from tensorflow.keras.preprocessing.sequence import pad_sequences
10
  from tensorflow.keras.models import Model
 
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
  # load vgg16 model
14
  pre_trained = EfficientNetB7(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
 
21
 
22
  # model = tf.keras.models.load_model("Modified_Image_Captioner_model.h5")
23
 
24
+ tokenizer = Tokenizer()
25
+ with open("Image_Captioner_tokenizer.pkl", "rb") as f:
26
+ tokenizer = pickle.load(f)
27
 
28
  def idx_to_word(integer, tokenizer):
29
  for word, index in tokenizer.word_index.items():