model file and custom_classes added, app and requirements files updated

- .gitattributes +2 -0
- app.py +24 -3
- custom_classes.py +79 -0
- full_transformer_encoder.keras +3 -0
- requirements.txt +2 -1
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.file_extension filter=lfs diff=lfs merge=lfs -text
+*.keras filter=lfs diff=lfs merge=lfs -text
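The two added patterns route `*.file_extension` and `*.keras` files through Git LFS rather than regular git storage, which is what allows the ~45 MB model file below to be committed. A small illustrative check of which filenames the new patterns cover (`fnmatch` only approximates gitattributes glob matching for simple `*.ext` patterns, and the filenames here are made up for the demo):

# Illustrative only: which files would the two new LFS patterns catch?
# fnmatch approximates gitattributes glob matching for simple *.ext patterns.
from fnmatch import fnmatch

for name in ("full_transformer_encoder.keras", "app.py", "data.file_extension"):
    tracked = fnmatch(name, "*.keras") or fnmatch(name, "*.file_extension")
    print(f"{name} -> {'LFS' if tracked else 'regular git'}")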
app.py CHANGED
@@ -6,9 +6,30 @@ This is a temporary script file.
 """
 
 import gradio as gr
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.layers import TextVectorization, Embedding, Dense
+from custom_classes import TransformerEncoder, PositionalEmbedding
 
-
-
+model = keras.models.load_model(
+    "full_transformer_encoder.keras",
+    custom_objects={"TransformerEncoder": TransformerEncoder,
+                    "PositionalEmbedding": PositionalEmbedding})
 
-
+def make_prediction(input_text):
+    myTensor = tf.convert_to_tensor(input_text, dtype=tf.string)
+    pred = model(tf.reshape(myTensor, (-1, 1)))
+    label_index = int(pred.numpy()[0, 0] + 0.5)
+    mapping = {0: 'Negative', 1: 'Positive'}
+    label = mapping[label_index]
+    return label
+
+
+# Create the Gradio demo
+demo = gr.Interface(fn=make_prediction,
+                    inputs="text",
+                    outputs="text",
+                    title="Text Classification",
+                    description="built via gradio")
 demo.launch()
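Two details in the new app.py are worth calling out. `keras.models.load_model` is given a `custom_objects` mapping because `TransformerEncoder` and `PositionalEmbedding` are user-defined layers that Keras cannot deserialize by name alone. And the label is produced by rounding the model's sigmoid output; a minimal sketch of that thresholding step in isolation (`score_to_label` is a hypothetical helper name, while the 0.5 cut-off and the Negative/Positive mapping are the ones used above):

# Hypothetical helper isolating the thresholding done in make_prediction:
# a sigmoid score in [0.0, 1.0] is rounded to the nearest class index.
def score_to_label(score: float) -> str:
    mapping = {0: 'Negative', 1: 'Positive'}
    return mapping[int(score + 0.5)]

assert score_to_label(0.12) == 'Negative'  # below 0.5 rounds down to class 0
assert score_to_label(0.87) == 'Positive'  # 0.5 and above rounds up to class 1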
custom_classes.py ADDED
@@ -0,0 +1,79 @@
+import tensorflow as tf
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.layers import TextVectorization, Embedding, Dense
+
+
+class TransformerEncoder(layers.Layer):
+    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
+        super().__init__(**kwargs)
+        self.embed_dim = embed_dim    # Dimension of embedding. 4 in the dummy example
+        self.dense_dim = dense_dim    # No. of neurons in dense layer
+        self.num_heads = num_heads    # No. of heads for MultiHead Attention layer
+        self.attention = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)  # MultiHead Attention layer
+        self.dense_proj = keras.Sequential([layers.Dense(dense_dim, activation="relu"),
+                                            layers.Dense(embed_dim),]  # encoders are stacked on top of each other,
+                                           )                           # so the output dimension is also embed_dim
+        self.layernorm_1 = layers.LayerNormalization()
+        self.layernorm_2 = layers.LayerNormalization()
+
+    # Self-attention block followed by a dense projection, each wrapped in a residual connection
+    def call(self, inputs, mask=None):
+        if mask is not None:
+            mask = mask[:, tf.newaxis, :]
+        attention_output = self.attention(query=inputs,  # Query: inputs
+                                          value=inputs,  # Value: inputs
+                                          key=inputs,    # Keys: same as values by default
+                                          attention_mask=mask
+                                          )  # Q: Can you see how this is self-attention? A: all args are the same
+
+        proj_input = self.layernorm_1(inputs + attention_output)  # LayerNormalization + residual connection
+        proj_output = self.dense_proj(proj_input)
+        return self.layernorm_2(proj_input + proj_output)  # LayerNormalization + residual connection
+
+    def get_config(self):
+        config = super().get_config()
+        config.update({
+            "embed_dim": self.embed_dim,
+            "num_heads": self.num_heads,
+            "dense_dim": self.dense_dim,
+        })
+        return config
+
+
+# Using positional embedding to re-inject order information
+
+class PositionalEmbedding(layers.Layer):
+    def __init__(self, sequence_length, input_dim, output_dim, **kwargs):  # input_dim = (token) vocabulary size, output_dim = embedding size
+        super().__init__(**kwargs)
+        self.token_embeddings = layers.Embedding(input_dim=input_dim, output_dim=output_dim)  # Q: what are input_dim and output_dim? A: vocab size, embedding dim
+        self.position_embeddings = layers.Embedding(input_dim=sequence_length, output_dim=output_dim)  # Q: Why input_dim = sequence_length? A: there are seq_len possible positions
+        # Q: What is the vocabulary for this Embedding layer? A: the seq_len positions
+        self.sequence_length = sequence_length
+        self.input_dim = input_dim
+        self.output_dim = output_dim
+
+    def call(self, inputs):  # inputs will be a batch of sequences (batch, seq_len)
+        length = tf.shape(inputs)[-1]  # length will just be the sequence length
+        positions = tf.range(start=0, limit=length, delta=1)  # indices for input to the positional embedding
+        embedded_tokens = tf.reshape(self.token_embeddings(inputs), (-1, length, self.output_dim))
+        embedded_positions = tf.reshape(self.position_embeddings(positions), (-1, length, self.output_dim))
+        return layers.Add()([embedded_tokens, embedded_positions])  # ADD the two embeddings
+
+    def compute_mask(self, inputs, mask=None):  # makes this layer a mask-generating layer
+        if mask is None:
+            return None
+        return tf.math.not_equal(inputs, 0)  # the mask will get propagated to the next layer
+
+    # When using custom layers, this enables the layer to be reinstantiated from its config dict,
+    # which is useful during model saving and loading.
+    def get_config(self):
+        config = super().get_config()
+        config.update({
+            "output_dim": self.output_dim,
+            "sequence_length": self.sequence_length,
+            "input_dim": self.input_dim,
+        })
+        return config
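The exact architecture stored in full_transformer_encoder.keras is not part of this diff, but the two layers above are its building blocks. A hedged sketch of one plausible way they assemble into a string-in, sigmoid-out classifier of the kind app.py expects (all hyperparameter values and the TextVectorization setup are illustrative assumptions, not values read from the saved model):

# Assumed architecture sketch only; not read from the saved .keras file.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import TextVectorization
from custom_classes import TransformerEncoder, PositionalEmbedding

vocab_size, sequence_length = 20000, 600      # assumed hyperparameters
embed_dim, dense_dim, num_heads = 256, 32, 2  # assumed hyperparameters

# In a real pipeline the vectorizer is adapt()-ed on the training corpus first.
vectorize = TextVectorization(max_tokens=vocab_size,
                              output_mode="int",
                              output_sequence_length=sequence_length)

inputs = keras.Input(shape=(1,), dtype=tf.string)   # matches the (-1, 1) reshape in app.py
x = vectorize(inputs)                               # (batch, sequence_length) token ids
x = PositionalEmbedding(sequence_length, vocab_size, embed_dim)(x)
x = TransformerEncoder(embed_dim, dense_dim, num_heads)(x)
x = layers.GlobalMaxPooling1D()(x)
outputs = layers.Dense(1, activation="sigmoid")(x)  # the score make_prediction rounds
model = keras.Model(inputs, outputs)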
full_transformer_encoder.keras ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f7fc760fb9103dea9fa7ca87432016da5a160094d6a6751baa98cc475becb9e
+size 46928797
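The three lines above are a Git LFS pointer, not the model weights themselves; the real file is 46928797 bytes (~45 MB) and is fetched by `git lfs pull`. When a clone skips that step, `load_model` fails on the tiny pointer stub with a confusing error, so a fail-fast check like this sketch can help (the 1 KB cut-off is an arbitrary assumption; real pointer files are on the order of 130 bytes):

# Fail fast if full_transformer_encoder.keras is still an LFS pointer stub.
import os

path = "full_transformer_encoder.keras"
if os.path.getsize(path) < 1024:  # assumed cut-off; pointers are ~130 bytes
    with open(path, "rb") as f:
        head = f.read(64)
    if head.startswith(b"version https://git-lfs.github.com"):
        raise RuntimeError(f"{path} is a Git LFS pointer; run `git lfs pull` first.")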
requirements.txt CHANGED
@@ -1,2 +1,3 @@
 numpy
-pandas
+pandas
+tensorflow