import json
import string
import numpy as np

# Define the classes from the transformer code

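# Note: attention() below calls softmax(), which is never defined in this
# commit; here is a minimal, numerically stable implementation (an assumption,
# not part of the original code).
def softmax(x, axis=-1):
    shifted = x - np.max(x, axis=axis, keepdims=True)  # Subtract the max for numerical stability
    e_x = np.exp(shifted)
    return e_x / np.sum(e_x, axis=axis, keepdims=True)
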
class PositionalEncoding:
    def __init__(self, d_model, max_seq_len):
        self.d_model = d_model
        self.max_seq_len = max_seq_len

    def get_positional_encoding(self, positions):
        # Standard sinusoidal encoding: each dimension pair (2i, 2i+1) shares
        # the frequency 1 / 10000^(2i / d_model).
        positions = positions[:, np.newaxis]           # Shape: (seq_len, 1)
        dims = np.arange(self.d_model)[np.newaxis, :]  # Shape: (1, d_model)
        angles = positions / np.power(10000, 2 * (dims // 2) / self.d_model)
        angles[:, 0::2] = np.sin(angles[:, 0::2])      # Even indices: sine
        angles[:, 1::2] = np.cos(angles[:, 1::2])      # Odd indices: cosine

        return angles  # Shape: (seq_len, d_model)

class MultiHeadAttention:
    def __init__(self, d_model, num_heads):
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_head = d_model // num_heads

        self.W_q = np.random.randn(d_model, d_model)
        self.W_k = np.random.randn(d_model, d_model)
        self.W_v = np.random.randn(d_model, d_model)
        self.W_o = np.random.randn(d_model, d_model)

    def attention(self, Q, K, V):
        # Scaled dot-product attention for a single head.
        scores = np.matmul(Q, K.T) / np.sqrt(self.d_head)  # Shape: (seq_len, seq_len)
        attention_weights = softmax(scores, axis=-1)       # Softmax along the last axis

        attended_values = np.matmul(attention_weights, V)  # Shape: (seq_len, d_head)
        return attended_values

    def forward(self, X):
        Q = np.matmul(X, self.W_q)
        K = np.matmul(X, self.W_k)
        V = np.matmul(X, self.W_v)

        Q_split = np.split(Q, self.num_heads, axis=-1)
        K_split = np.split(K, self.num_heads, axis=-1)
        V_split = np.split(V, self.num_heads, axis=-1)

        attended_values = []
        for i in range(self.num_heads):
            attended_values.append(self.attention(Q_split[i], K_split[i], V_split[i]))

        concatenated = np.concatenate(attended_values, axis=-1)  # Shape: (seq_len, d_model)
        output = np.matmul(concatenated, self.W_o)

        return output

class FeedForwardNetwork:
    def __init__(self, d_model, d_ff):
        self.d_model = d_model
        self.d_ff = d_ff

        self.W_1 = np.random.randn(d_model, d_ff)
        self.W_2 = np.random.randn(d_ff, d_model)

    def forward(self, X):
        hidden = np.matmul(X, self.W_1)
        hidden = np.maximum(hidden, 0)  # Apply ReLU activation
        output = np.matmul(hidden, self.W_2)

        return output

class Transformer:
    def __init__(self, d_model, num_heads, d_ff, num_layers):
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_ff = d_ff
        self.num_layers = num_layers

        # Each layer is an (attention, feed-forward) pair.
        self.layers = []
        for _ in range(num_layers):
            self.layers.append(
                (MultiHeadAttention(d_model, num_heads), FeedForwardNetwork(d_model, d_ff)))

    def forward(self, X):
        for attention, feed_forward in self.layers:
            X = X + attention.forward(X)     # Residual connection around attention
            X = X + feed_forward.forward(X)  # Residual connection around the FFN

        return X

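# A quick shape sanity check (illustrative only; the dimensions are assumptions):
#   model = Transformer(d_model=64, num_heads=4, d_ff=128, num_layers=2)
#   X = np.random.randn(10, 64)   # (seq_len, d_model)
#   model.forward(X).shape        # -> (10, 64)
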
# Define the IntentMatcher class

class IntentMatcher:
    def __init__(self, intents_file_path):
        self.intents_file_path = intents_file_path
        self.intents = self.train()

        # Create an instance of the Transformer, plus a hashed embedding table
        # and positional encoding so token strings can be mapped to
        # d_model-dimensional vectors before entering the Transformer.
        # The vocabulary size of 1000 is an arbitrary choice.
        self.d_model = 64
        self.max_seq_len = 10
        self.vocab_size = 1000
        self.transformer = Transformer(d_model=self.d_model, num_heads=4, d_ff=128, num_layers=2)
        self.embedding = np.random.randn(self.vocab_size, self.d_model)
        self.positional_encoding = PositionalEncoding(self.d_model, self.max_seq_len)

    class NoMatchingIntentError(Exception):
        pass

    def Tokenize(self, input_string):
        input_string = input_string.strip()
        input_string = input_string.translate(str.maketrans("", "", string.punctuation))
        words = input_string.split()

        words = self.stem_list(words)

        return words

    def Tokenize_List(self, input_list):
        # Flatten the tokens of every string in the list into one token list.
        Tokenwords = []
        for word in input_list:
            Tokenwords.extend(self.Tokenize(word))

        return Tokenwords

    def train(self):
        # Despite the name, this simply loads the intents definition from disk.
        with open(self.intents_file_path, 'r') as json_data:
            intents = json.load(json_data)
        return intents

    def patterncompare(self, input_string):
        transformed_input = self.transform_input(input_string)  # Computed but not yet used in matching

        input_string = input_string.lower()
        HighestSimilarity = 0
        MostSimilarPattern = None
        SimilarityPercentage = 0

        # Tokenize (and stem) the input once. Tokenize already stems, so
        # re-stemming inside the loop would corrupt the words.
        WordList2 = self.Tokenize(input_string)

        for intent_class in self.intents['intents']:
            patterns = intent_class.get('patterns')
            for pattern in patterns:
                Similarity = 0
                WordList = self.Tokenize(pattern.lower())

                # Count how many input words appear in this pattern.
                for word in WordList2:
                    if word in WordList:
                        Similarity += 1

                if Similarity > HighestSimilarity:
                    SimilarityPercentage = Similarity * 100 / (len(WordList) + len(WordList2))
                    HighestSimilarity = Similarity
                    MostSimilarPattern = intent_class

        print(f"Similarity: {SimilarityPercentage:.2f}%")

        if MostSimilarPattern:
            return MostSimilarPattern
        else:
            raise self.NoMatchingIntentError("No matching intent class found.")

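    # Worked example of the scoring above (hypothetical values): for input
    # "Hello there" matched against the single pattern "hello", one of the two
    # input tokens matches, so SimilarityPercentage = 1 * 100 / (1 + 2) = 33.33.
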
    def responsecompare(self, input_string, intent_class):
        transformed_input = self.transform_input(input_string)  # Computed but not yet used in matching

        input_string = input_string.lower()
        HighestSimilarity = 0
        SimilarityPercentage = 0
        MostSimilarResponse = None

        WordList2 = self.Tokenize(input_string)

        if intent_class is not None:
            responses = intent_class.get('responses')
        else:
            raise self.NoMatchingIntentError("No matching intent class found.")

        for response in responses:
            Similarity = 0
            WordList = self.Tokenize(response.lower())

            # Count how many input words appear in this response.
            for word in WordList2:
                if word in WordList:
                    Similarity += 1

            if Similarity > HighestSimilarity:
                SimilarityPercentage = Similarity * 100 / (len(WordList) + len(WordList2))
                HighestSimilarity = Similarity
                # Keep the original response string, so no back-conversion
                # from the stemmed form is needed afterwards.
                MostSimilarResponse = response

        print(f"Similarity: {SimilarityPercentage:.2f}%")

        return MostSimilarResponse

    def stem(self, input_word):
        # Check longer suffixes first so that, e.g., 'ily' is stripped before 'ly'.
        suffixes = ['ness', 'less', 'ment', 'able', 'ily', 'ing', 'est', 'ful',
                    'ive', 'ize', 'ous', "'s", 'ly', 'ed', 'es', 'er', 'y']
        for suffix in suffixes:
            if input_word.endswith(suffix):
                input_word = input_word[:-len(suffix)]
                break
        return input_word

    def stem_sentence(self, input_string):
        stemmedwords = []
        wordlist = input_string.split()
        for input_word in wordlist:
            stemmedwords.append(self.stem(input_word))

        return stemmedwords

    def stem_list(self, input_list):
        stemmedwords = []
        for word in input_list:
            stemmedwords.append(self.stem(word))

        return stemmedwords

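    # The stemmer is intentionally crude; for example (values assume the
    # suffix list above):
    #   stem("running")   -> "runn"
    #   stem("quickly")   -> "quick"
    #   stem("happiness") -> "happi"
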
    def transform_input(self, input_string):
        # Tokenize, embed, and transform the input using the Transformer.
        # Tokens are hashed into a fixed random embedding table as a minimal
        # stand-in for a learned embedding; note that Python's hash() is
        # salted per process, so the mapping is only stable within one run.
        tokens = self.Tokenize(input_string)
        token_ids = [hash(token) % self.vocab_size for token in tokens[:self.max_seq_len]]
        token_ids += [0] * (self.max_seq_len - len(token_ids))  # Pad to max_seq_len

        embedded = self.embedding[token_ids]  # Shape: (max_seq_len, d_model)

        # Add positional information before the attention layers.
        positions = np.arange(self.max_seq_len)
        embedded = embedded + self.positional_encoding.get_positional_encoding(positions)

        transformed_input = self.transformer.forward(embedded)
        return transformed_input

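# The intents file itself is not included in this commit; the matching code
# assumes a structure along these lines (the tag names and strings here are
# made up for illustration):
#
# {
#   "intents": [
#     {
#       "tag": "greeting",
#       "patterns": ["Hello", "Hi there", "Good morning"],
#       "responses": ["Hello!", "Hi, how can I help?"]
#     }
#   ]
# }
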
# Example usage
if __name__ == "__main__":
    intents_file_path = 'intents.json'

    intent_matcher = IntentMatcher(intents_file_path)
    pattern = intent_matcher.patterncompare("Hello")
    response = intent_matcher.responsecompare("Hello", pattern)
    print(response)