@@ -8,6 +8,7 @@ def Tokenize(input_string):
     input_string = input_string.strip()
     input_string = input_string.translate(str.maketrans("", "", string.punctuation))
     words = input_string.split()
+
     return words
 
 def train(intents_file_path):
@@ -56,10 +57,10 @@ def patterncompare(input_string, intents_file_path):
             if word in WordList:
                 Similarity = Similarity + 1
 
-                if Similarity > HighestSimilarity:
-                    SimilarityPercentage = Similarity / len(OverallWordList + WordList2)
-                    HighestSimilarity = Similarity
-                    MostSimilarPattern = intent_class
+        if Similarity > HighestSimilarity:
+            SimilarityPercentage = Similarity / len(OverallWordList + WordList2)
+            HighestSimilarity = Similarity
+            MostSimilarPattern = intent_class
 
     print(f"Similarity: {SimilarityPercentage:.2f}%")
 
@@ -110,10 +111,10 @@ def responsecompare(input_string, intents_file_path, intent_class):
             if word in WordList:
                 Similarity = (Similarity + 1 / len(WordList + WordList2))
 
-                if Similarity > HighestSimilarity:
-                    SimilarityPercentage = Similarity * 100
-                    HighestSimilarity = Similarity
-                    MostSimilarResponse = response
+        if Similarity > HighestSimilarity:
+            SimilarityPercentage = Similarity * 100
+            HighestSimilarity = Similarity
+            MostSimilarResponse = response
 
     print(f"Similarity: {SimilarityPercentage:.2f}%")
 
@@ -130,7 +131,7 @@ def responsecompare(input_string, intents_file_path, intent_class):
     return MostSimilarResponse
 
 def stem(input_word):
-    suffixes = ['ing', 'ly', 'ed', 'es', 's', 'er', 'est', 'y', 'ily', 'able', 'ful', 'ness', 'less', 'ment', 'ive', 'ize', 'ous']
+    suffixes = ['ing', 'ly', 'ed', 'es', "'s", 'er', 'est', 'y', 'ily', 'able', 'ful', 'ness', 'less', 'ment', 'ive', 'ize', 'ous']
     for suffix in suffixes:
         if input_word.endswith(suffix):
             input_word = input_word[:-len(suffix)]
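For illustration only, a minimal sketch of how the updated stem helper behaves once plain 's' is swapped for "'s" in the suffix list. The trailing return and the sample calls are assumptions, since the hunk ends before the function's last line.

def stem(input_word):
    # Suffix list as in the + line of the hunk above: plain 's' is replaced by "'s"
    suffixes = ['ing', 'ly', 'ed', 'es', "'s", 'er', 'est', 'y', 'ily', 'able', 'ful', 'ness', 'less', 'ment', 'ive', 'ize', 'ous']
    for suffix in suffixes:
        if input_word.endswith(suffix):
            input_word = input_word[:-len(suffix)]
    return input_word  # assumed: the diff hunk cuts off before the return

print(stem("dog's"))  # -> dog   ("'s" is stripped)
print(stem("dogs"))   # -> dogs  (a bare trailing 's' is no longer treated as a suffix)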