-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathbot.py
160 lines (124 loc) · 6.44 KB
/
bot.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
import nltk
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import random
import string

# Weight-file path; not used anywhere in this script but kept for compatibility.
checkpoint = "./chatbot_weights.ckpt"

# Load the two knowledge bases (general Python Q&A and module Q&A), lowercased.
# Context managers close the handles promptly (the original leaked both);
# errors='ignore' silently drops undecodable bytes instead of raising.
with open('nlp python answer finals.txt', 'r', errors='ignore') as f:
    raw = f.read().lower()
with open('modules pythons.txt', 'r', errors='ignore') as m:
    rawone = m.read().lower()

nltk.download('punkt')    # sentence/word tokenizer models
nltk.download('wordnet')  # corpus backing the WordNet lemmatizer

# Sentence corpora used for TF-IDF retrieval below; the word-token lists are
# kept for parity with the original script even though nothing reads them.
sent_tokens = nltk.sent_tokenize(raw)
word_tokens = nltk.word_tokenize(raw)
sent_tokensone = nltk.sent_tokenize(rawone)
word_tokensone = nltk.word_tokenize(rawone)

lemmer = nltk.stem.WordNetLemmatizer()
def LemTokens(tokens):
    """Lemmatize every token in *tokens* and return the results as a list."""
    return list(map(lemmer.lemmatize, tokens))
# Translation table mapping every punctuation code point to None (deletion).
remove_punct_dict = dict.fromkeys(ord(ch) for ch in string.punctuation)
def LemNormalize(text):
    """Lowercase *text*, strip punctuation, then tokenize and lemmatize it."""
    cleaned = text.lower().translate(remove_punct_dict)
    return LemTokens(nltk.word_tokenize(cleaned))
# Canned self-introductions, one picked at random by IntroduceMe().
Introduce_Ans = [
    "My name is Cobot.",
    "My name is Cobot you can called me Cobot.",
    "Im Cobot :) ",
    "My name is Cobot. and my nickname is Cobot and i am happy to solve your queries :) ",
]
# Words recognised as greetings, and the pool of greeting replies.
GREETING_INPUTS = (
    "hello", "hi", "hiii", "hii", "hiiii", "hiiii",
    "greetings", "sup", "what's up", "hey",
)
GREETING_RESPONSES = [
    "hi", "hey", "hii there", "hi there", "hello",
    "I am glad! You are talking to me",
]
# Exact-match phrasings of "what is python" and the single fixed answer.
Basic_Q = ("what is python ?", "what is python", "what is python?", "what is python.")
Basic_Ans = "Python is a high-level, interpreted, interactive and object-oriented scripting programming language python is designed to be highly readable It uses English keywords frequently where as other languages use punctuation, and it has fewer syntactical constructions than other languages."
# Exact-match phrasings of "what is module" and a pool of answers.
Basic_Om = (
    "what is module", "what is module.", "what is module ",
    "what is module ?", "what is module?", "what is module in python",
    "what is module in python.", "what is module in python?",
    "what is module in python ?",
)
Basic_AnsM = [
    "Consider a module to be the same as a code library.",
    "A file containing a set of functions you want to include in your application.",
    "A module can define functions, classes and variables. A module can also include runnable code. Grouping related code into a module makes the code easier to understand and use.",
]
def greeting(sentence):
    """Return a random greeting reply if any word of *sentence* is a greeting.

    Returns None (implicitly meaningful to the caller) when no word matches.
    """
    if any(word.lower() in GREETING_INPUTS for word in sentence.split()):
        return random.choice(GREETING_RESPONSES)
    return None
def basic(sentence):
    """Return the canned Python definition when *sentence* exactly matches a
    known phrasing of "what is python"; otherwise return None."""
    if sentence.lower() in Basic_Q:
        return Basic_Ans
    return None
def basicM(sentence):
    """Return a random canned 'module' definition when *sentence* exactly
    matches a known phrasing of "what is module"; otherwise return None."""
    if sentence.lower() in Basic_Om:
        return random.choice(Basic_AnsM)
    return None
def IntroduceMe(sentence):
    """Return one of the canned self-introductions at random.

    *sentence* is accepted for interface uniformity with the other
    responders but is never inspected.
    """
    return random.choice(Introduce_Ans)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def response(user_response):
    """Answer *user_response* from the general Q&A corpus via TF-IDF retrieval.

    The query is appended to ``sent_tokens`` so it can be vectorized alongside
    the corpus, and is removed again before returning. (The original code left
    it in — the cleanup in ``chat`` sat after a ``return`` and never ran — so
    the corpus grew and self-polluted on every call.)

    Returns the most similar corpus sentence, or a fallback message when the
    best similarity is exactly zero.
    """
    sent_tokens.append(user_response)
    try:
        vectorizer = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
        tfidf = vectorizer.fit_transform(sent_tokens)
        # Similarity of the query (last row) against every sentence, itself included.
        vals = cosine_similarity(tfidf[-1], tfidf)
        # Index -1 is the query itself (similarity 1.0); -2 is the best real match.
        idx = vals.argsort()[0][-2]
        flat = vals.flatten()
        flat.sort()
        best_score = flat[-2]
        # Grab the answer text before the query is removed so idx stays valid.
        best_match = sent_tokens[idx]
    finally:
        # Always restore the corpus, even if vectorization raises.
        sent_tokens.remove(user_response)
    if best_score == 0:
        return "Bhuji nai bujaya kon"
    return best_match
def responseone(user_response):
    """Answer *user_response* from the module Q&A corpus via TF-IDF retrieval.

    Mirror of ``response`` but backed by ``sent_tokensone``. The query is
    appended so it can be vectorized with the corpus and removed again before
    returning. (The original never removed it — the cleanup in ``chat`` sat
    after a ``return`` and was unreachable — so the corpus grew every call.)

    Returns the most similar corpus sentence, or a fallback message when the
    best similarity is exactly zero.
    """
    sent_tokensone.append(user_response)
    try:
        vectorizer = TfidfVectorizer(tokenizer=LemNormalize, stop_words='english')
        tfidf = vectorizer.fit_transform(sent_tokensone)
        # Similarity of the query (last row) against every sentence, itself included.
        vals = cosine_similarity(tfidf[-1], tfidf)
        # Index -1 is the query itself (similarity 1.0); -2 is the best real match.
        idx = vals.argsort()[0][-2]
        flat = vals.flatten()
        flat.sort()
        best_score = flat[-2]
        # Grab the answer text before the query is removed so idx stays valid.
        best_match = sent_tokensone[idx]
    finally:
        # Always restore the corpus, even if vectorization raises.
        sent_tokensone.remove(user_response)
    if best_score == 0:
        return "Bhuji nai Bujaia kon"
    return best_match
def chat(user_response):
    """Route *user_response* to the appropriate responder and return the reply.

    Dispatch order: bye -> thanks -> hard-coded corona FAQ -> canned 'module'
    definitions -> module-corpus retrieval (any input mentioning "module") ->
    greetings -> self-introduction -> canned Python definition -> general
    corpus retrieval.

    Fixes vs. the original: the "how many people are Affected..." branch could
    never match because the input is lowercased while the literal kept a
    capital 'A' (now lowercased); the unreachable post-return corpus cleanup
    lines and the unused local ``flag`` assignments were removed.
    """
    user_response = user_response.lower()
    if user_response == 'bye':
        return "Bye! take care.."
    if user_response == 'thanks' or user_response == 'thank you':
        return "You are welcome.."
    # Hard-coded corona FAQ; exact-match only, figures are a static snapshot.
    if user_response == "so,tell me when is corona virus going to end?":
        return "not predictable yet"
    if user_response == "how many people are affected all over the world by corona virus today?":
        return "Total affected today :20,403,658"
    if user_response == "how many people all over the world recovered from corona virus?":
        return "Total recovered :13,286,580"
    if user_response == "what can we do to prevent corona virus?":
        return "Wash your hands thoroughly with soap and warm water or with an alcohol-based hand sanitizer.Keep your hands and fingers away from your eyes, nose, and mouth.Avoid close contact with people who are infected.Clean and disinfect surfaces you touch often, like tables, doorknobs, light switches, toilets, faucets, and sinks."
    if basicM(user_response) is not None:
        return basicM(user_response)
    # Any mention of "module" adjacent to a space goes to the module corpus.
    if (user_response.find(" module ") != -1
            or user_response.find(" module") != -1
            or user_response.find("module ") != -1):
        return responseone(user_response)
    if greeting(user_response) is not None:
        return greeting(user_response)
    # A plain substring check subsumes the original's padded variants.
    if user_response.find("your name") != -1:
        return IntroduceMe(user_response)
    if basic(user_response) is not None:
        return basic(user_response)
    return response(user_response)