# N0843239_MakeUp_Chatbot.py
# -*- coding: utf-8 -*-
#!pip install aiml
#!pip install scikit-learn
#!pip install nltk
#!pip install azure-cognitiveservices-vision-computervision
#!pip install azure-ai-vision
#!pip install azure-ai-textanalytics==5.2.0
import aiml
import sklearn
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import linear_kernel
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
import numpy as np
import json, requests
import nltk
#nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
import os
import uuid
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import load_img, img_to_array
from keras.preprocessing.image import ImageDataGenerator
from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
# Create a Kernel object. No string encoding (all I/O is unicode)
kern = aiml.Kernel()
kern.setTextEncoding(None)
kern.bootstrap(learnFiles="MakeUpBot.xml")
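# The AIML file is assumed to answer special queries with strings of the form
# '#<cmd>$<payload>' (e.g. '#0$Goodbye!'); the main loop below parses this protocol.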
lemmatizer = WordNetLemmatizer()
#set up Azure Translator Service
key = '061d8e84da6b4fbc8d86453b2f2c92bd'
endpoint = 'https://api.cognitive.microsofttranslator.com/'
region = 'uksouth'
#set up Azure Computer Vision Service
key1 = 'd898006da33c4b3abc3e76b8d3a93ddb'
endpoint1 = 'https://imageanalysiscomputervision.cognitiveservices.azure.com/'
region1 = 'uksouth'
#set up Azure Language Sentiment Analysis
key2 = '885cb5d119ea45beb64a67f3c1244ed0'
endpoint2 = 'https://taskdsentimentanalysis.cognitiveservices.azure.com/'
region2 = 'eastus'
# Get client for computer vision service
computervision_client = ComputerVisionClient(endpoint1, CognitiveServicesCredentials(key1))
def azure_Sentiment(text):
    #function has been derived from https://learn.microsoft.com/en-us/azure/cognitive-services/language-service/sentiment-opinion-mining/quickstart?tabs=windows&pivots=programming-language-python
    language_Key = AzureKeyCredential(key2)
    textanalytics_Client = TextAnalyticsClient(endpoint=endpoint2, credential=language_Key)
    text_Doc = [text]
    sentiment_Result = textanalytics_Client.analyze_sentiment(text_Doc, show_opinion_mining=True)
    text_Result = [doc for doc in sentiment_Result if not doc.is_error]
    positives = [doc for doc in text_Result if doc.sentiment == "positive"]
    negatives = [doc for doc in text_Result if doc.sentiment == "negative"]
    for doc in text_Result:
        for sentence in doc.sentences:
            print("\nSentence sentiment: {}\n".format(sentence.sentiment))
def azure_Translator(region, key, text, target_Lang='fr'):
    #This function has been derived from https://github.com/MicrosoftDocs/ai-fundamentals/blob/master/02c%20-%20Translation.ipynb
    # Create the URL for the Text Translator service REST request
    # (the translator endpoint is taken from the module-level 'endpoint' variable)
    path = '/translate'
    constructed_url = endpoint + path
    params = {
        'api-version': '3.0',
        'to': target_Lang
    }
    # Prepare the request headers
    headers = {
        'Ocp-Apim-Subscription-Key': key,
        'Ocp-Apim-Subscription-Region': region,
        'Content-type': 'application/json',
        'X-ClientTraceId': str(uuid.uuid4())
    }
    # Add the text to be translated to the body
    body = [{
        'text': text
    }]
    # Post the request and pull the translated text out of the JSON response
    translate_Response = requests.post(constructed_url, params=params, headers=headers, json=body)
    translate_Json = translate_Response.json()
    return translate_Json[0]["translations"][0]["text"]
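# Example usage (hypothetical input text); uncomment to try against the live service:
# print(azure_Translator(region, key, "Where can I buy mascara?", target_Lang='es'))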
def image_Checker(imagepath):
    #function has been derived from https://www.analyticsvidhya.com/blog/2021/06/beginner-friendly-project-cat-and-dog-classification-using-cnn/
    # Load the image at the size the CNN was trained on and scale pixels to [0, 1]
    predict_Image = load_img(imagepath, target_size=(80, 80))
    predict_Image_Modified = img_to_array(predict_Image)
    predict_Image_Modified = predict_Image_Modified / 255
    predict_Image_Modified = np.expand_dims(predict_Image_Modified, axis=0)
    image_Classifier = keras.models.load_model("CNN_Image_Classification_Model.h5")
    result = image_Classifier.predict(predict_Image_Modified)
    if result[0][0] >= 0.5:
        result_Prediction = 'Make up is worn'
    else:
        result_Prediction = 'Make up is not worn'
    print("\nPrediction -> " + result_Prediction + "\n")
language_Code_Dict = {
    "Afrikaans":"af",
    "Albanian":"sq",
    "Amharic":"am",
    "Arabic":"ar",
    "Armenian":"hy",
    "Assamese":"as",
    "Azerbaijani (Latin)":"az",
    "Bangla":"bn",
    "Bashkir":"ba",
    "Basque":"eu",
    "Bosnian (Latin)":"bs",
    "Bulgarian":"bg",
    "Cantonese (Traditional)":"yue",
    "Catalan":"ca",
    "Chinese (Literary)":"lzh",
    "Chinese Simplified":"zh-Hans",
    "Chinese Traditional":"zh-Hant",
    "Croatian":"hr",
    "Czech":"cs",
    "Danish":"da",
    "Dari":"prs",
    "Divehi":"dv",
    "Dutch":"nl",
    "English":"en",
    "Estonian":"et",
    "Faroese":"fo",
    "Fijian":"fj",
    "Filipino":"fil",
    "Finnish":"fi",
    "French":"fr",
    "French (Canada)":"fr-ca",
    "Galician":"gl",
    "Georgian":"ka",
    "German":"de",
    "Greek":"el",
    "Gujarati":"gu",
    "Haitian Creole":"ht",
    "Hebrew":"he",
    "Hindi":"hi",
    "Hmong Daw (Latin)":"mww",
    "Hungarian":"hu",
    "Icelandic":"is",
    "Indonesian":"id",
    "Inuinnaqtun":"ikt",
    "Inuktitut":"iu",
    "Inuktitut (Latin)":"iu-Latn",
    "Irish":"ga",
    "Italian":"it",
    "Japanese":"ja",
    "Kannada":"kn",
    "Kazakh":"kk",
    "Khmer":"km",
    "Klingon":"tlh-Latn",
    "Klingon (plqaD)":"tlh-Piqd",
    "Korean":"ko",
    "Kurdish (Central)":"ku",
    "Kurdish (Northern)":"kmr",
    "Kyrgyz (Cyrillic)":"ky",
    "Lao":"lo",
    "Latvian":"lv",
    "Lithuanian":"lt",
    "Macedonian":"mk",
    "Malagasy":"mg",
    "Malay (Latin)":"ms",
    "Malayalam":"ml",
    "Maltese":"mt",
    "Maori":"mi",
    "Marathi":"mr",
    "Mongolian (Cyrillic)":"mn-Cyrl",
    "Mongolian (Traditional)":"mn-Mong",
    "Myanmar":"my",
    "Nepali":"ne",
    "Norwegian":"nb",
    "Odia":"or",
    "Pashto":"ps",
    "Persian":"fa",
    "Polish":"pl",
    "Portuguese (Brazil)":"pt",
    "Portuguese (Portugal)":"pt-pt",
    "Punjabi":"pa",
    "Queretaro Otomi":"otq",
    "Romanian":"ro",
    "Russian":"ru",
    "Samoan (Latin)":"sm",
    "Serbian (Cyrillic)":"sr-Cyrl",
    "Serbian (Latin)":"sr-Latn",
    "Slovak":"sk",
    "Slovenian":"sl",
    "Somali (Arabic)":"so",
    "Spanish":"es",
    "Swahili (Latin)":"sw",
    "Swedish":"sv",
    "Tahitian":"ty",
    "Tamil":"ta",
    "Tatar (Latin)":"tt",
    "Telugu":"te",
    "Thai":"th",
    "Tibetan":"bo",
    "Tigrinya":"ti",
    "Tongan":"to",
    "Turkish":"tr",
    "Turkmen (Latin)":"tk",
    "Ukrainian":"uk",
    "Upper Sorbian":"hsb",
    "Urdu":"ur",
    "Uyghur (Arabic)":"ug",
    "Uzbek (Latin)":"uz",
    "Vietnamese":"vi",
    "Welsh":"cy",
    "Yucatec Maya":"yua"
}
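# Example lookup: language_Code_Dict["French"] returns "fr", the code the Translator API expects.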
print("\nWelcome to this Makeup chatbot. Please feel free to ask questions from me!\n")
while True:
    #get user input
    try:
        userInput = input("> ")
    except (KeyboardInterrupt, EOFError):
        print("Bye!")
        break
    responseAgent = 'aiml'
    #activate selected response agent
    if responseAgent == 'aiml':
        xmlAnswer = kern.respond(userInput)
        if xmlAnswer and xmlAnswer[0] == '#':
            params = xmlAnswer[1:].split('$')
            cmd = int(params[0])
            if cmd == 0:
                print(params[1])
                break
            elif cmd == 1:
                #User is able to ask for a branded product type.
                #The question must begin with 'what is a', 'what is an' or 'name a'
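                #Illustration (hypothetical input): "maybelline lipstick" builds the query
                #https://makeup-api.herokuapp.com/api/v1/products.json?brand=maybelline&product_type=lipstick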
                try:
                    responseSuccess = False
                    URL_makeupAPI = r"https://makeup-api.herokuapp.com/api/v1/products.json?"
                    inputBrandProd = params[1]
                    inputBrandProd = inputBrandProd.split(" ")
                    response = requests.get(URL_makeupAPI + r"brand=" + inputBrandProd[0] + r"&product_type=" + inputBrandProd[1])
                    if response.status_code == 200:
                        response_json = json.loads(response.content)
                        if response_json:
                            name = response_json[0]['name']
                            prod_type = response_json[0]['product_type']
                            description = response_json[0]['description']
                            print('\nName:\t', name, '\n\nDescription:\n\t', description, "\n")
                            responseSuccess = True
                    if not responseSuccess:
                        print("Sorry, I could not find an example for the brand and product you gave me")
                except Exception:
                    print("I did not get that, please try again.")
            elif cmd == 4:
                #for image translation the question must begin with 'show me text from'
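                #Illustration (hypothetical input): "show me text from sign.png in French"
                #puts the image path at index 4 and the target language name at index 6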
                try:
                    input_Array = userInput.split(" ")
                    image_Path = input_Array[4]
                    #compares the language input by the user with the dictionary
                    chosen_Lang = input_Array[6]
                    chosen_Lang_code = language_Code_Dict[chosen_Lang]
                    # Read the image file
                    found_Image = open(image_Path, "rb")
                    # Use Computer Vision to find text in the image
                    image_Results = computervision_client.recognize_printed_text_in_stream(found_Image)
                    # Read the words in each line of text, region by region
                    a_Line_text = ''
                    for a_Region in image_Results.regions:
                        for a_Line in a_Region.lines:
                            for a_Word in a_Line.words:
                                a_Line_text += a_Word.text + ' '
                    translated_Image = azure_Translator(region, key, a_Line_text, target_Lang=chosen_Lang_code)
                    print('\n{} -> {}'.format(a_Line_text, translated_Image))
                    azure_Sentiment(a_Line_text)
                except Exception:
                    print("Sorry, I could not complete the translation")
            elif cmd == 5:
                #for image analysis the question must begin with 'does she wear makeup in'
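                #Illustration (hypothetical input): "does she wear makeup in photo.jpg?"
                #puts the image path at index 5; the trailing '?' is stripped below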
                try:
                    input_Array = userInput.split(" ")
                    #strip any trailing question mark so the path is a valid filename
                    image_Path = input_Array[5].replace('?', '')
                    image_Checker(image_Path)
                    #code has been derived from https://youtu.be/2gW-JzY4JgU
                    with open(image_Path, 'rb') as chosen_Image:
                        result_Caption = computervision_client.describe_image_in_stream(chosen_Image)
                        print("Content found using Cloud -> " + result_Caption.captions[0].text)
                except Exception:
                    print("Image could not be found")
            elif cmd == 99:
                #if no other options fit, the user is directed towards the CSV file.
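                #Illustration (hypothetical numbers): for CSV questions
                #["What is mascara?", "What is blush?"] and input "tell me about mascara",
                #the similarity scores might be [0.62, 0.0]; argmax selects index 0, so the
                #mascara answer is printed. If every score were <= 0.1, the apology is printed.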
                try:
                    df = pd.read_csv('QA.csv').dropna()
                    userInput = lemmatizer.lemmatize(userInput)
                    inputArray = [userInput]
                    counter = 0
                    #derived from: https://goodboychan.github.io/python/datacamp/natural_language_processing/2020/07/17/04-TF-IDF-and-similarity-scores.html
                    # Create TfidfVectorizer object
                    vectorizer = TfidfVectorizer()
                    #derived from: https://stackoverflow.com/questions/58240401/matching-phrase-using-tf-idf-and-cosine-similarity
                    similarity_index_list = cosine_similarity(vectorizer.fit_transform(df["Question"]), vectorizer.transform(inputArray)).flatten()
                    #stores the value of the answer at the index position of the best match
                    csvAnswerOutput = df.loc[similarity_index_list.argmax(), "Answer"]
                    #Checks whether every CSV question scores at or below 0.1 similarity;
                    #if so, the potential answer is disregarded
                    for x in similarity_index_list:
                        if x <= 0.1:
                            counter = counter + 1
                    #only print the answer if there is a suitable similarity level
                    if counter == len(similarity_index_list):
                        print("I'm sorry, I don't have an answer for that.")
                    else:
                        print(csvAnswerOutput)
                except Exception:
                    print("I did not get that, please try again.")
        else:
            print(xmlAnswer)
    else:
        print("Error")