# Install/upgrade TensorFlow; tflearn, gradio, and nltk must also be available for the imports below.
!pip install --upgrade tensorflow

import nltk
nltk.download('punkt')  # tokenizer data used by nltk.word_tokenize

import nltk
from nltk.stem.lancaster import LancasterStemmer
import numpy as np
import tflearn
import tensorflow
import random
import json
import pandas as pd
import pickle
import gradio as gr

stemmer = LancasterStemmer()

| | with open("intents.json") as file: |
| | data = json.load(file) |
| |
|
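# The next cell expects a prebuilt data.pickle. Below is a minimal sketch of how such a file
# could be produced from intents.json, assuming each intent also carries a "patterns" list of
# example phrases ("tag" and "responses" are the fields read elsewhere in this script); the
# exact preprocessing behind the original pickle is not shown in the source.
import os

if not os.path.isfile("data.pickle"):
    words, labels, docs_x, docs_y = [], [], [], []
    for intent in data["intents"]:
        for pattern in intent.get("patterns", []):
            tokens = nltk.word_tokenize(pattern)
            words.extend(tokens)
            docs_x.append(tokens)
            docs_y.append(intent["tag"])
        if intent["tag"] not in labels:
            labels.append(intent["tag"])

    # Stem and deduplicate the vocabulary, and sort the label set
    words = sorted(set(stemmer.stem(w.lower()) for w in words if w != "?"))
    labels = sorted(labels)

    # Binary bag-of-words rows and one-hot label rows, matching the arrays unpickled below
    training, output = [], []
    for x, doc in enumerate(docs_x):
        stems = [stemmer.stem(w.lower()) for w in doc]
        training.append([1 if w in stems else 0 for w in words])
        row = [0] * len(labels)
        row[labels.index(docs_y[x])] = 1
        output.append(row)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, np.array(training), np.array(output)), f)
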
| | with open("data.pickle", "rb") as f: |
| | words, labels, training, output = pickle.load(f) |
| |
|
# Two hidden layers of 8 units each, with a softmax output over the intent tags
net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)
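# The pretrained checkpoint loaded below is assumed to sit alongside this script. If it does
# not, the network can be trained from the unpickled arrays and saved first; this is only a
# sketch, and the epoch/batch settings are guesses rather than the values used originally.
import os
if not os.path.isfile("MentalHealthChatBotmodel.tflearn.meta"):
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("MentalHealthChatBotmodel.tflearn")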
model.load("MentalHealthChatBotmodel.tflearn")

def bag_of_words(s, words):
    """Encode a sentence as a binary bag-of-words vector over the known vocabulary."""
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]

    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1

    return np.array(bag)

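# Quick sanity check of the encoder: the vector length should equal the vocabulary size.
# (The sentence here is an arbitrary example, not from the original script.)
print(bag_of_words("Hello, how are you?", words).shape, len(words))
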
def chat(message):
    """Classify the message into an intent tag and return a random canned response."""
    message = message.lower()
    results = model.predict([bag_of_words(message, words)])
    results_index = np.argmax(results)
    tag = labels[results_index]

    for tg in data["intents"]:
        if tg['tag'] == tag:
            responses = tg['responses']

    return random.choice(responses)

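# Example of calling the handler directly (the reply drawn depends on intents.json):
print(chat("Hello"))
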
# A Chatbot component is created here but is not wired into the Interface below,
# which uses plain text input and a label output instead.
chatbot = gr.Chatbot(label="Chat")

demo = gr.Interface(
    chat,
    inputs="text",
    outputs="label",
    title="Tabibu | Mental Health Bot",
)

if __name__ == "__main__":
    demo.launch()
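# In a hosted notebook (e.g. Colab), Gradio can also expose a temporary public URL;
# this is an optional variant, not part of the original script:
# demo.launch(share=True)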