diff --git a/Project/naive_approach/naive_approach.py b/Project/naive_approach/naive_approach.py
new file mode 100644
index 0000000..0d1484c
--- /dev/null
+++ b/Project/naive_approach/naive_approach.py
@@ -0,0 +1,129 @@
# coding: utf-8

# In[1]:


import pandas as pd
from IPython.display import clear_output, Markdown, Math
import ipywidgets as widgets
import os
import unicodedata as uni
import numpy as np
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import wordnet
import math
import pprint


# # Naive Approach


#######################
# Helper functions
#######################

# stem every word of every message with the Porter stemmer
def stemming(messages):
    stemmed_messages = []
    ps = PorterStemmer()
    for m in messages:
        words = word_tokenize(m)
        stemmed_messages.append(" ".join(ps.stem(w) for w in words))
    return stemmed_messages


# compare words to emoji descriptions:
# for each emoji description, build an (m x n) matrix holding the Wu-Palmer
# similarity between each of its m tokens and each of the sentence's n tokens
def evaluate_sentence(sentence, table, description_key='description', lang='eng'):

    tokenized_sentence = word_tokenize(sentence)
    n = len(tokenized_sentence)
    matrix_list = []

    for index, row in table.iterrows():
        emoji_tokens = word_tokenize(row[description_key])
        m = len(emoji_tokens)

        mat = np.zeros(shape=(m, n))
        for i in range(m):
            syn1 = wordnet.synsets(emoji_tokens[i], lang=lang)
            if len(syn1) == 0:
                continue
            w1 = syn1[0]
            for j in range(n):
                syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
                if len(syn2) == 0:
                    continue
                w2 = syn2[0]
                val = w1.wup_similarity(w2)
                if val is None:
                    continue
                mat[i, j] = val
        matrix_list.append(mat)

    return matrix_list


###########################
# Functions to be called from main script
###########################


# load and preprocess the emoji description table.
# stem: whether to Porter-stem the descriptions
# emojis_to_consider: either a list of emoji characters or "all"
def prepareData(stem=False, emojis_to_consider="all"):

    table = pd.read_csv('../Tools/emoji_descriptions.csv')

    if stem:
        table['description'] = stemming(table['description'])

    # map table row indices to the emoji characters we want to consider
    lookup = {}
    for index, row in table.iterrows():
        if emojis_to_consider == "all" or (isinstance(emojis_to_consider, list)
                                           and row['character'] in emojis_to_consider):
            lookup[index] = row['character']

    return lookup, table


# make a prediction for an input sentence
def predict(sentence, lookup, table, emojis_to_consider="all", criteria="threshold",
            description_key='description', lang='eng', n=10, t=0.9):

    result = evaluate_sentence(sentence, table, description_key, lang)

    # score each similarity matrix (negated so that argsort sorts descending)
    if criteria == "summed":
        scores = [-np.sum(x) for x in result]
    elif criteria == "max_val":
        scores = [-np.max(x) for x in result]
    elif criteria == "avg":
        scores = [-np.mean(x) for x in result]
    else:
        # "threshold": fraction of matrix entries whose similarity exceeds t
        scores = [-len(np.where(x > t)[0]) / (x.shape[0] * x.shape[1]) for x in result]

    indexes = np.argsort(scores)

    # keep only emojis present in the lookup table, then take the top n,
    # so the result always holds up to n valid entries
    if emojis_to_consider != "all":
        indexes = [i for i in indexes if i in lookup]
    indexes = indexes[:n]

    # build a result table
    table_array = [[lookup[i], str(table.iloc[i][description_key])] for i in indexes]

    table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])

    return list(table_frame[criteria])


# predict("I like to travel by train", lookup, table, description_key='description', lang='eng')
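
# Illustrative helper (an addition for clarity, not part of the original
# pipeline): the score evaluate_sentence() writes into each matrix cell,
# computed for a single word pair. The first WordNet synset of each word is
# compared with Wu-Palmer similarity; exact values depend on the installed
# WordNet data, and 0.0 stands in for missing synsets or undefined scores.
def pairwise_similarity(word_a, word_b, lang='eng'):
    syn_a = wordnet.synsets(word_a, lang=lang)
    syn_b = wordnet.synsets(word_b, lang=lang)
    if len(syn_a) == 0 or len(syn_b) == 0:
        return 0.0
    return syn_a[0].wup_similarity(syn_b[0]) or 0.0

# e.g. pairwise_similarity('train', 'car') yields a float in (0, 1],
# higher for semantically closer nouns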
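
# A minimal usage sketch (an illustration, not part of the original notebook):
# it assumes ../Tools/emoji_descriptions.csv exists and that the NLTK 'punkt'
# and 'wordnet' resources have been downloaded, e.g. via nltk.download('punkt')
# and nltk.download('wordnet').
if __name__ == '__main__':
    lookup, table = prepareData(stem=False, emojis_to_consider="all")
    for c in ("summed", "max_val", "avg", "threshold"):
        print(c, predict("I like to travel by train", lookup, table,
                         criteria=c, n=5))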