# coding: utf-8

# In[1]:

import pandas as pd
from IPython.display import clear_output, Markdown, Math
import ipywidgets as widgets
import os
import unicodedata as uni
import numpy as np
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import wordnet
import math
import pprint


# # Naive Approach

table = pd.read_csv('../Tools/emoji_descriptions.csv')


#######################
# Helper functions
#######################

def stem_descriptions(messages):
    # stem every word of each message with the Porter stemmer
    stemmed_messages = []
    ps = PorterStemmer()
    for m in messages:
        words = word_tokenize(m)
        sm = []
        for w in words:
            sm.append(ps.stem(w))
        m = (" ").join(sm)
        stemmed_messages.append(m)
    return stemmed_messages


# * compare words to emoji descriptions
def evaluate_sentence(sentence, table, description_key='description', lang='eng',
                      emojis_to_consider="all"):
    # build one similarity matrix per emoji: rows are description tokens,
    # columns are sentence tokens, entries are Wu-Palmer similarities
    tokenized_sentence = word_tokenize(sentence)
    n = len(tokenized_sentence)
    matrix_list = []

    for index, row in table.iterrows():
        emoji_tokens = word_tokenize(row[description_key])
        m = len(emoji_tokens)
        mat = np.zeros(shape=(m, n))
        for i in range(len(emoji_tokens)):
            for j in range(len(tokenized_sentence)):
                syn1 = wordnet.synsets(emoji_tokens[i], lang=lang)
                if len(syn1) == 0:
                    continue
                w1 = syn1[0]
                syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
                if len(syn2) == 0:
                    continue
                w2 = syn2[0]
                val = w1.wup_similarity(w2)
                if val is None:
                    continue
                mat[i, j] = val
        matrix_list.append(mat)

    return matrix_list


###########################
# Functions to be called from main script
###########################

# load and preprocess data
# emojis_to_consider can be either a list or "all"
def prepareData(stem=False):
    if stem:
        table['description'] = stem_descriptions(table['description'])

    # collect the emojis
    lookup = {}
    emoji_set = []
    for index, row in table.iterrows():
        lookup[index] = row['character']
        emoji_set.append(row['character'])

    emoji_set = set(emoji_set)

    return lookup


# make a prediction for an input sentence
def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold",
            description_key='description', lang='eng', n=10, t=0.9):
    result = evaluate_sentence(sentence, table, description_key, lang,
                               emojis_to_consider=emojis_to_consider)

    try:
        # turn each similarity matrix into a single (negated) score so that
        # np.argsort ranks the best-matching emojis first
        if criteria == "summed":
            resultValues = [-np.sum(x) for x in result]
        elif criteria == "max_val":
            resultValues = [-np.max(x) for x in result]
        elif criteria == "avg":
            resultValues = [-np.mean(x) for x in result]
        else:
            resultValues = [-len(np.where(x > t)[0]) / (x.shape[0] * x.shape[1])
                            for x in result]

        indexes = np.argsort(resultValues)
        results = np.sort(resultValues)

        # optionally restrict the ranking to a given list of emojis
        if emojis_to_consider != "all" and isinstance(emojis_to_consider, list):
            indexes2 = []
            results2 = []
            for i in range(len(indexes)):
                if lookup[indexes[i]] in emojis_to_consider:
                    indexes2.append(indexes[i])
                    results2.append(results[i])
            indexes = indexes2
            results = results2

        indexes = indexes[0:n]
        results = results[0:n]

        # build a result table
        table_array = [[lookup[indexes[i]], str(table.iloc[indexes[i]][description_key])]
                       for i in range(len(indexes))]
        table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])

        return list(table_frame[criteria]), results

    except ZeroDivisionError:
        print("There seems to be a problem with the input format. Please enter a nonempty string")


# predict("I like to travel by train", description_key='description', lang='eng')
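
# In[2]:

# Usage sketch (illustrative, not part of the original notebook): build the
# index-to-emoji lookup once, then ask for the top suggestions for a sentence.
# The sentence, criteria and n below are example values; criteria can be
# "summed", "max_val", "avg" or the default "threshold". Left commented out,
# like the example call above, so that importing this module has no side effects.

# lookup = prepareData()
# emojis, scores = predict("I like to travel by train", lookup, criteria="summed", n=5)
# print(emojis, scores)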