diff --git a/Project/naive_approach/fastTextVectors.kv b/Project/naive_approach/fastTextVectors.kv
new file mode 100644
index 0000000..be9a89d
Binary files /dev/null and b/Project/naive_approach/fastTextVectors.kv differ
diff --git a/Project/naive_approach/naive_approach.py b/Project/naive_approach/naive_approach.py
index e08cd47..741c2fc 100644
--- a/Project/naive_approach/naive_approach.py
+++ b/Project/naive_approach/naive_approach.py
@@ -18,7 +18,7 @@ import pprint
 from gensim.models import Word2Vec, KeyedVectors
 
 # # Naive Approach
-table = pd.read_csv('../Tools/emoji_descriptions.csv')
+table = pd.read_csv('../Tools/emoji_descriptions_preprocessed.csv')
 
 ##Store table in the format:
 ## { index: [emoji, description]}
@@ -41,13 +41,19 @@ def stemming(message):
 
 
 # * compare words to emoji descriptions
-def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all", stem=True):
+def evaluate_sentence(sentence, description_key='description', lang='eng', emojis_to_consider="all",
+                      stem=True, embeddings="wordnet"):
 
     # assumes there is a trained w2v model stored in the same directory!
     __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
-    wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    if embeddings == "word2Vec":
+        wv = KeyedVectors.load(str(__location__) + "/word2vec.model", mmap='r')
+    elif embeddings == "fastText":
+        wv = KeyedVectors.load(str(__location__) + "/fastTextVectors.kv", mmap='r')
+
     if (stem):
         sentence = stemming(sentence)
+
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     matrix_list = []
@@ -59,10 +65,24 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
-                try:
-                    val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
-                except KeyError:
-                    continue
+                if embeddings == "wordnet":
+                    syn1 = wordnet.synsets(emoji_tokens[i], lang=lang)
+                    if len(syn1) == 0:
+                        continue
+                    w1 = syn1[0]
+                    # score the pair with Wu-Palmer similarity of the first synsets
+                    syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
+                    if len(syn2) == 0:
+                        continue
+                    w2 = syn2[0]
+                    val = w1.wup_similarity(w2)
+                    if val is None:
+                        continue
+                elif embeddings in ("word2Vec", "fastText"):
+                    try:
+                        val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
+                    except KeyError:
+                        continue
                 mat[i,j] = val
         matrix_list.append(mat)
 
@@ -96,9 +116,11 @@ def prepareData(stem=True, lower=True):
     return lookup
 
 # make a prediction for an input sentence
-def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng', n=10, t=0.9):
+# embeddings: one of "wordnet", "word2Vec", "fastText"
+def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang='eng',
+            embeddings="wordnet", n=10, t=0.9):
 
-    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider)
+    result = evaluate_sentence(sentence, lang=lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings)
 
     try:
         if(criteria=="summed"):
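
Note on the new wordnet branch: it scores each (description word, sentence word) pair with Wu-Palmer similarity between the first synset of each word, skipping pairs where either word has no synset or the similarity is undefined. A minimal standalone sketch of that scoring step ("grin" and "smile" are illustrative inputs, not taken from the data; requires nltk.download('wordnet')):

    from nltk.corpus import wordnet

    def wup_score(word1, word2, lang='eng'):
        # take the first synset of each word, mirroring evaluate_sentence's wordnet branch
        syn1 = wordnet.synsets(word1, lang=lang)
        syn2 = wordnet.synsets(word2, lang=lang)
        if len(syn1) == 0 or len(syn2) == 0:
            return None  # word unknown to WordNet
        # Wu-Palmer similarity in (0, 1]; None when the synsets share no common ancestor
        return syn1[0].wup_similarity(syn2[0])

    print(wup_score("grin", "smile"))

Because Wu-Palmer similarity is taxonomy-based, this branch needs no pretrained vector model, which is why evaluate_sentence only loads a KeyedVectors file for the other two backends.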
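The commit also checks in fastTextVectors.kv as a binary blob, so the diff cannot show how it was produced. One plausible recipe, assuming the file is the saved KeyedVectors of a gensim FastText model (the corpus and hyperparameters below are placeholders):

    from gensim.models import FastText

    # placeholder corpus: an iterable of tokenised sentences
    sentences = [["smiling", "face", "with", "open", "mouth"],
                 ["red", "heart"]]

    model = FastText(sentences, size=50, window=3, min_count=1)  # `size` is `vector_size` in gensim >= 4.0
    model.wv.save("fastTextVectors.kv")  # loadable later via KeyedVectors.load(..., mmap='r')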
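With these changes, callers select the backend through the new embeddings keyword; a hypothetical call (sentence and threshold are illustrative):

    lookup = prepareData()
    # "wordnet" needs no model file; "word2Vec" / "fastText" expect word2vec.model
    # resp. fastTextVectors.kv next to naive_approach.py
    predict("I am so happy today", lookup, criteria="threshold", embeddings="fastText", t=0.8)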