diff --git a/Project/naive_approach/fastTextVectors.kv b/Project/naive_approach/fastTextVectors.kv
new file mode 100644
index 0000000..be9a89d
Binary files /dev/null and b/Project/naive_approach/fastTextVectors.kv differ
diff --git a/Project/naive_approach/naive_approach.py b/Project/naive_approach/naive_approach.py
index efea7fe..741c2fc 100644
--- a/Project/naive_approach/naive_approach.py
+++ b/Project/naive_approach/naive_approach.py
@@ -42,14 +42,18 @@ def stemming(message):
 # * compare words to emoji descriptions
 def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all",\
-                      stem=True, use_wordnet=True):
+                      stem=True, embeddings="wordnet"):
 
     # assumes there is a trained w2v model stored in the same directory!
     __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
 
-    if use_wordnet==False:
-        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    if embeddings=="word2Vec":
+        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    elif embeddings=="fastText":
+        wv = KeyedVectors.load(str(__location__)+"/fastTextVectors.kv", mmap='r')
+
     if (stem):
         sentence = stemming(sentence)
+
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     matrix_list = []
@@ -61,7 +65,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
-                if use_wordnet:
+                if embeddings=="wordnet":
                     syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
                     if len(syn1) == 0:
                         continue
@@ -74,7 +78,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
                     val = w1.wup_similarity(w2)
                     if val is None:
                         continue
-                else:
+                elif (embeddings == "word2Vec" or embeddings == "fastText"):
                     try:
                         val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
                     except KeyError:
@@ -112,11 +116,11 @@ def prepareData(stem=True, lower=True):
     return lookup
 
 # make a prediction for an input sentence
-# use_wordnet=True --> use wordnet similarites, otherwise use Word2Vec
+# embeddings = ["wordnet", "word2Vec", "fastText"]
 def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng',\
-            use_wordnet=True, n=10, t=0.9):
+            embeddings="wordnet", n=10, t=0.9):
 
-    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, use_wordnet=use_wordnet)
+    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings)
 
     try:
         if(criteria=="summed"):
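
For reviewers, a minimal usage sketch of the new embeddings switch. This is a hypothetical example, not part of the diff: it assumes naive_approach is importable from this directory, that the required NLTK data and the word2vec.model / fastTextVectors.kv files are in place, and it only exercises the parameters visible in the diff (prepareData, predict, criteria, n, t).

# Hypothetical usage sketch (not part of this diff); assumes the NLTK corpora and
# the embedding files in Project/naive_approach/ are available.
import naive_approach as na

lookup = na.prepareData(stem=True, lower=True)   # emoji-description lookup table

sentence = "I love sunny days at the beach"

# WordNet similarities (default, replaces the old use_wordnet=True)
print(na.predict(sentence, lookup, embeddings="wordnet", criteria="threshold", t=0.9))

# word2vec / fastText similarities (replace the old use_wordnet=False path)
print(na.predict(sentence, lookup, embeddings="word2Vec", criteria="summed", n=10))
print(na.predict(sentence, lookup, embeddings="fastText"))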