Naive approach with word2Vec similarities
parent abd61c35d1
commit bc94107353
@@ -15,6 +15,7 @@ from nltk.corpus import wordnet
 import math
 import pprint
 
+from gensim.models import Word2Vec, KeyedVectors
 
 # # Naive Approach
 table = pd.read_csv('../Tools/emoji_descriptions.csv')
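The new import pulls in gensim's Word2Vec/KeyedVectors; the vectors themselves come from the word2vec.model file added at the bottom of this commit, and the training step is not part of the diff. Below is a minimal sketch, purely as an assumption, of how such a file could be produced so that KeyedVectors.load("word2vec.model") finds it: the corpus, the hyperparameters and the gensim version are placeholders (gensim 3.x names the dimension parameter size, 4.x names it vector_size).

# Hypothetical training script (not part of this commit) producing a file
# loadable with KeyedVectors.load("word2vec.model").
from gensim.models import Word2Vec
from nltk.tokenize import word_tokenize

corpus = ["smiling face with halo", "red heart", "thumbs up sign"]  # placeholder sentences
tokenized = [word_tokenize(sent.lower()) for sent in corpus]

# gensim 4.x API; older versions call the dimension parameter `size` instead of `vector_size`
model = Word2Vec(sentences=tokenized, vector_size=100, window=5, min_count=1, workers=4)
model.wv.save("word2vec.model")  # save only the KeyedVectors, as evaluate_sentence expects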
@@ -29,25 +30,25 @@ for index, row in table.iterrows():
 # Helper functions
 #######################
 
-def stemming(messages):
-    stemmed_messages = []
-    ps = PorterStemmer()
-    for m in messages:
-        words = word_tokenize(m)
-        sm = []
-        for w in words:
-            sm.append(ps.stem(w))
-        m = (" ").join(sm)
-        stemmed_messages.append(m)
-    return stemmed_messages
+def stemming(message):
+    ps = PorterStemmer()
+    words = word_tokenize(message)
+    sm = []
+    for w in words:
+        sm.append(ps.stem(w))
+    stemmed_message = (" ").join(sm)
+    return stemmed_message
 
 
 # * compare words to emoji descriptions
-def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all"):
+def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all", stem=True):
+    # assumes there is a trained w2v model stored in the same directory!
+    wv = KeyedVectors.load("word2vec.model", mmap='r')
 
+    if (stem):
+        sentence = stemming(sentence)
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
-    l = table.shape[0]
     matrix_list = []
 
     for index in tableDict.keys():
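In the rewritten helper, stemming now takes a single string instead of a list of messages and returns the stemmed string, which lets evaluate_sentence call it directly on the input sentence. A self-contained sketch of the same behaviour, assuming NLTK's punkt tokenizer data is installed:

# Equivalent standalone version of the new helper; requires nltk.download('punkt').
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize

def stemming(message):
    ps = PorterStemmer()
    words = word_tokenize(message)
    return " ".join(ps.stem(w) for w in words)

print(stemming("smiling faces with halos"))  # e.g. "smile face with halo"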
@@ -57,20 +58,11 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
-                syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
-                if len(syn1) == 0:
-                    continue
-                w1 = syn1[0]
-                #print(j, tokenized_sentence)
-                syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
-                if len(syn2) == 0:
-                    continue
-                w2 = syn2[0]
-                val = w1.wup_similarity(w2)
-                if val is None:
+                try:
+                    val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
+                except KeyError:
                     continue
                 mat[i,j] = val
-        #print(row['character'], mat)
         matrix_list.append(mat)
 
     return matrix_list
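This hunk swaps WordNet's Wu-Palmer similarity for the cosine similarity of the word2vec vectors; when either token is missing from the model's vocabulary, wv.similarity raises a KeyError and the except clause skips the cell, leaving the 0.0 that np.zeros initialised. The same pattern in isolation, as a sketch (the free-standing similarity_matrix function is illustrative, not from the commit):

# Sketch of the OOV-safe similarity matrix built inside evaluate_sentence;
# `wv` can be any gensim KeyedVectors instance.
import numpy as np

def similarity_matrix(wv, emoji_tokens, sentence_tokens):
    mat = np.zeros(shape=(len(emoji_tokens), len(sentence_tokens)))
    for i, e in enumerate(emoji_tokens):
        for j, s in enumerate(sentence_tokens):
            try:
                mat[i, j] = wv.similarity(e, s)  # cosine similarity of the two word vectors
            except KeyError:                     # one of the words is not in the vocabulary
                continue
    return mat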
@@ -83,10 +75,13 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
 
 # load and preprocess data
 # emojis_to_consider can be either a list or "all"
-def prepareData(stemming=False):
-    if(stemming):
+def prepareData(stem=True, lower=True):
+    if(stem):
         for index in tableDict.keys():
             tableDict[index][1] = stemming(tableDict[index][1])
+    if(lower):
+        for index in tableDict.keys():
+            tableDict[index][1] = tableDict[index][1].lower()
 
     #collect the emojis
     lookup = {}
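prepareData now optionally stems and lower-cases every description in tableDict before the lookup table is built. How the matrices returned by evaluate_sentence are turned into an emoji ranking is not shown in this commit; the sketch below is a hypothetical usage, assuming prepareData returns the lookup dictionary and that each matrix index lines up with a lookup key.

# Hypothetical usage (the aggregation strategy is an assumption, not from the diff):
# score each emoji by the mean of the best match each description word finds.
import numpy as np

lookup = prepareData(stem=True, lower=True)        # assumed to return the emoji lookup dict
mats = evaluate_sentence("I am so happy today", emojis_to_consider="all")

scores = [np.mean(np.max(m, axis=1)) if m.size else 0.0 for m in mats]
top = np.argsort(scores)[::-1][:5]                 # indices of the five best-scoring emojis
print([lookup.get(i) for i in top])                # assumes integer keys aligned with mats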
BIN  Project/naive_approach/word2vec.model  (new file)
Binary file not shown.