Merge branch 'master' of ssh://the-cake-is-a-lie.net:20022/jonas/NLP-LAB
commit 4cd15127ba
BIN Project/naive_approach/fastTextVectors.kv (new normal file)
Binary file not shown.
@@ -18,7 +18,7 @@ import pprint
 from gensim.models import Word2Vec, KeyedVectors

 # # Naive Approach
-table = pd.read_csv('../Tools/emoji_descriptions.csv')
+table = pd.read_csv('../Tools/emoji_descriptions_preprocessed.csv')

 ##Store table in the format:
 ## { index: [emoji, description]}
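Note: the lookup structure described in the comments above is an index-to-[emoji, description] dict built from this CSV. A minimal sketch, assuming the file has 'emoji' and 'description' columns (column names are not confirmed by this diff):

    import pandas as pd

    table = pd.read_csv('../Tools/emoji_descriptions_preprocessed.csv')
    # { index: [emoji, description] }, as described in the comments above;
    # the column names 'emoji' and 'description' are assumed here
    lookup = {i: [row['emoji'], row['description']] for i, row in table.iterrows()}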
@@ -41,13 +41,19 @@ def stemming(message):


 # * compare words to emoji descriptions
-def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all", stem=True):
+def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all",\
+                      stem=True, embeddings="wordnet"):
     # assumes there is a trained w2v model stored in the same directory!
     __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

-    wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    if embeddings=="word2Vec":
+        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    elif embeddings=="fastText":
+        wv = KeyedVectors.load("/fastTextVectors.kv", mmap='r')

     if (stem):
         sentence = stemming(sentence)

     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     matrix_list = []
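Note: the new embeddings switch expects two pre-trained vector files. A sketch of how they could be produced with gensim; the toy corpus and default training parameters are assumptions, only the file names mirror the load calls above:

    from gensim.models import Word2Vec, FastText

    # toy corpus; the real training data is not part of this diff
    sentences = [["happy", "birthday", "cake"], ["sunny", "beach", "day"]]

    w2v = Word2Vec(sentences, min_count=1)
    w2v.wv.save("word2vec.model")        # read back via KeyedVectors.load(..., mmap='r')

    ft = FastText(sentences, min_count=1)
    ft.wv.save("fastTextVectors.kv")     # the binary file added in this commit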
@@ -59,6 +65,20 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
+                if embeddings=="wordnet":
+                    syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
+                    if len(syn1) == 0:
+                        continue
+                    w1 = syn1[0]
+                    #print(j, tokenized_sentence)
+                    syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
+                    if len(syn2) == 0:
+                        continue
+                    w2 = syn2[0]
+                    val = w1.wup_similarity(w2)
+                    if val is None:
+                        continue
+                elif (embeddings == "word2Vec" or embeddings == "fastText"):
                     try:
                         val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
                     except KeyError:
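Note: the WordNet branch added above reduces to a first-synset, Wu-Palmer lookup per token pair. A standalone sketch of that scoring step with NLTK:

    # requires: nltk.download('wordnet')
    from nltk.corpus import wordnet

    def wup(word_a, word_b, lang='eng'):
        # first synset of each word, Wu-Palmer similarity, None if unavailable
        syn_a = wordnet.synsets(word_a, lang=lang)
        syn_b = wordnet.synsets(word_b, lang=lang)
        if not syn_a or not syn_b:
            return None
        return syn_a[0].wup_similarity(syn_b[0])

    print(wup("smile", "grin"))   # a value in (0, 1], or None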
@@ -96,9 +116,11 @@ def prepareData(stem=True, lower=True):
     return lookup

 # make a prediction for an input sentence
-def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng', n=10, t=0.9):
+# embeddings = ["wordnet", "word2Vec", "fastText"]
+def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng',\
+            embeddings="wordnet", n=10, t=0.9):

-    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider)
+    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings)

     try:
         if(criteria=="summed"):
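Note: a usage sketch of the extended interface. The parameter names and criteria values come from this diff; the example sentence and the exact return value of predict are assumptions:

    # assumes prepareData() and predict() are importable from the module this diff touches
    lookup = prepareData(stem=True, lower=True)

    # default WordNet scoring with a similarity threshold
    predict("I love sunny days at the beach", lookup, criteria="threshold", t=0.9)

    # the same sentence scored against the fastText vectors added in this commit
    predict("I love sunny days at the beach", lookup, criteria="summed", embeddings="fastText")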