Merge branch 'master' of ssh://the-cake-is-a-lie.net:20022/jonas/NLP-LAB
commit 60bac91655
130  Project/naive_approach/naiveApproachTest.ipynb  Normal file
@@ -0,0 +1,130 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import naive_approach"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "top_emojis = ['😂',\n",
    "              '😭',\n",
    "              '😍',\n",
    "              '😩',\n",
    "              '😊',\n",
    "              '😘',\n",
    "              '🙏',\n",
    "              '🙌',\n",
    "              '😉',\n",
    "              '😁',\n",
    "              '😅',\n",
    "              '😎',\n",
    "              '😢',\n",
    "              '😒',\n",
    "              '😏',\n",
    "              '😌',\n",
    "              '😔',\n",
    "              '😋',\n",
    "              '😀',\n",
    "              '😤']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "lookup = naive_approach.prepareData(emojis_to_consider=top_emojis)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "sentence=\"I am very happy today\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyError",
     "evalue": "357",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyError\u001b[0m Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-7-a7b8b0832a7d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpred\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnaive_approach\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msentence\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlookup\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0memojis_to_consider\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtop_emojis\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m~\\Desktop\\NLP-LAB\\Project\\naive_approach\\naive_approach.py\u001b[0m in \u001b[0;36mpredict\u001b[1;34m(sentence, lookup, emojis_to_consider, criteria, description_key, lang, n, t)\u001b[0m\n\u001b[0;32m 117\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 118\u001b[0m \u001b[1;31m# build a result table\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 119\u001b[1;33m \u001b[0mtable_array\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlookup\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mdescription_key\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mn\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 120\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 121\u001b[0m \u001b[0mtable_frame\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable_array\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mcriteria\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'description'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\Desktop\\NLP-LAB\\Project\\naive_approach\\naive_approach.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 117\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 118\u001b[0m \u001b[1;31m# build a result table\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 119\u001b[1;33m \u001b[0mtable_array\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlookup\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mdescription_key\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mn\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 120\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 121\u001b[0m \u001b[0mtable_frame\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable_array\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mcriteria\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'description'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: 357"
     ]
    }
   ],
   "source": [
    "pred = naive_approach.predict(sentence, lookup, emojis_to_consider=top_emojis, n=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['🎁', '🙋', '\\U0001f91f']"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
128  Project/naive_approach/naive_approach.py  Normal file
@@ -0,0 +1,128 @@
# coding: utf-8

# In[1]:


import pandas as pd
import numpy as np
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import wordnet

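# NOTE (assumption, not stated in the original source): word_tokenize and the
# wordnet interface rely on the NLTK data packages 'punkt' and 'wordnet';
# install them once with nltk.download('punkt') and nltk.download('wordnet').
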
# # Naive Approach

table = pd.read_csv('../Tools/emoji_descriptions.csv')


#######################
# Helper functions
#######################

def stemming(messages):
    # reduce every word of every message to its Porter stem so that
    # inflected forms ("smiling", "smiles") compare equal
    stemmed_messages = []
    ps = PorterStemmer()
    for m in messages:
        words = word_tokenize(m)
        sm = []
        for w in words:
            sm.append(ps.stem(w))
        m = (" ").join(sm)
        stemmed_messages.append(m)
    return stemmed_messages

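# Illustrative call (hypothetical input, not taken from the original code):
#   stemming(["smiling faces"])  ->  ["smile face"]
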
# * compare words to emoji descriptions
def evaluate_sentence(sentence, table, description_key='description', lang='eng'):
    # for every emoji description, build an m x n matrix of Wu-Palmer
    # similarities between description tokens and sentence tokens
    tokenized_sentence = word_tokenize(sentence)
    n = len(tokenized_sentence)
    matrix_list = []

    for index, row in table.iterrows():
        emoji_tokens = word_tokenize(row[description_key])
        m = len(emoji_tokens)

        mat = np.zeros(shape=(m, n))
        for i in range(m):
            for j in range(n):
                # take the first synset of each word; skip words WordNet
                # does not know and pairs without a similarity score
                syn1 = wordnet.synsets(emoji_tokens[i], lang=lang)
                if len(syn1) == 0:
                    continue
                w1 = syn1[0]
                syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
                if len(syn2) == 0:
                    continue
                w2 = syn2[0]
                val = w1.wup_similarity(w2)
                if val is None:
                    continue
                mat[i, j] = val
        matrix_list.append(mat)

    return matrix_list

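# Shape sketch (an assumption about typical use, not recorded output): for a
# two-word sentence, evaluate_sentence returns one matrix per CSV row, each of
# shape (number of description tokens, 2), with similarity entries in [0, 1].
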
###########################
# Functions to be called from main script
###########################

# load and preprocess data
# emojis_to_consider can be either a list or "all"
def prepareData(use_stemming=False, emojis_to_consider="all"):
    # the flag must not be called `stemming`: that name would shadow the
    # stemming() helper above and the call below would crash on a bool
    if use_stemming:
        table['description'] = stemming(table['description'])

    # map table row index -> emoji character, keeping only the wanted emojis
    lookup = {}
    for index, row in table.iterrows():
        if emojis_to_consider == "all" or (isinstance(emojis_to_consider, list) and row['character'] in emojis_to_consider):
            lookup[index] = row['character']

    return lookup

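# Usage sketch (mirrors the call in naiveApproachTest.ipynb):
#   lookup = prepareData(emojis_to_consider=['😂', '😭', '😍'])
#   -> {row_index: emoji, ...} for every row whose emoji is in the list
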
# make a prediction for an input sentence
def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold",
            description_key='description', lang='eng', n=10, t=0.9):

    result = evaluate_sentence(sentence, table, description_key, lang)

    # score every emoji row, then sort best-first
    if criteria == "summed":
        scores = [np.sum(x) for x in result]
    elif criteria == "max_val":
        scores = [np.max(x) for x in result]
    elif criteria == "avg":
        scores = [np.mean(x) for x in result]
    else:
        # default "threshold": fraction of similarity entries above t
        scores = [len(np.where(x > t)[0]) / (x.shape[0] * x.shape[1]) for x in result]

    ranking = np.argsort([-s for s in scores])

    # drop rows that were filtered out in prepareData *before* truncating to n.
    # The earlier version np.delete'd by value from a positional array and then
    # indexed range(n) regardless, which raised KeyErrors (see the recorded
    # KeyError: 357 in naiveApproachTest.ipynb)
    if emojis_to_consider != "all":
        ranking = [i for i in ranking if i in lookup]

    indexes = ranking[:n]

    # build a result table
    table_array = [[lookup[i], str(table.iloc[i][description_key])] for i in indexes]

    table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])

    return list(table_frame[criteria])


#predict("I like to travel by train", description_key='description' , lang='eng')
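# End-to-end sketch (assumes ../Tools/emoji_descriptions.csv is present and
# the NLTK data packages mentioned above are installed):
#
#   lookup = prepareData(emojis_to_consider="all")
#   print(predict("I am very happy today", lookup, n=3))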