nlp-lab/Project/naive_approach/naive_approach.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from IPython.display import clear_output, Markdown, Math\n",
"import ipywidgets as widgets\n",
"import os\n",
"import unicodedata as uni\n",
"import numpy as np\n",
"from nltk.stem import PorterStemmer\n",
"from nltk.tokenize import sent_tokenize, word_tokenize\n",
"from nltk.corpus import wordnet\n",
"import math\n",
"import pprint\n",
"\n",
"pp=pprint.PrettyPrinter(indent=4)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Naive Approach"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read in table"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Unnamed: 0</th>\n",
" <th>code</th>\n",
" <th>character</th>\n",
" <th>description</th>\n",
" <th>description_de</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>0</td>\n",
" <td>126980</td>\n",
" <td>🀄</td>\n",
" <td>MAHJONG TILE RED DRAGON</td>\n",
" <td>MAHJONG FLIESE ROT DRACHE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>1</td>\n",
" <td>129525</td>\n",
" <td>🧵</td>\n",
" <td>SPOOL OF THREAD</td>\n",
" <td>Spool Gewinde</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>2</td>\n",
" <td>129526</td>\n",
" <td>🧶</td>\n",
" <td>BALL OF YARN</td>\n",
" <td>BALL OF YARN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>3</td>\n",
" <td>127183</td>\n",
" <td>🃏</td>\n",
" <td>PLAYING CARD BLACK JOKER</td>\n",
" <td>SPIELKARTE BLACK JOKER</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>4</td>\n",
" <td>129296</td>\n",
" <td>🤐</td>\n",
" <td>ZIPPER-MOUTH FACE</td>\n",
" <td>ZIPPER-MUND Gesicht</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Unnamed: 0 code character description \\\n",
"0 0 126980 🀄 MAHJONG TILE RED DRAGON \n",
"1 1 129525 🧵 SPOOL OF THREAD \n",
"2 2 129526 🧶 BALL OF YARN \n",
"3 3 127183 🃏 PLAYING CARD BLACK JOKER \n",
"4 4 129296 🤐 ZIPPER-MOUTH FACE \n",
"\n",
" description_de \n",
"0 MAHJONG FLIESE ROT DRACHE \n",
"1 Spool Gewinde \n",
"2 BALL OF YARN \n",
"3 SPIELKARTE BLACK JOKER \n",
"4 ZIPPER-MUND Gesicht "
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"table = pd.read_csv('../Tools/emoji_descriptions.csv')\n",
"table.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* todo: read in a lot of messages"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"messages = [\"Hello, this is a testing message\", \"this is a very sunny day today, i am very happy\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* using a Stemmer to get the main 'Part' of each word"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"ps = PorterStemmer()"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"stemmed_messages = []\n",
"for m in messages:\n",
" words = word_tokenize(m)\n",
" sm = []\n",
" for w in words:\n",
" sm.append(ps.stem(w))\n",
" stemmed_messages.append(sm)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[['hello', ',', 'thi', 'is', 'a', 'test', 'messag'],\n",
" ['thi',\n",
" 'is',\n",
" 'a',\n",
" 'veri',\n",
" 'sunni',\n",
" 'day',\n",
" 'today',\n",
" ',',\n",
" 'i',\n",
" 'am',\n",
" 'veri',\n",
" 'happi']]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"stemmed_messages"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(1027, 5)"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"table.shape"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* compare words to emoji descriptions"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
"def evaluate_sentence(sentence, description_key = 'description', lang = 'eng'):\n",
" \n",
" tokenized_sentence = word_tokenize(sentence)\n",
" n = len(tokenized_sentence)\n",
" l = table.shape[0]\n",
" matrix_list = []\n",
" \n",
" for index, row in table.iterrows():\n",
" emoji_tokens = word_tokenize(row[description_key])\n",
" m = len(emoji_tokens)\n",
"\n",
" mat = np.zeros(shape=(m,n))\n",
" for i in range(len(emoji_tokens)):\n",
" for j in range(len(tokenized_sentence)):\n",
" syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)\n",
" if len(syn1) == 0:\n",
" continue\n",
" w1 = syn1[0]\n",
" #print(j, tokenized_sentence)\n",
" syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)\n",
" if len(syn2) == 0:\n",
" continue\n",
" w2 = syn2[0]\n",
" val = w1.wup_similarity(w2)\n",
" if val is None:\n",
" continue\n",
" mat[i,j] = val\n",
" #print(row['character'], mat)\n",
" matrix_list.append(mat)\n",
" \n",
" return matrix_list\n",
" \n",
" "
]
},
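{
"cell_type": "markdown",
"metadata": {},
"source": [
"* quick sanity check (not part of the pipeline above, word pair chosen for illustration only): `wup_similarity` between the first synsets of two words is exactly the score that `evaluate_sentence` writes into each matrix cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative word pair (an assumption, not taken from the emoji data);\n",
"# uses the wordnet import from the first cell\n",
"s1 = wordnet.synsets('train', lang='eng')[0]\n",
"s2 = wordnet.synsets('railway', lang='eng')[0]\n",
"s1.wup_similarity(s2)"
]
},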
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* building a lookup table:"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {},
"outputs": [],
"source": [
"lookup = {}\n",
"emoji_set = []\n",
"for index, row in table.iterrows():\n",
" lookup[index] = row['character']\n",
" emoji_set.append(row['character'])\n",
"\n",
"emoji_set = set(emoji_set)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* sorting"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"def predict(sentence, description_key='description', lang = 'eng', n=10, t=0.9):\n",
"\n",
" result = evaluate_sentence(sentence, description_key, lang)\n",
" \n",
" summed = np.argsort([-np.sum(x) for x in result])[0:n]\n",
" max_val = np.argsort([-np.max(x) for x in result])[0:n]\n",
" avg = np.argsort([-np.mean(x) for x in result])[0:n]\n",
" threshold = np.argsort([-len(np.where(x>t)[0]) / (x.shape[0] * x.shape[1]) for x in result])[0:n]\n",
" \n",
" # build a result table\n",
" table_array = [[lookup[summed[i]], str(table.iloc[summed[i]][description_key]), \n",
" lookup[max_val[i]], str(table.iloc[max_val[i]][description_key]),\n",
" lookup[avg[i]], str(table.iloc[avg[i]][description_key]),\n",
" lookup[threshold[i]], str(table.iloc[threshold[i]][description_key])] for i in range(n) ]\n",
" \n",
" \n",
" table_frame = pd.DataFrame(table_array, columns=['summed', 'summed_description','max_val', 'max_val_description','avg', 'avg_description','threshold', 'threshold_description'])\n",
" \n",
" display(table_frame)\n"
]
},
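{
"cell_type": "markdown",
"metadata": {},
"source": [
"* toy illustration of the four rankings in `predict` (the similarity matrices below are made up, not real scores): negating a score before `np.argsort` yields a descending order"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# three invented similarity matrices standing in for three emojis\n",
"toy = [np.array([[0.2, 0.9], [0.1, 0.3]]),\n",
"       np.array([[0.95, 0.1]]),\n",
"       np.array([[0.4, 0.4], [0.4, 0.4]])]\n",
"t = 0.9\n",
"print(np.argsort([-np.sum(x) for x in toy]))   # by total similarity\n",
"print(np.argsort([-np.max(x) for x in toy]))   # by best single match\n",
"print(np.argsort([-np.mean(x) for x in toy]))  # by average similarity\n",
"print(np.argsort([-len(np.where(x > t)[0]) / x.size for x in toy]))  # by fraction above t (x.size == x.shape[0] * x.shape[1])"
]
},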
{
"cell_type": "code",
"execution_count": 29,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>summed</th>\n",
" <th>summed_description</th>\n",
" <th>max_val</th>\n",
" <th>max_val_description</th>\n",
" <th>avg</th>\n",
" <th>avg_description</th>\n",
" <th>threshold</th>\n",
" <th>threshold_description</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>🤭</td>\n",
" <td>SMILING FACE WITH SMILING EYES AND HAND COVERI...</td>\n",
" <td>🤟</td>\n",
" <td>I LOVE YOU HAND SIGN</td>\n",
" <td>💐</td>\n",
" <td>BOUQUET</td>\n",
" <td>🚆</td>\n",
" <td>TRAIN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>🔛</td>\n",
" <td>ON WITH EXCLAMATION MARK WITH LEFT RIGHT ARROW...</td>\n",
" <td>🇮</td>\n",
" <td>REGIONAL INDICATOR SYMBOL LETTER I</td>\n",
" <td>🍳</td>\n",
" <td>COOKING</td>\n",
" <td>🚸</td>\n",
" <td>CHILDREN CROSSING</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>🚮</td>\n",
" <td>PUT LITTER IN ITS PLACE SYMBOL</td>\n",
" <td>🚆</td>\n",
" <td>TRAIN</td>\n",
" <td>🚆</td>\n",
" <td>TRAIN</td>\n",
" <td>🚄</td>\n",
" <td>HIGH-SPEED TRAIN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>🤪</td>\n",
" <td>GRINNING FACE WITH ONE LARGE AND ONE SMALL EYE</td>\n",
" <td>🚅</td>\n",
" <td>HIGH-SPEED TRAIN WITH BULLET NOSE</td>\n",
" <td>🎥</td>\n",
" <td>MOVIE CAMERA</td>\n",
" <td>🇮</td>\n",
" <td>REGIONAL INDICATOR SYMBOL LETTER I</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>🥰</td>\n",
" <td>SMILING FACE WITH SMILING EYES AND THREE HEARTS</td>\n",
" <td>🚄</td>\n",
" <td>HIGH-SPEED TRAIN</td>\n",
" <td>🎭</td>\n",
" <td>PERFORMING ARTS</td>\n",
" <td>🤟</td>\n",
" <td>I LOVE YOU HAND SIGN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>🙌</td>\n",
" <td>PERSON RAISING BOTH HANDS IN CELEBRATION</td>\n",
" <td>🚸</td>\n",
" <td>CHILDREN CROSSING</td>\n",
" <td>🎹</td>\n",
" <td>MUSICAL KEYBOARD</td>\n",
" <td>🚅</td>\n",
" <td>HIGH-SPEED TRAIN WITH BULLET NOSE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6</th>\n",
" <td>🖖</td>\n",
" <td>RAISED HAND WITH PART BETWEEN MIDDLE AND RING ...</td>\n",
" <td>🛸</td>\n",
" <td>FLYING SAUCER</td>\n",
" <td>🧾</td>\n",
" <td>RECEIPT</td>\n",
" <td>🔹</td>\n",
" <td>SMALL BLUE DIAMOND</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7</th>\n",
" <td>🔂</td>\n",
" <td>CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE...</td>\n",
" <td>🥾</td>\n",
" <td>HIKING BOOT</td>\n",
" <td>💏</td>\n",
" <td>KISS</td>\n",
" <td>📠</td>\n",
" <td>FAX MACHINE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>8</th>\n",
" <td>😄</td>\n",
" <td>SMILING FACE WITH OPEN MOUTH AND SMILING EYES</td>\n",
" <td>🥏</td>\n",
" <td>FLYING DISC</td>\n",
" <td>🔥</td>\n",
" <td>FIRE</td>\n",
" <td>📡</td>\n",
" <td>SATELLITE ANTENNA</td>\n",
" </tr>\n",
" <tr>\n",
" <th>9</th>\n",
" <td>👉</td>\n",
" <td>WHITE RIGHT POINTING BACKHAND INDEX</td>\n",
" <td>🚏</td>\n",
" <td>BUS STOP</td>\n",
" <td>🎼</td>\n",
" <td>MUSICAL SCORE</td>\n",
" <td>📢</td>\n",
" <td>PUBLIC ADDRESS LOUDSPEAKER</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" summed summed_description max_val \\\n",
"0 🤭 SMILING FACE WITH SMILING EYES AND HAND COVERI... 🤟 \n",
"1 🔛 ON WITH EXCLAMATION MARK WITH LEFT RIGHT ARROW... 🇮 \n",
"2 🚮 PUT LITTER IN ITS PLACE SYMBOL 🚆 \n",
"3 🤪 GRINNING FACE WITH ONE LARGE AND ONE SMALL EYE 🚅 \n",
"4 🥰 SMILING FACE WITH SMILING EYES AND THREE HEARTS 🚄 \n",
"5 🙌 PERSON RAISING BOTH HANDS IN CELEBRATION 🚸 \n",
"6 🖖 RAISED HAND WITH PART BETWEEN MIDDLE AND RING ... 🛸 \n",
"7 🔂 CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE... 🥾 \n",
"8 😄 SMILING FACE WITH OPEN MOUTH AND SMILING EYES 🥏 \n",
"9 👉 WHITE RIGHT POINTING BACKHAND INDEX 🚏 \n",
"\n",
" max_val_description avg avg_description threshold \\\n",
"0 I LOVE YOU HAND SIGN 💐 BOUQUET 🚆 \n",
"1 REGIONAL INDICATOR SYMBOL LETTER I 🍳 COOKING 🚸 \n",
"2 TRAIN 🚆 TRAIN 🚄 \n",
"3 HIGH-SPEED TRAIN WITH BULLET NOSE 🎥 MOVIE CAMERA 🇮 \n",
"4 HIGH-SPEED TRAIN 🎭 PERFORMING ARTS 🤟 \n",
"5 CHILDREN CROSSING 🎹 MUSICAL KEYBOARD 🚅 \n",
"6 FLYING SAUCER 🧾 RECEIPT 🔹 \n",
"7 HIKING BOOT 💏 KISS 📠 \n",
"8 FLYING DISC 🔥 FIRE 📡 \n",
"9 BUS STOP 🎼 MUSICAL SCORE 📢 \n",
"\n",
" threshold_description \n",
"0 TRAIN \n",
"1 CHILDREN CROSSING \n",
"2 HIGH-SPEED TRAIN \n",
"3 REGIONAL INDICATOR SYMBOL LETTER I \n",
"4 I LOVE YOU HAND SIGN \n",
"5 HIGH-SPEED TRAIN WITH BULLET NOSE \n",
"6 SMALL BLUE DIAMOND \n",
"7 FAX MACHINE \n",
"8 SATELLITE ANTENNA \n",
"9 PUBLIC ADDRESS LOUDSPEAKER "
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"predict(\"I like to travel by train\", description_key='description' , lang='eng')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}