nlp-lab/Project/Tools/emoji tester.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from IPython.display import clear_output, Markdown, Math\n",
"import ipywidgets as widgets\n",
"import os"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## file input stuff:\n",
"\n",
"* replace `test.txt` with your whatsapp log file"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================================================================\n",
"processing File: test.txt\n",
"================================================================================\n",
"================================================================================\n",
"successfully finished action: processing File: test.txt\n",
"================================================================================\n",
"================================================================================\n",
"Wrote output to test.txt.csv\n",
"================================================================================\n"
]
}
],
"source": [
"%%bash\n",
"./whatsapp2csv.sh test.txt"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read table `test.csv` exported by `whatsapp2csv.sh`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages = pd.read_csv('test.txt.csv', delimiter='\\t')\n",
"messages.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read emoji-data (can be found here: https://www.unicode.org/Public/emoji/11.0/emoji-data.txt) and generate a table file out of it"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"found existing emoji specification\n"
]
}
],
"source": [
"%%bash\n",
"if [ ! -e emoji-data.txt ]\n",
"then\n",
" echo \"downloading emoji specification\"\n",
" wget https://www.unicode.org/Public/emoji/11.0/emoji-data.txt\n",
"else\n",
" echo \"found existing emoji specification\"\n",
"fi\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"emoji_blacklist = set([\n",
" 0x1F3FB,\n",
" 0x1F3FC,\n",
" 0x1F3FD,\n",
" 0x1F3FE,\n",
" 0x1F3FF,\n",
" 0x2642,\n",
" 0x2640\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n",
"emoji_data['type'] = emoji_data['type'].str.strip()\n",
"emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* now build a set out of the unicode types"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ord(\"😀\") == int('0x1f600',16)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"228 231A..231B \n",
"229 23E9..23EC \n",
"230 23F0 \n",
"231 23F3 \n",
"232 25FD..25FE \n",
"Name: unicode, dtype: object"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"emoji_codes = emoji_data['unicode']\n",
"emoji_codes.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* we have to iterate over the whole list and extract all given ranges:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**used Emojis:**"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'🀄\\U0001f9f1\\U0001f9f2\\U0001f9f3\\U0001f9f4\\U0001f9f5\\U0001f9f6🃏🤐🤑🤒🤓🤔🤕🤖🤗🤘🤙🤚🤛🤜🤝🤞\\U0001f91f🤠🤡🤢🤣🤤🤥🤦🤧\\U0001f928\\U0001f929\\U0001f92a\\U0001f92b\\U0001f92c\\U0001f92d\\U0001f92e\\U0001f92f🤰\\U0001f931\\U0001f932🤳🤴🤵🤶🤷🤸🤹🤺🤼🤽🤾🥀🥁🥂🥃🥄🥅🥇🥈🥉🥊🥋\\U0001f94c\\U0001f94d\\U0001f94e\\U0001f94f🥐🥑🥒🥓🥔🥕🥖🥗🥘🥙🥚🥛🥜🥝🥞\\U0001f95f\\U0001f960\\U0001f961\\U0001f962\\U0001f963\\U0001f964\\U0001f965\\U0001f966\\U0001f967\\U0001f968\\U0001f969\\U0001f96a\\U0001f96b\\U0001f96c\\U0001f96d\\U0001f96e\\U0001f96f\\U0001f970\\U0001f9ec\\U0001f9ed\\U0001f973\\U0001f974\\U0001f975\\U0001f976\\U0001f9ee\\U0001f97a\\U0001f9ef\\U0001f97c\\U0001f97d\\U0001f97e\\U0001f97f🦀🦁🦂🦃🦄🦅🦆🦇🦈🦉🦊🦋🦌🦍🆎🦎🦏🆑🆒🆓🆔🆕🆖🆗🆘🆙🆚\\U0001f992\\U0001f993\\U0001f994\\U0001f995\\U0001f996\\U0001f997\\U0001f998\\U0001f999\\U0001f99a\\U0001f99b\\U0001f99c\\U0001f99d\\U0001f99e\\U0001f99f\\U0001f9a0\\U0001f9a1\\U0001f9a2\\U0001f9f8\\U0001f9b0\\U0001f9b1\\U0001f9b2\\U0001f9b3\\U0001f9b4\\U0001f9b5\\U0001f9b6\\U0001f9b7\\U0001f9b8\\U0001f9b9🧀\\U0001f9c1\\U0001f9c2\\U0001f9d0\\U0001f9d1\\U0001f9d2\\U0001f9d3\\U0001f9d4\\U0001f9d5\\U0001f9d6\\U0001f9d7\\U0001f9d8\\U0001f9d9\\U0001f9da\\U0001f9db\\U0001f9dc\\U0001f9dd\\U0001f9de\\U0001f9df\\U0001f9e0\\U0001f9e1\\U0001f9e2\\U0001f9e3\\U0001f9e4\\U0001f9e5🇦🇧🇨🇩🇪🇫🇬🇭🇮🇯🇰🇱🇲🇳🇴🇵🇶🇷🇸🇹🇺🇻🇼🇽🇾🇿\\U0001f9f7🈁\\U0001f9f9\\U0001f9fa\\U0001f9fb\\U0001f9fc\\U0001f9fd\\U0001f9fe\\U0001f9ff🈚🈯🈲🈳🈴🈵🈶🈸🈹🈺🉐🉑🌀🌁🌂🌃🌄🌅🌆🌇🌈🌉🌊🌋🌌🌍🌎🌏🌐🌑🌒🌓🌔🌕🌖🌗🌘🌙⌚⌛⬛⬜🌚🌛🌜🌝🌞🌟🌠🌭🌮🌯🌰🌱🌲🌳🌴🌵🌷🌸🌹🌺🌻🌼🌽🌾🌿🍀🍁🍂🍃🍄🍅🍆🍇🍈🍉🍊🍋🍌🍍🍎🍏⭐🍐🍑🍒🍓⭕🍔🍕🍖🍗🍘🍙🍚🍛🍜🍟🍝🍞🍠🍡🍤🍢🍣🍥🍦🍩🍧🍨🍪🍫🍮🍬🍭🍯🍰🍳🍱🍲🍴🍵🍸🍶🍷🍹🍺🍻🍼🍾🍿🎀🎁🎂🎃🎅🎄🎆🎈🎉🎊🎋🎌🎍🎇🎏🎐🎎🎑🎒🎓🎠🎡🎢🎣🎤🎥🎦🎧🎨🎩🎪🎫🎬🎭🎮🎯🎰🎱🎲🎳🎴🎵🎶🎷🎸🎹🎺🎻🎼🎽🎾🎿🏀🏁🏂🏃🏄🏅🏆🏇🏈🏉🏊🏏🏐🏑🏒🏓🏠🏡🏢🏣🏤🏥🏦🏧🏨🏩🏪🏫🏬🏭🏮🏯🏰⏩⏪⏫🏴⏬⏰⏳🏸🏹🏺🐀🐁🐂🐃🐄🐅🐆🐇🐈🐉🐊🐋🐌🐍🐎🐏🐐🐑🐒🐓🐔🐕🐖🐗🐘🐙🐚🐛🐜🐝🐞🐟🐠🐡🐢🐣🐤🐥🐦🐧🐨🐩🐪🐫🐬🐭🐮🐯🐰🐱🐲🐳🐴🐵🐶🐷🐸🐹🐺🐻🐼🐽🐾👀👂👃👄👅👆👇👈👉👊👋👌👍👎👏👐👑👒👓👔👕👖👗👘👙👚👛👜👝👞👟👠👡👢👣👤👥👦👧👨👩👪👫👬👭👮👯👰👱👲👳👴👵👶👷👸👹👺👻👼👽👾👿💀💁💂💃💄💅💆💇💈💉💊💋💌💍💎💏💐💑💒💓💔💕💖💗💘💙💚💛💜💝💞💟💠💡💢💣💤💥💦💧💨💩💪💫💬💭💮💯💰💱💲💳💴💵💶💷💸💹💺💻💼💽💾💿📀📁📂📃📄📅📆📇📈📉📊📋📌📍📎📏📐📑📒📓📔📕📖📗📘📙📚📛📜📝📞📟📠📡📢📣📤📥📦📧📨📩📪📫📬📭📮📯📰📱📲📳📴📵📶📷📸📹📺📻📼📿🔀🔁🔂🔃🔄🔅🔆🔇🔈🔉🔊🔋🔌🔍🔎🔏🔐🔑🔒🔓🔔🔕🔖🔗🔘🔙🔚🔛🔜🔝🔞🔟🔠🔡🔢🔣🔤🔥🔦🔧🔨🔩🔪🔫🔬🔭🔮🔯🔰🔱🔲🔳🔴🔵🔶🔷🔸🔹🔺🔻🔼🔽🕋🕌🕍🕎🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚🕛🕜🕝🕞🕟🕠🕡🕢🕣🕤🕥🕦🕧🕺🖕🖖🖤🗻🗼◽◾🗽🗾🗿😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑☔☕😒😓😔😕😖😗😘😙😚😛😜😝😞😟😠😡😢😣😤😥😦😧😨😩😪😫😬😭😮😯😰😱😲😳😴😵😶😷😸😹😺😻😼😽😾😿🙀🙁🙂🙃♈♉♊♋♌♍♎♏♐♑♒♓🙋🙌🙍🙎🙏♿🚀🚁🚂🚃🚄🚅🚆🚇🚈🚉🚊🚋🚌🚍🚎🚏🚐🚑🚒⚓🚓🚔🚕🚖🚗🚘🚙🚚🚛🚜🚝🚞🚟⚡🚠🚡🚢🚣🚤🚥🚦🚧⚪⚫🚨🚩🚪🚫🚬🚭🚮🚯🚰🚱🚲🚳🚴🚵🚶🚷🚸⚽⚾🚹🚺🚻🚼🚽⛄⛅🚾🚿🛀🛁🛂🛃🛄🛅⛎🛌🛐🛑🛒⛔⛪🛫🛬⛲⛳🛴⛵🛵🛶\\U0001f6f7\\U0001f6f8⛺\\U0001f6f9⛽✅\\U0001f9e6🙄✊✋🙅🙆🙇🙈\\U0001f9e7🙉🙊✨\\U0001f9e8❌❎\\U0001f9e9❓❔❕❗\\U0001f9ea\\U0001f9eb➗🦐🦑➰➿\\U0001f9f0'"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"**blacklisted Emojis:**"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'♀♂🏻🏼🏽🏾🏿'"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"emoji_code_list = []\n",
"for entry in emoji_codes:\n",
" # testing whether we have an entry or a range:\n",
" if '.' in entry:\n",
" # range\n",
" a,b = entry.split(\"..\")\n",
" for i in range(int(a,16),int(b,16) +1):\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(i)\n",
" else:\n",
" # single entry\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(int(entry,16))\n",
"emoji_code_set = set(emoji_code_list)\n",
"display(Markdown(\"**used Emojis:**\"))\n",
"display(\"\".join([chr(x) for x in emoji_code_set]))\n",
"display(Markdown(\"**blacklisted Emojis:**\"))\n",
"display(\"\".join([chr(x) for x in emoji_blacklist]))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"True False\n"
]
}
],
"source": [
"# simple test:\n",
"print(ord(\"😀\") in emoji_code_set, ord(\"a\") in emoji_code_set)"
]
},
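{
"cell_type": "markdown",
"metadata": {},
"source": [
"* one more quick sanity check: the final set should be reasonably large and disjoint from the blacklist"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# the blacklist and the final emoji set should not overlap\n",
"print(len(emoji_code_set), \"emoji codepoints in total\")\n",
"print(\"overlap with blacklist:\", emoji_code_set & emoji_blacklist)"
]
},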
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* expanding column and fill new emojis"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"messages[\"emojis\"] = None"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"for i in messages.index:\n",
" emoji_list = []\n",
" m = messages.iloc[i]['message']\n",
" m_new = \"\"\n",
" for c in str(m):\n",
" if ord(c) in emoji_code_set:\n",
" emoji_list.append(c)\n",
" elif ord(c) not in emoji_blacklist:\n",
" m_new += c\n",
" \n",
" messages.loc[i,'emojis'] = set(emoji_list)\n",
" #remove emiójis from message\n",
" messages.loc[i,'message'] = m_new\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages[:20]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get a list only containing messaged with emojis"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"emoji_messages = messages[[True if len(e) > 0 else False for e in messages['emojis']]]\n",
"emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]"
]
},
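{
"cell_type": "markdown",
"metadata": {},
"source": [
"* a quick look at how many messages survive the filtering, to see how much training data is left"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# compare the full corpus with the emoji-bearing subset\n",
"print(len(messages), \"messages in total,\", len(emoji_messages), \"with emojis and remaining text\")"
]
},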
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(emoji_messages)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## learning part"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import itertools\n",
"import sklearn.utils as sku\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import MultiLabelBinarizer"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"mlb = MultiLabelBinarizer()\n",
"\n",
"labels=mlb.fit_transform(emoji_messages['emojis'])"
]
},
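{
"cell_type": "markdown",
"metadata": {},
"source": [
"* before training it is worth a look at the label space: `mlb.classes_` holds one entry per distinct emoji, and `labels` is the corresponding binary indicator matrix"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# each column of `labels` corresponds to one emoji class in mlb.classes_\n",
"print(len(mlb.classes_), \"emoji classes\")\n",
"print(\"label matrix shape:\", labels.shape)"
]
},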
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"vectorizer = TfidfVectorizer(stop_words='english')\n",
"vec_train = vectorizer.fit_transform(X1)\n",
"vec_test = vectorizer.transform(Xt1)"
]
},
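{
"cell_type": "markdown",
"metadata": {},
"source": [
"* the shapes of the tf-idf matrices show the vocabulary size relative to the number of samples"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# rows = messages, columns = tf-idf features (vocabulary terms)\n",
"print(\"train:\", vec_train.shape)\n",
"print(\"test: \", vec_test.shape)"
]
},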
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.ensemble import RandomForestClassifier as RFC\n",
"from sklearn.neural_network import MLPClassifier as MLP\n",
"from sklearn.naive_bayes import MultinomialNB as MNB\n",
"from sklearn.multiclass import OneVsRestClassifier as OVRC\n",
"#clf_a = OVRC(RFC(criterion='entropy', random_state=4222))\n",
"clf_a = OVRC(MLP(hidden_layer_sizes=(10,)))\n",
"#clf_a = OVRC(MNB())\n",
"clf_a.fit(vec_train, y1)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"pred = clf_a.predict(vectorizer.transform(Xt1))"
]
},
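{
"cell_type": "markdown",
"metadata": {},
"source": [
"* as a rough quantitative check, hamming loss and micro-averaged F1 are two common ways to score multi-label predictions (lower and higher are better, respectively)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import hamming_loss, f1_score\n",
"\n",
"# hamming loss: fraction of wrongly predicted label entries\n",
"print(\"hamming loss:\", hamming_loss(yt1, pred))\n",
"# micro-averaged F1 aggregates over all emoji classes\n",
"print(\"micro F1:    \", f1_score(yt1, pred, average='micro'))"
]
},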
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"testlist = pd.DataFrame({'message': Xt1, 'pred': mlb.inverse_transform(pred), 'teacher': mlb.inverse_transform(yt1)})\n",
"testlist.to_csv('export.csv')\n",
"testlist"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"out = widgets.Output()\n",
"\n",
"t = widgets.Text()\n",
"b = widgets.Button(\n",
" description='get smiley',\n",
" disabled=False,\n",
" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip='Click me',\n",
" icon='check'\n",
")\n",
"\n",
"\n",
"\n",
"def handle_submit(sender):\n",
" with out:\n",
" clear_output()\n",
" with out:\n",
" display(Markdown(\"# \" + str(mlb.inverse_transform(clf_a.predict(vectorizer.transform([t.value])))[0])))\n",
"\n",
"b.on_click(handle_submit)\n",
" \n",
"display(t)\n",
"display(widgets.VBox([b, out])) "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}