This commit is contained in:
Jonas Weinz 2018-05-20 10:38:14 +02:00
parent ef5422bb0c
commit 4b31d44b87
2 changed files with 737 additions and 402 deletions

View File

@@ -0,0 +1,423 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from IPython.display import clear_output, Markdown, Math\n",
"import ipywidgets as widgets\n",
"import os"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## file input stuff:\n",
"\n",
"* replace `test.txt` with yout whatsapp log file"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"./whatsapp2csv.sh test.txt"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read table `test.csv` exported by `whatsapp2csv.sh`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages = pd.read_csv('test.txt.csv', delimiter='\\t')\n",
"messages.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read emoji-data (can be found here: https://www.unicode.org/Public/emoji/11.0/emoji-data.txt) and generate a table file out of it"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"if [ ! -e emoji-data.txt ]\n",
"then\n",
" echo \"downloading emoji specification\"\n",
" wget https://www.unicode.org/Public/emoji/11.0/emoji-data.txt\n",
"else\n",
" echo \"found existing emoji specification\"\n",
"fi\n"
]
},
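{
"cell_type": "markdown",
"metadata": {},
"source": [
"* quick peek at the raw file format (a sketch, assuming the download above succeeded): each data line looks like `codepoint or range ; property # comment`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# show the first few non-comment lines of the downloaded specification\n",
"with open('emoji-data.txt') as f:\n",
"    data_lines = [l.rstrip() for l in f if l.strip() and not l.startswith('#')]\n",
"data_lines[:3]"
]
},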
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"emoji_blacklist = set([\n",
" 0x1F3FB,\n",
" 0x1F3FC,\n",
" 0x1F3FD,\n",
" 0x1F3FE,\n",
" 0x1F3FF\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n",
"emoji_data['type'] = emoji_data['type'].str.strip()\n",
"emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]\n",
"emoji_data"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* now build a set out of the unicode types"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ord(\"😀\") == int('0x1f600',16)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"emoji_codes = emoji_data['unicode']\n",
"emoji_codes.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* we have to iterate over the whole list and extract all given ranges:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"emoji_code_list = []\n",
"for entry in emoji_codes:\n",
" # testing whether we have an entry or a range:\n",
" if '.' in entry:\n",
" # range\n",
" a,b = entry.split(\"..\")\n",
" for i in range(int(a,16),int(b,16) +1):\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(i)\n",
" else:\n",
" # single entry\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(int(entry,16))\n",
"emoji_code_set = set(emoji_code_list)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# simple test:\n",
"print(ord(\"😀\") in emoji_code_set, ord(\"a\") in emoji_code_set)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* expanding column and fill new emojis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages[\"emojis\"] = None"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i in messages.index:\n",
" emoji_list = []\n",
" to_remove = []\n",
" m = messages.iloc[i]['message']\n",
" for c in str(m):\n",
" if ord(c) in emoji_code_set:\n",
" emoji_list.append(c)\n",
" elif ord(c) in emoji_blacklist:\n",
" to_remove.append(c)\n",
" \n",
" messages.loc[i,'emojis'] = emoji_list\n",
" #remove emiójis from message\n",
" for e in (emoji_list + to_remove):\n",
" m = m.replace(e,\"\")\n",
" messages.loc[i,'message'] = m\n"
]
},
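{
"cell_type": "markdown",
"metadata": {},
"source": [
"* tiny worked example of the extraction above (a sketch; the sample string is made up, `emoji_code_set` is the set built earlier)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sample = \"hello 😀 world 👍\"\n",
"found = [c for c in sample if ord(c) in emoji_code_set]\n",
"cleaned = \"\".join(c for c in sample if ord(c) not in emoji_code_set)\n",
"print(found, cleaned)"
]
},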
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages[:20]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get a list only containing messaged with emojis"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"emoji_messages = messages[[True if len(e) > 0 else False for e in messages['emojis']]]\n",
"emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(emoji_messages)"
]
},
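{
"cell_type": "markdown",
"metadata": {},
"source": [
"* quick look at the label distribution (a sketch using `collections.Counter`; the last emoji of each message becomes its label in the learning part below)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# count the most frequent last-position emojis across all emoji messages\n",
"from collections import Counter\n",
"Counter(e[-1] for e in emoji_messages['emojis']).most_common(10)"
]
},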
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## learning part"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import itertools\n",
"import sklearn.utils as sku\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"from sklearn.model_selection import train_test_split"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"labels=[e[-1] for e in emoji_messages['emojis']]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"labels[:10]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vectorizer = TfidfVectorizer(stop_words='english')\n",
"vec_train = vectorizer.fit_transform(X1)\n",
"vec_test = vectorizer.transform(Xt1)"
]
},
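{
"cell_type": "markdown",
"metadata": {},
"source": [
"* optional peek at the fitted vectorizer (a sketch; `vocabulary_` is sklearn's learned term-to-index mapping)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# number of distinct terms learned from the training messages\n",
"len(vectorizer.vocabulary_)"
]
},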
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.ensemble import RandomForestClassifier as RFC\n",
"from sklearn.neural_network import MLPClassifier as MLP\n",
"from sklearn.naive_bayes import MultinomialNB as MNB\n",
"#clf_a = RFC(criterion='entropy', random_state=4222)\n",
"clf_a = MLP()\n",
"clf_a.fit(vec_train, y1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"pred = clf_a.predict(vectorizer.transform(Xt1))\n"
]
},
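{
"cell_type": "markdown",
"metadata": {},
"source": [
"* rough sanity check on the held-out split (a sketch using sklearn's `accuracy_score`; exact-match accuracy only, no per-class breakdown)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.metrics import accuracy_score\n",
"# fraction of test messages whose predicted emoji matches the label exactly\n",
"accuracy_score(yt1, pred)"
]
},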
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"testlist = pd.DataFrame({'message': Xt1, 'pred': pred, 'trained': yt1})\n",
"testlist = pd.merge(testlist, emoji_messages['emojis'].to_frame(), left_index=True, right_index=True)\n",
"testlist.to_csv('export.csv')\n",
"testlist"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(clf_a.predict(vectorizer.transform([\"Boah Caner\"]))[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(chr(0x1F3F))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vec_train[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n",
"out = widgets.Output()\n",
"\n",
"t = widgets.Text()\n",
"b = widgets.Button(\n",
" description='get smiley',\n",
" disabled=False,\n",
" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip='Click me',\n",
" icon='check'\n",
")\n",
"\n",
"\n",
"\n",
"def handle_submit(sender):\n",
" with out:\n",
" clear_output()\n",
" with out:\n",
" display(Markdown(\"# \" + str(clf_a.predict(vectorizer.transform([t.value]))[0])))\n",
"\n",
"b.on_click(handle_submit)\n",
" \n",
"display(t)\n",
"display(widgets.VBox([b, out])) "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

View File

@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -12,47 +12,45 @@
"import os"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## file input stuff:\n",
"\n",
"* replace `test.txt` with your whatsapp log file"
]
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================================================================\n",
"processing File: test.txt\n",
"================================================================================\n",
"================================================================================\n",
"successfully finished action: processing File: test.txt\n",
"================================================================================\n",
"================================================================================\n",
"Wrote output to test.txt.csv\n",
"================================================================================\n"
]
}
],
"outputs": [],
"source": [
"%%bash\n",
"./whatsapp2csv.sh test.txt"
"def create_widgets(t_text, b_text, out, additional_widgets=[]):\n",
" texts = []\n",
" for t in t_text:\n",
" texts.append(widgets.Text(t))\n",
" \n",
" button = widgets.Button(\n",
" description=b_text,\n",
" disabled=False,\n",
" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip=b_text,\n",
" icon='check'\n",
" )\n",
" display(widgets.VBox([widgets.HBox(texts + additional_widgets + [button]), out]))\n",
" return texts + [button]\n",
"\n",
"out_convert = widgets.Output()\n",
"out_build = widgets.Output()\n",
"out_train = widgets.Output()\n",
"out_save = widgets.Output()\n",
"out_read = widgets.Output()\n",
"out_test = widgets.Output()\n",
"\n",
"def mp(msg):\n",
" display(Markdown(msg))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read table `test.csv` exported by `whatsapp2csv.sh`"
"# Emoji Tester\n",
"\n",
"just run all cells at first. Then select on of the actions below."
]
},
{
@@ -61,30 +59,63 @@
"metadata": {},
"outputs": [],
"source": [
"messages = pd.read_csv('test.txt.csv', delimiter='\\t')\n",
"messages.head()"
"mp(\"## converting plain whatsapp export to csv\")\n",
"t_convert, b_convert = create_widgets([\"test.txt\"], \"convert whatsapp file to csv\", out_convert)\n",
"mp(\"## read csv and build database\")\n",
"single_label = widgets.Checkbox(value=False, description='using only last emoji', disable=False)\n",
"t_build, b_build = create_widgets([\"test.txt.csv\"], \"read\", out_build, [single_label])\n",
"mp(\"## Train\")\n",
"d = widgets.Dropdown(options=['DecisionTree', 'MLP', 'RandomForest'], value='MLP', description='Learning Method', disabled=False)\n",
"ova = widgets.Checkbox(value=False, description='Using one vs all (very slow, only with multi-label!)', disabled=False)\n",
"b_train = button = widgets.Button(description=\"train\", disabled=False, button_style='', tooltip=\"train\",icon='check')\n",
"display(widgets.VBox([widgets.HBox([d,ova,b_train]), out_train]))\n",
"mp(\"## save trained classifier\")\n",
"t_save_c, t_save_m, t_save_v, b_save = create_widgets([\"clf.pkl\", \"mlb.pkl\", \"vectorizer.pkl\"], \"save classifier\", out_save)\n",
"mp(\"## import trained classifier\")\n",
"t_read_c, t_read_m, t_read_v, b_read = create_widgets([\"clf.pkl\", \"mlb.pkl\", \"vectorizer.pkl\"], \"import classifier\", out_read)\n",
"mp(\"## predict emoji on custom text\")\n",
"b_prop = widgets.Checkbox(value=False, description='Show probabilities (only on trees)', disabled=False)\n",
"t_test, b_test = create_widgets([\"\"], \"get emoji\", out_test,[b_prop])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* read emoji-data (can be found here: https://www.unicode.org/Public/emoji/11.0/emoji-data.txt) and generate a table file out of it"
"----\n",
"## Code Section:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"found existing emoji specification\n"
]
}
],
"outputs": [],
"source": [
"def convert(b):\n",
" with out_convert:\n",
" clear_output()\n",
" with out_convert:\n",
" mp(\"**converting \" + t_convert.value + \"…**\")\n",
" import subprocess\n",
" print(str(subprocess.check_output([\"./whatsapp2csv.sh\", t_convert.value])).strip())\n",
" mp(\"**done**\")\n",
"\n",
"b_convert.on_click(convert)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* download emoji specification if not already existing"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"if [ ! -e emoji-data.txt ]\n",
@@ -96,9 +127,16 @@
"fi\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* stuff for creating emoji database"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -110,365 +148,239 @@
" 0x1F3FF,\n",
" 0x2642,\n",
" 0x2640\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n",
"emoji_data['type'] = emoji_data['type'].str.strip()\n",
"emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* now build a set out of the unicode types"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"ord(\"😀\") == int('0x1f600',16)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"228 231A..231B \n",
"229 23E9..23EC \n",
"230 23F0 \n",
"231 23F3 \n",
"232 25FD..25FE \n",
"Name: unicode, dtype: object"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"emoji_codes = emoji_data['unicode']\n",
"emoji_codes.head()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* we have to iterate over the whole list and extract all given ranges:"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"**used Emojis:**"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'🀄\\U0001f9f1\\U0001f9f2\\U0001f9f3\\U0001f9f4\\U0001f9f5\\U0001f9f6🃏🤐🤑🤒🤓🤔🤕🤖🤗🤘🤙🤚🤛🤜🤝🤞\\U0001f91f🤠🤡🤢🤣🤤🤥🤦🤧\\U0001f928\\U0001f929\\U0001f92a\\U0001f92b\\U0001f92c\\U0001f92d\\U0001f92e\\U0001f92f🤰\\U0001f931\\U0001f932🤳🤴🤵🤶🤷🤸🤹🤺🤼🤽🤾🥀🥁🥂🥃🥄🥅🥇🥈🥉🥊🥋\\U0001f94c\\U0001f94d\\U0001f94e\\U0001f94f🥐🥑🥒🥓🥔🥕🥖🥗🥘🥙🥚🥛🥜🥝🥞\\U0001f95f\\U0001f960\\U0001f961\\U0001f962\\U0001f963\\U0001f964\\U0001f965\\U0001f966\\U0001f967\\U0001f968\\U0001f969\\U0001f96a\\U0001f96b\\U0001f96c\\U0001f96d\\U0001f96e\\U0001f96f\\U0001f970\\U0001f9ec\\U0001f9ed\\U0001f973\\U0001f974\\U0001f975\\U0001f976\\U0001f9ee\\U0001f97a\\U0001f9ef\\U0001f97c\\U0001f97d\\U0001f97e\\U0001f97f🦀🦁🦂🦃🦄🦅🦆🦇🦈🦉🦊🦋🦌🦍🆎🦎🦏🆑🆒🆓🆔🆕🆖🆗🆘🆙🆚\\U0001f992\\U0001f993\\U0001f994\\U0001f995\\U0001f996\\U0001f997\\U0001f998\\U0001f999\\U0001f99a\\U0001f99b\\U0001f99c\\U0001f99d\\U0001f99e\\U0001f99f\\U0001f9a0\\U0001f9a1\\U0001f9a2\\U0001f9f8\\U0001f9b0\\U0001f9b1\\U0001f9b2\\U0001f9b3\\U0001f9b4\\U0001f9b5\\U0001f9b6\\U0001f9b7\\U0001f9b8\\U0001f9b9🧀\\U0001f9c1\\U0001f9c2\\U0001f9d0\\U0001f9d1\\U0001f9d2\\U0001f9d3\\U0001f9d4\\U0001f9d5\\U0001f9d6\\U0001f9d7\\U0001f9d8\\U0001f9d9\\U0001f9da\\U0001f9db\\U0001f9dc\\U0001f9dd\\U0001f9de\\U0001f9df\\U0001f9e0\\U0001f9e1\\U0001f9e2\\U0001f9e3\\U0001f9e4\\U0001f9e5🇦🇧🇨🇩🇪🇫🇬🇭🇮🇯🇰🇱🇲🇳🇴🇵🇶🇷🇸🇹🇺🇻🇼🇽🇾🇿\\U0001f9f7🈁\\U0001f9f9\\U0001f9fa\\U0001f9fb\\U0001f9fc\\U0001f9fd\\U0001f9fe\\U0001f9ff🈚🈯🈲🈳🈴🈵🈶🈸🈹🈺🉐🉑🌀🌁🌂🌃🌄🌅🌆🌇🌈🌉🌊🌋🌌🌍🌎🌏🌐🌑🌒🌓🌔🌕🌖🌗🌘🌙⌚⌛⬛⬜🌚🌛🌜🌝🌞🌟🌠🌭🌮🌯🌰🌱🌲🌳🌴🌵🌷🌸🌹🌺🌻🌼🌽🌾🌿🍀🍁🍂🍃🍄🍅🍆🍇🍈🍉🍊🍋🍌🍍🍎🍏⭐🍐🍑🍒🍓⭕🍔🍕🍖🍗🍘🍙🍚🍛🍜🍟🍝🍞🍠🍡🍤🍢🍣🍥🍦🍩🍧🍨🍪🍫🍮🍬🍭🍯🍰🍳🍱🍲🍴🍵🍸🍶🍷🍹🍺🍻🍼🍾🍿🎀🎁🎂🎃🎅🎄🎆🎈🎉🎊🎋🎌🎍🎇🎏🎐🎎🎑🎒🎓🎠🎡🎢🎣🎤🎥🎦🎧🎨🎩🎪🎫🎬🎭🎮🎯🎰🎱🎲🎳🎴🎵🎶🎷🎸🎹🎺🎻🎼🎽🎾🎿🏀🏁🏂🏃🏄🏅🏆🏇🏈🏉🏊🏏🏐🏑🏒🏓🏠🏡🏢🏣🏤🏥🏦🏧🏨🏩🏪🏫🏬🏭🏮🏯🏰⏩⏪⏫🏴⏬⏰⏳🏸🏹🏺🐀🐁🐂🐃🐄🐅🐆🐇🐈🐉🐊🐋🐌🐍🐎🐏🐐🐑🐒🐓🐔🐕🐖🐗🐘🐙🐚🐛🐜🐝🐞🐟🐠🐡🐢🐣🐤🐥🐦🐧🐨🐩🐪🐫🐬🐭🐮🐯🐰🐱🐲🐳🐴🐵🐶🐷🐸🐹🐺🐻🐼🐽🐾👀👂👃👄👅👆👇👈👉👊👋👌👍👎👏👐👑👒👓👔👕👖👗👘👙👚👛👜👝👞👟👠👡👢👣👤👥👦👧👨👩👪👫👬👭👮👯👰👱👲👳👴👵👶👷👸👹👺👻👼👽👾👿💀💁💂💃💄💅💆💇💈💉💊💋💌💍💎💏💐💑💒💓💔💕💖💗💘💙💚💛💜💝💞💟💠💡💢💣💤💥💦💧💨💩💪💫💬💭💮💯💰💱💲💳💴💵💶💷💸💹💺💻💼💽💾💿📀📁📂📃📄📅📆📇📈📉📊📋📌📍📎📏📐📑📒📓📔📕📖📗📘📙📚📛📜📝📞📟📠📡📢📣📤📥📦📧📨📩📪📫📬📭📮📯📰📱📲📳📴📵📶📷📸📹📺📻📼📿🔀🔁🔂🔃🔄🔅🔆🔇🔈🔉🔊🔋🔌🔍🔎🔏🔐🔑🔒🔓🔔🔕🔖🔗🔘🔙🔚🔛🔜🔝🔞🔟🔠🔡🔢🔣🔤🔥🔦🔧🔨🔩🔪🔫🔬🔭🔮🔯🔰🔱🔲🔳🔴🔵🔶🔷🔸🔹🔺🔻🔼🔽🕋🕌🕍🕎🕐🕑🕒🕓🕔🕕🕖🕗🕘🕙🕚🕛🕜🕝🕞🕟🕠🕡🕢🕣🕤🕥🕦🕧🕺🖕🖖🖤🗻🗼◽◾🗽🗾🗿😀😁😂😃😄😅😆😇😈😉😊😋😌😍😎😏😐😑☔☕😒😓😔😕😖😗😘😙😚😛😜😝😞😟😠😡😢😣😤😥😦😧😨😩😪😫😬😭😮😯😰😱😲😳😴😵😶😷😸😹😺😻😼😽😾😿🙀🙁🙂🙃♈♉♊♋♌♍♎♏♐♑♒♓🙋🙌🙍🙎🙏♿🚀🚁🚂🚃🚄🚅🚆🚇🚈🚉🚊🚋🚌🚍🚎🚏🚐🚑🚒⚓🚓🚔🚕🚖🚗🚘🚙🚚🚛🚜🚝🚞🚟⚡🚠🚡🚢🚣🚤🚥🚦🚧⚪⚫🚨🚩🚪🚫🚬🚭🚮🚯🚰🚱🚲🚳🚴🚵🚶🚷🚸⚽⚾🚹🚺🚻🚼🚽⛄⛅🚾🚿🛀🛁🛂🛃🛄🛅⛎🛌🛐🛑🛒⛔⛪🛫🛬⛲⛳🛴⛵🛵🛶\\U0001f6f7\\U0001f6f8⛺\\U0001f6f9⛽✅\\U0001f9e6🙄✊✋🙅🙆🙇🙈\\U0001f9e7🙉🙊✨\\U0001f9e8❌❎\\U0001f9e9❓❔❕❗\\U0001f9ea\\U0001f9eb➗🦐🦑➰➿\\U0001f9f0'"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"**blacklisted Emojis:**"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'♀♂🏻🏼🏽🏾🏿'"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"emoji_code_list = []\n",
"for entry in emoji_codes:\n",
" # testing whether we have an entry or a range:\n",
" if '.' in entry:\n",
" # range\n",
" a,b = entry.split(\"..\")\n",
" for i in range(int(a,16),int(b,16) +1):\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(i)\n",
" else:\n",
" # single entry\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(int(entry,16))\n",
"emoji_code_set = set(emoji_code_list)\n",
"display(Markdown(\"**used Emojis:**\"))\n",
"display(\"\".join([chr(x) for x in emoji_code_set]))\n",
"display(Markdown(\"**blacklisted Emojis:**\"))\n",
"display(\"\".join([chr(x) for x in emoji_blacklist]))"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"True False\n"
]
}
],
"source": [
"# simple test:\n",
"print(ord(\"😀\") in emoji_code_set, ord(\"a\") in emoji_code_set)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* expanding column and fill new emojis"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"messages[\"emojis\"] = None"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"for i in messages.index:\n",
" emoji_list = []\n",
" m = messages.iloc[i]['message']\n",
" m_new = \"\"\n",
" for c in str(m):\n",
" if ord(c) in emoji_code_set:\n",
" emoji_list.append(c)\n",
" elif ord(c) not in emoji_blacklist:\n",
" m_new += c\n",
" \n",
" messages.loc[i,'emojis'] = set(emoji_list)\n",
" #remove emiójis from message\n",
" messages.loc[i,'message'] = m_new\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages[:20]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get a list only containing messaged with emojis"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"emoji_messages = messages[[True if len(e) > 0 else False for e in messages['emojis']]]\n",
"emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(emoji_messages)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## learning part"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import itertools\n",
"import sklearn.utils as sku\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import MultiLabelBinarizer"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"mlb = MultiLabelBinarizer()\n",
"])\n",
"\n",
"labels=mlb.fit_transform(emoji_messages['emojis'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"vectorizer = TfidfVectorizer(stop_words='english')\n",
"vec_train = vectorizer.fit_transform(X1)\n",
"vec_test = vectorizer.transform(Xt1)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.ensemble import RandomForestClassifier as RFC\n",
"from sklearn.neural_network import MLPClassifier as MLP\n",
"from sklearn.naive_bayes import MultinomialNB as MNB\n",
"from sklearn.multiclass import OneVsRestClassifier as OVRC\n",
"#clf_a = OVRC(RFC(criterion='entropy', random_state=4222))\n",
"clf_a = OVRC(MLP(hidden_layer_sizes=(10,)))\n",
"#clf_a = OVRC(MNB())\n",
"clf_a.fit(vec_train, y1)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [],
"source": [
"pred = clf_a.predict(vectorizer.transform(Xt1))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"testlist = pd.DataFrame({'message': Xt1, 'pred': mlb.inverse_transform(pred), 'teacher': mlb.inverse_transform(yt1)})\n",
"testlist.to_csv('export.csv')\n",
"testlist"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"emoji_code_set = None\n",
"\n",
"out = widgets.Output()\n",
"\n",
"t = widgets.Text()\n",
"b = widgets.Button(\n",
" description='get smiley',\n",
" disabled=False,\n",
" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip='Click me',\n",
" icon='check'\n",
")\n",
"\n",
"\n",
"\n",
"def handle_submit(sender):\n",
" with out:\n",
" clear_output()\n",
" with out:\n",
" display(Markdown(\"# \" + str(mlb.inverse_transform(clf_a.predict(vectorizer.transform([t.value])))[0])))\n",
"\n",
"b.on_click(handle_submit)\n",
"def create_emoji_set():\n",
" global emoji_code_set\n",
" \n",
"display(t)\n",
"display(widgets.VBox([b, out])) "
" emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n",
" emoji_data['type'] = emoji_data['type'].str.strip()\n",
" emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]\n",
" \n",
" emoji_codes = emoji_data['unicode']\n",
" emoji_codes.head()\n",
" \n",
" emoji_code_list = []\n",
" for entry in emoji_codes:\n",
" # testing whether we have an entry or a range:\n",
" if '.' in entry:\n",
" # range\n",
" a,b = entry.split(\"..\")\n",
" for i in range(int(a,16),int(b,16) +1):\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(i)\n",
" else:\n",
" # single entry\n",
" if i not in emoji_blacklist:\n",
" emoji_code_list.append(int(entry,16))\n",
" emoji_code_set = set(emoji_code_list)\n",
" display(Markdown(\"**imported Emojis** (without modifier):\\n>\" + \"\".join([chr(x) for x in emoji_code_set])))\n",
" display(Markdown(\"**blacklisted Emojis:**\\n>\" + \"\".join([chr(x) for x in emoji_blacklist])))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* stuff for reading whatsapp messages"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"messages = None\n",
"vectorizer = None\n",
"clf_a = None\n",
"mlb = None\n",
"\n",
"emoji_messages=None"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def read_message_and_build_db(filename):\n",
" global messages\n",
" global emoji_messages\n",
" global vectorizer\n",
" global clf_a\n",
" global mlb\n",
" \n",
" messages = pd.read_csv(filename, delimiter='\\t')\n",
" mp(\"**filter messages and creating labels. This can take a while...**\")\n",
" messages[\"emojis\"] = None\n",
" \n",
" msg_batchsize = 1000\n",
" msg_counter = 0\n",
" \n",
" for i in messages.index:\n",
" \n",
" msg_counter+=1\n",
" if msg_counter >= msg_batchsize:\n",
" print(str(100 * i / messages.shape[0]) + \"%\")\n",
" msg_counter=0\n",
" \n",
" emoji_list = []\n",
" m = messages.iloc[i]['message']\n",
" m_new = \"\"\n",
" for c in str(m):\n",
" if ord(c) in emoji_code_set:\n",
" emoji_list.append(c)\n",
" elif ord(c) not in emoji_blacklist:\n",
" m_new += c\n",
" # if single label: only use last found emoji\n",
" messages.loc[i,'emojis'] = set(emoji_list) if (not single_label.value) or len(emoji_list)==0 else set(emoji_list[-1])\n",
" #remove emiójis from message\n",
" messages.loc[i,'message'] = m_new\n",
" \n",
" emoji_messages = messages[[True if len(e) > 0 else False for e in messages['emojis']]]\n",
" emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]\n",
" \n",
" mp(\"**Done**\")\n",
" \n",
" display(emoji_messages)\n",
"\n",
"def train(b):\n",
" global messages\n",
" global emoji_messages\n",
" global vectorizer\n",
" global clf_a\n",
" global mlb\n",
" with out_train:\n",
" clear_output()\n",
" # train part:\n",
" import numpy as np\n",
" import itertools\n",
" import sklearn.utils as sku\n",
" from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
" from sklearn.model_selection import train_test_split\n",
" from sklearn.preprocessing import MultiLabelBinarizer\n",
"\n",
" mlb = MultiLabelBinarizer() if not single_label.value else None\n",
" \n",
" if not mlb:\n",
" l = [list(e)[-1] for e in emoji_messages['emojis']]\n",
" \n",
" labels=mlb.fit_transform(emoji_messages['emojis']) if mlb else l\n",
" \n",
" if mlb:\n",
" display(Markdown(\"**emojis contained in Dataset:**\\n >\" + \"\".join(mlb.classes_ )))\n",
" else:\n",
" display(Markdown(\"**emojis contained in Dataset:**\\n >\" + \"\".join(set(l))))\n",
"\n",
" X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)\n",
"\n",
" vectorizer = TfidfVectorizer(stop_words='english')\n",
" vec_train = vectorizer.fit_transform(X1)\n",
" vec_test = vectorizer.transform(Xt1)\n",
"\n",
" mp(\"**train classifier. This can take a very long time… Grab a coffe! 😀**\")\n",
"\n",
" from sklearn.ensemble import RandomForestClassifier as RFC\n",
" from sklearn.neural_network import MLPClassifier as MLP\n",
" #from sklearn.naive_bayes import MultinomialNB as MNB\n",
" from sklearn.tree import DecisionTreeClassifier as DTC\n",
" from sklearn.multiclass import OneVsRestClassifier as OVRC\n",
" clf_a = None\n",
" if (d.value == \"DecisionTree\"):\n",
" clf_a = DTC()\n",
" elif d.value == \"MLP\":\n",
" clf_a = MLP(hidden_layer_sizes=(64,))\n",
" elif d.value == \"RandomForest\":\n",
" RFC(criterion='entropy', random_state=4222)\n",
"\n",
" if ova.value:\n",
" clf_a=OVRC(clf_a)\n",
"\n",
" display(clf_a)\n",
" clf_a.fit(vec_train, y1)\n",
"\n",
" mp(\"**training done**\")\n",
"\n",
" pred = clf_a.predict(vectorizer.transform(Xt1))\n",
"\n",
" testlist = pd.DataFrame({'message': Xt1, 'pred': mlb.inverse_transform(pred) if mlb else pred, 'teacher': mlb.inverse_transform(yt1) if mlb else yt1})\n",
" testlist.to_csv('export.csv')\n",
" display(testlist)\n",
" \n",
"def build_db(b):\n",
" with out_build:\n",
" clear_output()\n",
" create_emoji_set()\n",
" read_message_and_build_db(t_build.value)\n",
"b_build.on_click(build_db)\n",
"b_train.on_click(train)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.externals import joblib\n",
"def write_to_file(b):\n",
" global vectorizer\n",
" global clf_a\n",
" global mlb\n",
" \n",
" with out_save:\n",
" clear_output()\n",
" mp(\"**write to file...**\")\n",
" joblib.dump(clf_a, t_save_c.value)\n",
" if mlb:\n",
" joblib.dump(mlb, t_save_m.value) \n",
" joblib.dump(vectorizer, t_save_v.value)\n",
" mp(\"**done**\")\n",
"b_save.on_click(write_to_file)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def read_from_file(b):\n",
" global vectorizer\n",
" global clf_a\n",
" global mlb\n",
" \n",
" with out_read:\n",
" clear_output()\n",
" mp(\"**read from file…**\")\n",
" clf_a = joblib.load(t_read_c.value)\n",
" if t_read_m.value != \"\":\n",
" mlb = joblib.load(t_read_m.value)\n",
" vectorizer = joblib.load(t_read_v.value)\n",
" mp(\"**done**\")\n",
"b_read.on_click(read_from_file)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def predict(b):\n",
" with out_test:\n",
" clear_output()\n",
" v = mlb.inverse_transform(clf_a.predict(vectorizer.transform([t_test.value])))[0] if mlb else clf_a.predict(vectorizer.transform([t_test.value]))[0]\n",
" mp(\"**prediction:**\\n# \" + (\"\".join(v) if len(v)>0 else \" \"))\n",
" if b_prop.value:\n",
" pred = clf_a.predict_proba(vectorizer.transform([t_test.value]))\n",
" print(mlb.inverse_transform(pred))\n",
"\n",
"b_test.on_click(predict)"
]
},
{