From 4b31d44b877aa34fd45a483e3990e4fab5a72f93 Mon Sep 17 00:00:00 2001 From: Jonas Weinz Date: Sun, 20 May 2018 10:38:14 +0200 Subject: [PATCH] blubb --- Project/Tools/emoji tester old.ipynb | 423 ++++++++++++++++ Project/Tools/emoji tester.ipynb | 716 ++++++++++++--------------- 2 files changed, 737 insertions(+), 402 deletions(-) create mode 100644 Project/Tools/emoji tester old.ipynb diff --git a/Project/Tools/emoji tester old.ipynb b/Project/Tools/emoji tester old.ipynb new file mode 100644 index 0000000..054473e --- /dev/null +++ b/Project/Tools/emoji tester old.ipynb @@ -0,0 +1,423 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "from IPython.display import clear_output, Markdown, Math\n", + "import ipywidgets as widgets\n", + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "----\n", + "## file input stuff:\n", + "\n", + "* replace `test.txt` with your whatsapp log file" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "./whatsapp2csv.sh test.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* read table `test.txt.csv` exported by `whatsapp2csv.sh`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "messages = pd.read_csv('test.txt.csv', delimiter='\\t')\n", + "messages.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* read emoji-data (can be found here: https://www.unicode.org/Public/emoji/11.0/emoji-data.txt) and generate a table out of it" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%bash\n", + "if [ ! -e emoji-data.txt ]\n", + "then\n", + "    echo \"downloading emoji specification\"\n", + "    wget https://www.unicode.org/Public/emoji/11.0/emoji-data.txt\n", + "else\n", + "    echo \"found existing emoji specification\"\n", + "fi\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "emoji_blacklist = set([\n", + "    0x1F3FB,\n", + "    0x1F3FC,\n", + "    0x1F3FD,\n", + "    0x1F3FE,\n", + "    0x1F3FF\n", + "])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n", + "emoji_data['type'] = emoji_data['type'].str.strip()\n", + "emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]\n", + "emoji_data" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* now build a set out of the unicode code points" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ord(\"😀\") == int('0x1f600',16)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "emoji_codes = emoji_data['unicode']\n", + "emoji_codes.head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* we have to iterate over the whole list and extract all given ranges:" + ] + },
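For reference: the `unicode` column mixes single code points (`23F0`) and ranges (`231A..231B`). The range handling implemented in the next cell can also be captured in a small standalone helper (a sketch, independent of the notebook code):

```python
# Hypothetical helper illustrating the emoji-data.txt entry format:
# an entry is either a single hex code point or an "AAAA..BBBB" range.
def parse_entry(entry):
    entry = entry.strip()
    if '..' in entry:  # range entry
        start, end = entry.split('..')
        return list(range(int(start, 16), int(end, 16) + 1))
    return [int(entry, 16)]  # single entry

assert parse_entry("231A..231B") == [0x231A, 0x231B]
assert parse_entry("23F0") == [0x23F0]
```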
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "emoji_code_list = []\n", + "for entry in emoji_codes:\n", + "    # testing whether we have a single entry or a range:\n", + "    if '..' in entry:\n", + "        # range\n", + "        a,b = entry.split(\"..\")\n", + "        for i in range(int(a,16),int(b,16) +1):\n", + "            if i not in emoji_blacklist:\n", + "                emoji_code_list.append(i)\n", + "    else:\n", + "        # single entry\n", + "        if int(entry,16) not in emoji_blacklist:\n", + "            emoji_code_list.append(int(entry,16))\n", + "emoji_code_set = set(emoji_code_list)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# simple test:\n", + "print(ord(\"😀\") in emoji_code_set, ord(\"a\") in emoji_code_set)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* add an `emojis` column and fill it with the extracted emojis" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "messages[\"emojis\"] = None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in messages.index:\n", + "    emoji_list = []\n", + "    to_remove = []\n", + "    m = messages.iloc[i]['message']\n", + "    for c in str(m):\n", + "        if ord(c) in emoji_code_set:\n", + "            emoji_list.append(c)\n", + "        elif ord(c) in emoji_blacklist:\n", + "            to_remove.append(c)\n", + "    \n", + "    messages.loc[i,'emojis'] = emoji_list\n", + "    # remove emojis from message\n", + "    for e in (emoji_list + to_remove):\n", + "        m = m.replace(e,\"\")\n", + "    messages.loc[i,'message'] = m\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "messages[:20]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* get a list only containing messages with emojis" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "emoji_messages = messages[[len(e) > 0 for e in messages['emojis']]]\n", + "emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "display(emoji_messages)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "----\n", + "## learning part" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import itertools\n", + "import sklearn.utils as sku\n", + "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n", + "from sklearn.model_selection import train_test_split" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "labels = [e[-1] for e in emoji_messages['emojis']]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "labels[:10]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vectorizer = TfidfVectorizer(stop_words='english')\n", + "vec_train = vectorizer.fit_transform(X1)\n", + "vec_test = vectorizer.transform(Xt1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.ensemble import RandomForestClassifier as RFC\n", + "from sklearn.neural_network import MLPClassifier as MLP\n", +
"from sklearn.naive_bayes import MultinomialNB as MNB\n", + "#clf_a = RFC(criterion='entropy', random_state=4222)\n", + "clf_a = MLP()\n", + "clf_a.fit(vec_train, y1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pred = clf_a.predict(vectorizer.transform(Xt1))\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "testlist = pd.DataFrame({'message': Xt1, 'pred': pred, 'trained': yt1})\n", + "testlist = pd.merge(testlist, emoji_messages['emojis'].to_frame(), left_index=True, right_index=True)\n", + "testlist.to_csv('export.csv')\n", + "testlist" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "display(clf_a.predict(vectorizer.transform([\"Boah Caner\"]))[0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(chr(0x1F3F))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vec_train[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "out = widgets.Output()\n", + "\n", + "t = widgets.Text()\n", + "b = widgets.Button(\n", + " description='get smiley',\n", + " disabled=False,\n", + " button_style='', # 'success', 'info', 'warning', 'danger' or ''\n", + " tooltip='Click me',\n", + " icon='check'\n", + ")\n", + "\n", + "\n", + "\n", + "def handle_submit(sender):\n", + " with out:\n", + " clear_output()\n", + " with out:\n", + " display(Markdown(\"# \" + str(clf_a.predict(vectorizer.transform([t.value]))[0])))\n", + "\n", + "b.on_click(handle_submit)\n", + " \n", + "display(t)\n", + "display(widgets.VBox([b, out])) " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Project/Tools/emoji tester.ipynb b/Project/Tools/emoji tester.ipynb index 23f3462..f9b59db 100644 --- a/Project/Tools/emoji tester.ipynb +++ b/Project/Tools/emoji tester.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -12,47 +12,45 @@ "import os" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "----\n", - "## file input stuff:\n", - "\n", - "* replace `test.txt` with your whatsapp log file" - ] - }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "================================================================================\n", - "processing File: test.txt\n", - "================================================================================\n", - "================================================================================\n", - "successfully finished action: processing File: test.txt\n", - "================================================================================\n", - 
"================================================================================\n", - "Wrote output to test.txt.csv\n", - "================================================================================\n" - ] - } - ], + "outputs": [], "source": [ - "%%bash\n", - "./whatsapp2csv.sh test.txt" + "def create_widgets(t_text, b_text, out, additional_widgets=[]):\n", + " texts = []\n", + " for t in t_text:\n", + " texts.append(widgets.Text(t))\n", + " \n", + " button = widgets.Button(\n", + " description=b_text,\n", + " disabled=False,\n", + " button_style='', # 'success', 'info', 'warning', 'danger' or ''\n", + " tooltip=b_text,\n", + " icon='check'\n", + " )\n", + " display(widgets.VBox([widgets.HBox(texts + additional_widgets + [button]), out]))\n", + " return texts + [button]\n", + "\n", + "out_convert = widgets.Output()\n", + "out_build = widgets.Output()\n", + "out_train = widgets.Output()\n", + "out_save = widgets.Output()\n", + "out_read = widgets.Output()\n", + "out_test = widgets.Output()\n", + "\n", + "def mp(msg):\n", + " display(Markdown(msg))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "* read table `test.csv` exported by `whatsapp2csv.sh`" + "# Emoji Tester\n", + "\n", + "just run all cells at first. Then select on of the actions below." ] }, { @@ -61,30 +59,63 @@ "metadata": {}, "outputs": [], "source": [ - "messages = pd.read_csv('test.txt.csv', delimiter='\\t')\n", - "messages.head()" + "mp(\"## converting plain whatsapp export to csv\")\n", + "t_convert, b_convert = create_widgets([\"test.txt\"], \"convert whatsapp file to csv\", out_convert)\n", + "mp(\"## read csv and build database\")\n", + "single_label = widgets.Checkbox(value=False, description='using only last emoji', disable=False)\n", + "t_build, b_build = create_widgets([\"test.txt.csv\"], \"read\", out_build, [single_label])\n", + "mp(\"## Train\")\n", + "d = widgets.Dropdown(options=['DecisionTree', 'MLP', 'RandomForest'], value='MLP', description='Learning Method', disabled=False)\n", + "ova = widgets.Checkbox(value=False, description='Using one vs all (very slow, only with multi-label!)', disabled=False)\n", + "b_train = button = widgets.Button(description=\"train\", disabled=False, button_style='', tooltip=\"train\",icon='check')\n", + "display(widgets.VBox([widgets.HBox([d,ova,b_train]), out_train]))\n", + "mp(\"## save trained classifier\")\n", + "t_save_c, t_save_m, t_save_v, b_save = create_widgets([\"clf.pkl\", \"mlb.pkl\", \"vectorizer.pkl\"], \"save classifier\", out_save)\n", + "mp(\"## import trained classifier\")\n", + "t_read_c, t_read_m, t_read_v, b_read = create_widgets([\"clf.pkl\", \"mlb.pkl\", \"vectorizer.pkl\"], \"import classifier\", out_read)\n", + "mp(\"## predict emoji on custom text\")\n", + "b_prop = widgets.Checkbox(value=False, description='Show probabilities (only on trees)', disabled=False)\n", + "t_test, b_test = create_widgets([\"\"], \"get emoji\", out_test,[b_prop])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "* read emoji-data (can be found here: https://www.unicode.org/Public/emoji/11.0/emoji-data.txt) and generate a table file out of it" + "----\n", + "## Code Section:" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "found existing emoji specification\n" - ] - } - ], + "outputs": [], + "source": [ + "def convert(b):\n", + " with out_convert:\n", + " clear_output()\n", + " with out_convert:\n", + " 
mp(\"**converting \" + t_convert.value + \"โ€ฆ**\")\n", + " import subprocess\n", + " print(str(subprocess.check_output([\"./whatsapp2csv.sh\", t_convert.value])).strip())\n", + " mp(\"**done**\")\n", + "\n", + "b_convert.on_click(convert)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* download emoji specification if not already existing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "%%bash\n", "if [ ! -e emoji-data.txt ]\n", @@ -96,9 +127,16 @@ "fi\n" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* stuff for creating emoji database" + ] + }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -110,365 +148,239 @@ " 0x1F3FF,\n", " 0x2642,\n", " 0x2640\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n", - "emoji_data['type'] = emoji_data['type'].str.strip()\n", - "emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* now build a set out of the unicode types" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ord(\"๐Ÿ˜€\") == int('0x1f600',16)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "228 231A..231B \n", - "229 23E9..23EC \n", - "230 23F0 \n", - "231 23F3 \n", - "232 25FD..25FE \n", - "Name: unicode, dtype: object" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "emoji_codes = emoji_data['unicode']\n", - "emoji_codes.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* we have to iterate over the whole list and extract all given ranges:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "**used Emojis:**" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'๐Ÿ€„\\U0001f9f1\\U0001f9f2\\U0001f9f3\\U0001f9f4\\U0001f9f5\\U0001f9f6๐Ÿƒ๐Ÿค๐Ÿค‘๐Ÿค’๐Ÿค“๐Ÿค”๐Ÿค•๐Ÿค–๐Ÿค—๐Ÿค˜๐Ÿค™๐Ÿคš๐Ÿค›๐Ÿคœ๐Ÿค๐Ÿคž\\U0001f91f๐Ÿค 
{ "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -110,365 +148,239 @@ "    0x1F3FF,\n", "    0x2642,\n", "    0x2640\n", - "])" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n", - "emoji_data['type'] = emoji_data['type'].str.strip()\n", - "emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* now build a set out of the unicode types" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "ord(\"😀\") == int('0x1f600',16)" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "228    231A..231B  \n", - "229    23E9..23EC  \n", - "230    23F0        \n", - "231    23F3        \n", - "232    25FD..25FE  \n", - "Name: unicode, dtype: object" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "emoji_codes = emoji_data['unicode']\n", - "emoji_codes.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* we have to iterate over the whole list and extract all given ranges:" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "**used Emojis:**" - ], - "text/plain": [ - "<IPython.core.display.Markdown object>" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'[output dump of all used emojis omitted: mojibake-garbled in this copy]'" - ] - }, - "metadata": {}, - "output_type": "display_data" - },
- { - "data": { - "text/markdown": [ - "**blacklisted Emojis:**" - ], - "text/plain": [ - "<IPython.core.display.Markdown object>" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "'♀♂🏻🏼🏽🏾🏿'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "emoji_code_list = []\n", - "for entry in emoji_codes:\n", - "    # testing whether we have an entry or a range:\n", - "    if '.' 
in entry:\n", - " # range\n", - " a,b = entry.split(\"..\")\n", - " for i in range(int(a,16),int(b,16) +1):\n", - " if i not in emoji_blacklist:\n", - " emoji_code_list.append(i)\n", - " else:\n", - " # single entry\n", - " if i not in emoji_blacklist:\n", - " emoji_code_list.append(int(entry,16))\n", - "emoji_code_set = set(emoji_code_list)\n", - "display(Markdown(\"**used Emojis:**\"))\n", - "display(\"\".join([chr(x) for x in emoji_code_set]))\n", - "display(Markdown(\"**blacklisted Emojis:**\"))\n", - "display(\"\".join([chr(x) for x in emoji_blacklist]))" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "True False\n" - ] - } - ], - "source": [ - "# simple test:\n", - "print(ord(\"๐Ÿ˜€\") in emoji_code_set, ord(\"a\") in emoji_code_set)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* expanding column and fill new emojis" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "messages[\"emojis\"] = None" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "for i in messages.index:\n", - " emoji_list = []\n", - " m = messages.iloc[i]['message']\n", - " m_new = \"\"\n", - " for c in str(m):\n", - " if ord(c) in emoji_code_set:\n", - " emoji_list.append(c)\n", - " elif ord(c) not in emoji_blacklist:\n", - " m_new += c\n", - " \n", - " messages.loc[i,'emojis'] = set(emoji_list)\n", - " #remove emiรณjis from message\n", - " messages.loc[i,'message'] = m_new\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "messages[:20]" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* get a list only containing messaged with emojis" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [], - "source": [ - "emoji_messages = messages[[True if len(e) > 0 else False for e in messages['emojis']]]\n", - "emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "display(emoji_messages)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "----\n", - "## learning part" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import itertools\n", - "import sklearn.utils as sku\n", - "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n", - "from sklearn.model_selection import train_test_split\n", - "from sklearn.preprocessing import MultiLabelBinarizer" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [], - "source": [ - "mlb = MultiLabelBinarizer()\n", + "])\n", "\n", - "labels=mlb.fit_transform(emoji_messages['emojis'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [], - "source": [ - "vectorizer = TfidfVectorizer(stop_words='english')\n", - 
"vec_train = vectorizer.fit_transform(X1)\n", - "vec_test = vectorizer.transform(Xt1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from sklearn.ensemble import RandomForestClassifier as RFC\n", - "from sklearn.neural_network import MLPClassifier as MLP\n", - "from sklearn.naive_bayes import MultinomialNB as MNB\n", - "from sklearn.multiclass import OneVsRestClassifier as OVRC\n", - "#clf_a = OVRC(RFC(criterion='entropy', random_state=4222))\n", - "clf_a = OVRC(MLP(hidden_layer_sizes=(10,)))\n", - "#clf_a = OVRC(MNB())\n", - "clf_a.fit(vec_train, y1)" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [ - "pred = clf_a.predict(vectorizer.transform(Xt1))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "testlist = pd.DataFrame({'message': Xt1, 'pred': mlb.inverse_transform(pred), 'teacher': mlb.inverse_transform(yt1)})\n", - "testlist.to_csv('export.csv')\n", - "testlist" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ + "emoji_code_set = None\n", "\n", - "out = widgets.Output()\n", - "\n", - "t = widgets.Text()\n", - "b = widgets.Button(\n", - " description='get smiley',\n", - " disabled=False,\n", - " button_style='', # 'success', 'info', 'warning', 'danger' or ''\n", - " tooltip='Click me',\n", - " icon='check'\n", - ")\n", - "\n", - "\n", - "\n", - "def handle_submit(sender):\n", - " with out:\n", - " clear_output()\n", - " with out:\n", - " display(Markdown(\"# \" + str(mlb.inverse_transform(clf_a.predict(vectorizer.transform([t.value])))[0])))\n", - "\n", - "b.on_click(handle_submit)\n", + "def create_emoji_set():\n", + " global emoji_code_set\n", " \n", - "display(t)\n", - "display(widgets.VBox([b, out])) " + " emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n", + " emoji_data['type'] = emoji_data['type'].str.strip()\n", + " emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]\n", + " \n", + " emoji_codes = emoji_data['unicode']\n", + " emoji_codes.head()\n", + " \n", + " emoji_code_list = []\n", + " for entry in emoji_codes:\n", + " # testing whether we have an entry or a range:\n", + " if '.' 
in entry:\n", + "            # range\n", + "            a,b = entry.split(\"..\")\n", + "            for i in range(int(a,16),int(b,16) +1):\n", + "                if i not in emoji_blacklist:\n", + "                    emoji_code_list.append(i)\n", + "        else:\n", + "            # single entry\n", + "            if int(entry,16) not in emoji_blacklist:\n", + "                emoji_code_list.append(int(entry,16))\n", + "    emoji_code_set = set(emoji_code_list)\n", + "    display(Markdown(\"**imported Emojis** (without modifier):\\n>\" + \"\".join([chr(x) for x in emoji_code_set])))\n", + "    display(Markdown(\"**blacklisted Emojis:**\\n>\" + \"\".join([chr(x) for x in emoji_blacklist])))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* stuff for reading whatsapp messages" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "messages = None\n", + "vectorizer = None\n", + "clf_a = None\n", + "mlb = None\n", + "\n", + "emoji_messages = None" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "def read_message_and_build_db(filename):\n", + "    global messages\n", + "    global emoji_messages\n", + "    global vectorizer\n", + "    global clf_a\n", + "    global mlb\n", + "    \n", + "    messages = pd.read_csv(filename, delimiter='\\t')\n", + "    mp(\"**filtering messages and creating labels. This can take a while...**\")\n", + "    messages[\"emojis\"] = None\n", + "    \n", + "    msg_batchsize = 1000\n", + "    msg_counter = 0\n", + "    \n", + "    for i in messages.index:\n", + "        \n", + "        msg_counter+=1\n", + "        if msg_counter >= msg_batchsize:\n", + "            print(str(100 * i / messages.shape[0]) + \"%\")\n", + "            msg_counter=0\n", + "        \n", + "        emoji_list = []\n", + "        m = messages.iloc[i]['message']\n", + "        m_new = \"\"\n", + "        for c in str(m):\n", + "            if ord(c) in emoji_code_set:\n", + "                emoji_list.append(c)\n", + "            elif ord(c) not in emoji_blacklist:\n", + "                m_new += c\n", + "        # if single label: only use the last found emoji\n", + "        messages.loc[i,'emojis'] = set(emoji_list) if (not single_label.value) or len(emoji_list)==0 else set(emoji_list[-1])\n", + "        # remove emojis from message\n", + "        messages.loc[i,'message'] = m_new\n", + "    \n", + "    emoji_messages = messages[[len(e) > 0 for e in messages['emojis']]]\n", + "    emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]\n", + "    \n", + "    mp(\"**Done**\")\n", + "    \n", + "    display(emoji_messages)\n",
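Before `train` below: the combination of `TfidfVectorizer`, `MultiLabelBinarizer` and `OneVsRestClassifier` it relies on can be exercised on a few made-up messages. A toy sketch with fake data, not part of the patch:

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.tree import DecisionTreeClassifier

msgs   = ["good morning", "so funny", "love you", "haha funny morning"]
emojis = [{"😀"}, {"😂"}, {"❤"}, {"😂", "😀"}]      # one label set per message

vec = TfidfVectorizer()
X   = vec.fit_transform(msgs)                      # sparse tf-idf features
mlb = MultiLabelBinarizer()
y   = mlb.fit_transform(emojis)                    # binary indicator matrix

clf = OneVsRestClassifier(DecisionTreeClassifier()).fit(X, y)
print(mlb.inverse_transform(clf.predict(vec.transform(["funny text"]))))
```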
+ "\n", + "def train(b):\n", + "    global messages\n", + "    global emoji_messages\n", + "    global vectorizer\n", + "    global clf_a\n", + "    global mlb\n", + "    with out_train:\n", + "        clear_output()\n", + "        # train part:\n", + "        import numpy as np\n", + "        import itertools\n", + "        import sklearn.utils as sku\n", + "        from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n", + "        from sklearn.model_selection import train_test_split\n", + "        from sklearn.preprocessing import MultiLabelBinarizer\n", + "\n", + "        mlb = MultiLabelBinarizer() if not single_label.value else None\n", + "        \n", + "        if not mlb:\n", + "            l = [list(e)[-1] for e in emoji_messages['emojis']]\n", + "        \n", + "        labels = mlb.fit_transform(emoji_messages['emojis']) if mlb else l\n", + "        \n", + "        if mlb:\n", + "            display(Markdown(\"**emojis contained in Dataset:**\\n >\" + \"\".join(mlb.classes_)))\n", + "        else:\n", + "            display(Markdown(\"**emojis contained in Dataset:**\\n >\" + \"\".join(set(l))))\n", + "\n", + "        X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)\n", + "\n", + "        vectorizer = TfidfVectorizer(stop_words='english')\n", + "        vec_train = vectorizer.fit_transform(X1)\n", + "        vec_test = vectorizer.transform(Xt1)\n", + "\n", + "        mp(\"**training classifier. This can take a very long time… Grab a coffee! 😀**\")\n", + "\n", + "        from sklearn.ensemble import RandomForestClassifier as RFC\n", + "        from sklearn.neural_network import MLPClassifier as MLP\n", + "        #from sklearn.naive_bayes import MultinomialNB as MNB\n", + "        from sklearn.tree import DecisionTreeClassifier as DTC\n", + "        from sklearn.multiclass import OneVsRestClassifier as OVRC\n", + "        clf_a = None\n", + "        if (d.value == \"DecisionTree\"):\n", + "            clf_a = DTC()\n", + "        elif d.value == \"MLP\":\n", + "            clf_a = MLP(hidden_layer_sizes=(64,))\n", + "        elif d.value == \"RandomForest\":\n", + "            clf_a = RFC(criterion='entropy', random_state=4222)\n", + "\n", + "        if ova.value:\n", + "            clf_a = OVRC(clf_a)\n", + "\n", + "        display(clf_a)\n", + "        clf_a.fit(vec_train, y1)\n", + "\n", + "        mp(\"**training done**\")\n", + "\n", + "        pred = clf_a.predict(vectorizer.transform(Xt1))\n", + "\n", + "        testlist = pd.DataFrame({'message': Xt1, 'pred': mlb.inverse_transform(pred) if mlb else pred, 'teacher': mlb.inverse_transform(yt1) if mlb else yt1})\n", + "        testlist.to_csv('export.csv')\n", + "        display(testlist)\n", + "    \n", + "def build_db(b):\n", + "    with out_build:\n", + "        clear_output()\n", + "        create_emoji_set()\n", + "        read_message_and_build_db(t_build.value)\n", + "b_build.on_click(build_db)\n", + "b_train.on_click(train)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "from sklearn.externals import joblib\n", + "def write_to_file(b):\n", + "    global vectorizer\n", + "    global clf_a\n", + "    global mlb\n", + "    \n", + "    with out_save:\n", + "        clear_output()\n", + "        mp(\"**write to file...**\")\n", + "        joblib.dump(clf_a, t_save_c.value)\n", + "        if mlb:\n", + "            joblib.dump(mlb, t_save_m.value)\n", + "        joblib.dump(vectorizer, t_save_v.value)\n", + "        mp(\"**done**\")\n", + "b_save.on_click(write_to_file)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "def read_from_file(b):\n", + "    global vectorizer\n", + "    global clf_a\n", + "    global mlb\n", + "    \n", + "    with out_read:\n", + "        clear_output()\n", + "        mp(\"**read from file…**\")\n", + "        clf_a = joblib.load(t_read_c.value)\n", + "        if t_read_m.value != \"\":\n", + "            mlb = joblib.load(t_read_m.value)\n", + "        vectorizer = joblib.load(t_read_v.value)\n", + "        mp(\"**done**\")\n", + "b_read.on_click(read_from_file)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "def predict(b):\n", + "    with out_test:\n", + "        clear_output()\n", + "        v = mlb.inverse_transform(clf_a.predict(vectorizer.transform([t_test.value])))[0] if mlb else clf_a.predict(vectorizer.transform([t_test.value]))[0]\n", + "        mp(\"**prediction:**\\n# \" + (\"\".join(v) if len(v)>0 else \" \"))\n", + "        if b_prop.value:\n", + "            # per-class probabilities (works for single-label tree classifiers)\n", + "            pred = clf_a.predict_proba(vectorizer.transform([t_test.value]))\n", + "            classes = mlb.classes_ if mlb else clf_a.classes_\n", + "            print(sorted(zip(pred[0], classes), reverse=True))\n", + "\n", + "b_test.on_click(predict)" ] }, {
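For reference, the save/import cells above boil down to a joblib round trip. A minimal sketch using the notebook's `clf_a`/`vectorizer` names (`sklearn.externals.joblib` matches the sklearn vintage used here; the standalone `joblib` package works the same way):

```python
from sklearn.externals import joblib

# persist the trained pieces ...
joblib.dump(clf_a, 'clf.pkl')
joblib.dump(vectorizer, 'vectorizer.pkl')

# ... and restore them in a fresh session
clf_restored = joblib.load('clf.pkl')
vec_restored = joblib.load('vectorizer.pkl')
```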