{ "cells": [ { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "from IPython.display import clear_output, Markdown, Math\n", "import ipywidgets as widgets\n", "import os" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def create_widgets(t_text, b_text, out, additional_widgets=[]):\n", " texts = []\n", " for t in t_text:\n", " texts.append(widgets.Text(t))\n", " \n", " button = widgets.Button(\n", " description=b_text,\n", " disabled=False,\n", " button_style='', # 'success', 'info', 'warning', 'danger' or ''\n", " tooltip=b_text,\n", " icon='check'\n", " )\n", " display(widgets.VBox([widgets.HBox(texts + additional_widgets + [button]), out]))\n", " return texts + [button]\n", "\n", "out_convert = widgets.Output()\n", "out_build = widgets.Output()\n", "out_train = widgets.Output()\n", "out_save = widgets.Output()\n", "out_read = widgets.Output()\n", "out_test = widgets.Output()\n", "\n", "def mp(msg):\n", " display(Markdown(msg))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Emoji Tester\n", "\n", "just run all cells at first. Then select on of the actions below." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "mp(\"## converting plain whatsapp export to csv\")\n", "t_convert, b_convert = create_widgets([\"test.txt\"], \"convert whatsapp file to csv\", out_convert)\n", "mp(\"## read csv and build database\")\n", "single_label = widgets.Checkbox(value=False, description='using only last emoji', disable=False)\n", "t_build, b_build = create_widgets([\"test.txt.csv\"], \"read\", out_build, [single_label])\n", "mp(\"## Train\")\n", "d = widgets.Dropdown(options=['DecisionTree', 'MLP', 'RandomForest'], value='MLP', description='Learning Method', disabled=False)\n", "ova = widgets.Checkbox(value=False, description='Using one vs all (very slow, only with multi-label!)', disabled=False)\n", "b_train = button = widgets.Button(description=\"train\", disabled=False, button_style='', tooltip=\"train\",icon='check')\n", "display(widgets.VBox([widgets.HBox([d,ova,b_train]), out_train]))\n", "mp(\"## save trained classifier\")\n", "t_save_c, t_save_m, t_save_v, b_save = create_widgets([\"clf.pkl\", \"mlb.pkl\", \"vectorizer.pkl\"], \"save classifier\", out_save)\n", "mp(\"## import trained classifier\")\n", "t_read_c, t_read_m, t_read_v, b_read = create_widgets([\"clf.pkl\", \"mlb.pkl\", \"vectorizer.pkl\"], \"import classifier\", out_read)\n", "mp(\"## predict emoji on custom text\")\n", "b_prop = widgets.Checkbox(value=False, description='Show probabilities (only on trees)', disabled=False)\n", "t_test, b_test = create_widgets([\"\"], \"get emoji\", out_test,[b_prop])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## Code Section:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def convert(b):\n", " with out_convert:\n", " clear_output()\n", " with out_convert:\n", " mp(\"**converting \" + t_convert.value + \"…**\")\n", " import subprocess\n", " print(str(subprocess.check_output([\"./whatsapp2csv.sh\", t_convert.value])).strip())\n", " mp(\"**done**\")\n", "\n", "b_convert.on_click(convert)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* download emoji specification if not already existing" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "%%bash\n", "if [ ! -e emoji-data.txt ]\n", "then\n", " echo \"downloading emoji specification\"\n", " wget https://www.unicode.org/Public/emoji/11.0/emoji-data.txt\n", "else\n", " echo \"found existing emoji specification\"\n", "fi\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* stuff for creating emoji database" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "emoji_blacklist = set([\n", " 0x1F3FB,\n", " 0x1F3FC,\n", " 0x1F3FD,\n", " 0x1F3FE,\n", " 0x1F3FF,\n", " 0x2642,\n", " 0x2640\n", "])\n", "\n", "emoji_code_set = None\n", "\n", "def create_emoji_set():\n", " global emoji_code_set\n", " \n", " emoji_data = pd.read_csv('emoji-data.txt', delimiter=';', comment='#', names=[\"unicode\",\"type\"])\n", " emoji_data['type'] = emoji_data['type'].str.strip()\n", " emoji_data = emoji_data[emoji_data['type'] == \"Emoji_Presentation\"]\n", " \n", " emoji_codes = emoji_data['unicode']\n", " emoji_codes.head()\n", " \n", " emoji_code_list = []\n", " for entry in emoji_codes:\n", " # testing whether we have an entry or a range:\n", " if '.' in entry:\n", " # range\n", " a,b = entry.split(\"..\")\n", " for i in range(int(a,16),int(b,16) +1):\n", " if i not in emoji_blacklist:\n", " emoji_code_list.append(i)\n", " else:\n", " # single entry\n", " if i not in emoji_blacklist:\n", " emoji_code_list.append(int(entry,16))\n", " emoji_code_set = set(emoji_code_list)\n", " display(Markdown(\"**imported Emojis** (without modifier):\\n>\" + \"\".join([chr(x) for x in emoji_code_set])))\n", " display(Markdown(\"**blacklisted Emojis:**\\n>\" + \"\".join([chr(x) for x in emoji_blacklist])))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* stuff for reading whatsapp messages" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "messages = None\n", "vectorizer = None\n", "clf_a = None\n", "mlb = None\n", "\n", "emoji_messages=None" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def read_message_and_build_db(filename):\n", " global messages\n", " global emoji_messages\n", " global vectorizer\n", " global clf_a\n", " global mlb\n", " \n", " messages = pd.read_csv(filename, delimiter='\\t')\n", " mp(\"**filter messages and creating labels. This can take a while...**\")\n", " messages[\"emojis\"] = None\n", " \n", " msg_batchsize = 1000\n", " msg_counter = 0\n", " \n", " for i in messages.index:\n", " \n", " msg_counter+=1\n", " if msg_counter >= msg_batchsize:\n", " print(str(100 * i / messages.shape[0]) + \"%\")\n", " msg_counter=0\n", " \n", " emoji_list = []\n", " m = messages.iloc[i]['message']\n", " m_new = \"\"\n", " for c in str(m):\n", " if ord(c) in emoji_code_set:\n", " emoji_list.append(c)\n", " elif ord(c) not in emoji_blacklist:\n", " m_new += c\n", " # if single label: only use last found emoji\n", " messages.loc[i,'emojis'] = set(emoji_list) if (not single_label.value) or len(emoji_list)==0 else set(emoji_list[-1])\n", " #remove emiójis from message\n", " messages.loc[i,'message'] = m_new\n", " \n", " emoji_messages = messages[[True if len(e) > 0 else False for e in messages['emojis']]]\n", " emoji_messages = emoji_messages[emoji_messages['message'] != \"\"]\n", " \n", " mp(\"**Done**\")\n", " \n", " display(emoji_messages)\n", "\n", "def train(b):\n", " global messages\n", " global emoji_messages\n", " global vectorizer\n", " global clf_a\n", " global mlb\n", " with out_train:\n", " clear_output()\n", " # train part:\n", " import numpy as np\n", " import itertools\n", " import sklearn.utils as sku\n", " from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n", " from sklearn.model_selection import train_test_split\n", " from sklearn.preprocessing import MultiLabelBinarizer\n", "\n", " mlb = MultiLabelBinarizer() if not single_label.value else None\n", " \n", " if not mlb:\n", " l = [list(e)[-1] for e in emoji_messages['emojis']]\n", " \n", " labels=mlb.fit_transform(emoji_messages['emojis']) if mlb else l\n", " \n", " if mlb:\n", " display(Markdown(\"**emojis contained in Dataset:**\\n >\" + \"\".join(mlb.classes_ )))\n", " else:\n", " display(Markdown(\"**emojis contained in Dataset:**\\n >\" + \"\".join(set(l))))\n", "\n", " X1, Xt1, y1, yt1 = train_test_split(emoji_messages['message'], labels, test_size=0.1, random_state=4222)\n", "\n", " vectorizer = TfidfVectorizer(stop_words='english')\n", " vec_train = vectorizer.fit_transform(X1)\n", " vec_test = vectorizer.transform(Xt1)\n", "\n", " mp(\"**train classifier. This can take a very long time… Grab a coffe! 😀**\")\n", "\n", " from sklearn.ensemble import RandomForestClassifier as RFC\n", " from sklearn.neural_network import MLPClassifier as MLP\n", " #from sklearn.naive_bayes import MultinomialNB as MNB\n", " from sklearn.tree import DecisionTreeClassifier as DTC\n", " from sklearn.multiclass import OneVsRestClassifier as OVRC\n", " clf_a = None\n", " if (d.value == \"DecisionTree\"):\n", " clf_a = DTC()\n", " elif d.value == \"MLP\":\n", " clf_a = MLP(hidden_layer_sizes=(64,))\n", " elif d.value == \"RandomForest\":\n", " RFC(criterion='entropy', random_state=4222)\n", "\n", " if ova.value:\n", " clf_a=OVRC(clf_a)\n", "\n", " display(clf_a)\n", " clf_a.fit(vec_train, y1)\n", "\n", " mp(\"**training done**\")\n", "\n", " pred = clf_a.predict(vectorizer.transform(Xt1))\n", "\n", " testlist = pd.DataFrame({'message': Xt1, 'pred': mlb.inverse_transform(pred) if mlb else pred, 'teacher': mlb.inverse_transform(yt1) if mlb else yt1})\n", " testlist.to_csv('export.csv')\n", " display(testlist)\n", " \n", "def build_db(b):\n", " with out_build:\n", " clear_output()\n", " create_emoji_set()\n", " read_message_and_build_db(t_build.value)\n", "b_build.on_click(build_db)\n", "b_train.on_click(train)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from sklearn.externals import joblib\n", "def write_to_file(b):\n", " global vectorizer\n", " global clf_a\n", " global mlb\n", " \n", " with out_save:\n", " clear_output()\n", " mp(\"**write to file...**\")\n", " joblib.dump(clf_a, t_save_c.value)\n", " if mlb:\n", " joblib.dump(mlb, t_save_m.value) \n", " joblib.dump(vectorizer, t_save_v.value)\n", " mp(\"**done**\")\n", "b_save.on_click(write_to_file)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def read_from_file(b):\n", " global vectorizer\n", " global clf_a\n", " global mlb\n", " \n", " with out_read:\n", " clear_output()\n", " mp(\"**read from file…**\")\n", " clf_a = joblib.load(t_read_c.value)\n", " if t_read_m.value != \"\":\n", " mlb = joblib.load(t_read_m.value)\n", " vectorizer = joblib.load(t_read_v.value)\n", " mp(\"**done**\")\n", "b_read.on_click(read_from_file)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def predict(b):\n", " with out_test:\n", " clear_output()\n", " v = mlb.inverse_transform(clf_a.predict(vectorizer.transform([t_test.value])))[0] if mlb else clf_a.predict(vectorizer.transform([t_test.value]))[0]\n", " mp(\"**prediction:**\\n# \" + (\"\".join(v) if len(v)>0 else \" \"))\n", " if b_prop.value:\n", " pred = clf_a.predict_proba(vectorizer.transform([t_test.value]))\n", " print(mlb.inverse_transform(pred))\n", "\n", "b_test.on_click(predict)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" } }, "nbformat": 4, "nbformat_minor": 2 }