{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Evaluation\n", "We want to evaluate our approach" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Needed\n", "We want to define needed components for this UI" ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "collapsed": true }, "outputs": [], "source": [ "import random\n", "import ipywidgets as widgets\n", "from IPython.display import display, clear_output\n", "import math\n", "import datetime" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Trigger refresh of prediction\n", "each action of typing and sending should yield a new updated prediction for best fitting emojis" ] }, { "cell_type": "markdown", "metadata": { "collapsed": true }, "source": [ "Initial definition of emojis used later" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "collapsed": true }, "outputs": [], "source": [ "#locally defined based on the first analysis of parts of our twitter data: resulting in the 20 most used emojis\n", "#we used them for our first approaches of prediction\n", "top_emojis = ['😂','😭','😍','😩','😊','😘','🙏','🙌','😉','😁','😅','😎','😢','😒','😏','😌','😔','😋','😀','😤']\n", "#possible initial set of predictions, only used in naive test cases\n", "predictions = [\"🤐\",\"🤑\",\"🤒\",\"🤓\",\"🤔\",\"🤕\",\"🤗\",\"🤘\"]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Advanced Approach\n", "define the classifier for advanced prediction, used for the sentiment prediction" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using TensorFlow backend.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[nltk_data] Downloading package punkt to /Users/Carsten/nltk_data...\n", "[nltk_data] Package punkt is already up-to-date!\n", "[nltk_data] Downloading package averaged_perceptron_tagger to\n", "[nltk_data] /Users/Carsten/nltk_data...\n", "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n", "[nltk_data] date!\n", "[nltk_data] Downloading package wordnet to /Users/Carsten/nltk_data...\n", "[nltk_data] Package wordnet is already up-to-date!\n" ] } ], "source": [ "#navigation into right path and generating classifier\n", "import sys\n", "sys.path.append(\"..\")\n", "sys.path.append(\"../naive_approach\")\n", "\n", "\n", "\n", "import simple_approach.simple_twitter_learning as stl\n", "clf_advanced = stl.pipeline_manager.load_pipeline_from_files( '../simple_approach/custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n", "\n", "import Tools.Emoji_Distance as ed" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Generate new Sample for online learning / reinforcement learning" ] }, { "cell_type": "code", "execution_count": 4, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def generate_new_training_sample (msg, emoji):\n", " sentiment = ed.emoji_to_sentiment_vector(emoji)\n", " \n", " #TODO message msg could be filtred\n", " text = msg\n", " return text, sentiment" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Naive Approach\n", "for topic related emoji prediction" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "#sys.path.append(\"..\")\n", "#print(sys.path)\n", "\n", "import naive_approach as clf_naive" ] }, { "cell_type": "code", "execution_count": 6, "metadata": { "collapsed": true }, "outputs": [], "source": [ "tmp_dict = clf_naive.prepareData()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Merge Predictions\n", "combine the predictions of both approaches" ] }, { "cell_type": "code", "execution_count": 7, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def merged_prediction(msg , split = 0.5 , number = 8, target_emojis = top_emojis):\n", " \n", " #calc ratio of prediction splitted between advanced aprroach and naive approach\n", " number_advanced = round(split*number)\n", " number_naive = round((1-split)*number)\n", " \n", " #predict emojis with the naive approach\n", " prediction_naive , prediction_naive_values = clf_naive.predict(sentence = msg, lookup= tmp_dict, n = number_naive)\n", "\n", " #filter 0 values\n", " tmp1 = []\n", " tmp2 = []\n", " epsilon = 0.0001\n", "\n", " for i in range(len(prediction_naive)):\n", " if(abs(prediction_naive_values[i]) > epsilon):\n", " tmp1.append(prediction_naive[i])\n", " tmp2.append(prediction_naive[i])\n", "\n", " prediction_naive = tmp1\n", " prediction_naive_values = tmp2\n", " \n", " if(len(prediction_naive) < number_naive):\n", " #print(\"only few matches\")\n", " number_advanced = number - len(prediction_naive)\n", " \n", " #print(number, number_advanced, number_naive)\n", " \n", " #predict the advanced approach\n", " sentiment = clf_advanced.predict([msg])\n", " prediction_advanced = ed.sentiment_vector_to_emoji(sentiment,n_results = number_advanced, custom_target_emojis=target_emojis)\n", " \n", " #concat both predictions\n", " prediction = list(prediction_advanced)+list(prediction_naive)\n", " \n", " return prediction[:number]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Actions triggered when something is changed" ] }, { "cell_type": "code", "execution_count": 8, "metadata": { "collapsed": true }, "outputs": [], "source": [ "def trigger_new_prediction(all_chat, current_message):\n", " global predictions\n", " \n", " #random prediction for initial test\n", " #random.shuffle(predictions)\n", " \n", " #first prediction only using advanced approach\n", " #sent = clf_advanced.predict([current_message])\n", " #p = ed.sentiment_vector_to_emoji(sent,n_results = 8, custom_target_emojis=top_emojis)\n", " \n", " #merged prediction\n", " if(current_message != \"\"):\n", " p = merged_prediction(msg = current_message, target_emojis=top_emojis)\n", "\n", " predictions = p" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Trigger Prediction for CSV Table" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "ename": "ParserError", "evalue": "Error tokenizing data. C error: Expected 1 fields in line 27, saw 2\n", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mParserError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;31m# get table\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mdf\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_csv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Evaluation Sentences - Tabellenblatt1.csv\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0mdf\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhead\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mparser_f\u001b[0;34m(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, skip_footer, doublequote, delim_whitespace, as_recarray, compact_ints, use_unsigned, low_memory, buffer_lines, memory_map, float_precision)\u001b[0m\n\u001b[1;32m 653\u001b[0m skip_blank_lines=skip_blank_lines)\n\u001b[1;32m 654\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 655\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfilepath_or_buffer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwds\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 656\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 657\u001b[0m \u001b[0mparser_f\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__name__\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36m_read\u001b[0;34m(filepath_or_buffer, kwds)\u001b[0m\n\u001b[1;32m 409\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 410\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 411\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 412\u001b[0m \u001b[0;32mfinally\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 413\u001b[0m \u001b[0mparser\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1003\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'skipfooter not supported for iteration'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1004\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1005\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_engine\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1006\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1007\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'as_recarray'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/io/parsers.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, nrows)\u001b[0m\n\u001b[1;32m 1746\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnrows\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1747\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1748\u001b[0;31m \u001b[0mdata\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reader\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnrows\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1749\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mStopIteration\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1750\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_first_chunk\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader.read (pandas/_libs/parsers.c:10862)\u001b[0;34m()\u001b[0m\n", "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._read_low_memory (pandas/_libs/parsers.c:11138)\u001b[0;34m()\u001b[0m\n", "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._read_rows (pandas/_libs/parsers.c:11884)\u001b[0;34m()\u001b[0m\n", "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.TextReader._tokenize_rows (pandas/_libs/parsers.c:11755)\u001b[0;34m()\u001b[0m\n", "\u001b[0;32mpandas/_libs/parsers.pyx\u001b[0m in \u001b[0;36mpandas._libs.parsers.raise_parser_error (pandas/_libs/parsers.c:28765)\u001b[0;34m()\u001b[0m\n", "\u001b[0;31mParserError\u001b[0m: Error tokenizing data. C error: Expected 1 fields in line 27, saw 2\n" ] } ], "source": [ "# get table\n", "import pandas as pd\n", "df = pd.read_csv(\"Evaluation Sentences - Tabellenblatt1.csv\")\n", "df.head()" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "all_predictions = []\n", "\n", "for index, row in df.iterrows():\n", " sentence = row[\"Sentence\"]\n", " #print(sentence)\n", "\n", " trigger_new_prediction(all_chat=\"\", current_message = sentence)\n", " #print(predictions)\n", " \n", " #prediction to string\n", " tmp_prediction = \"\".join(predictions)\n", " \n", " #construct the preediction column\n", " all_predictions.append(tmp_prediction)\n", " " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "df[\"prediction\"] = all_predictions\n", "\n", "df.head()\n", "\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [ "df.to_csv(\"Evaluation Sentences - Tabellenblatt1.csv\", sep='\\t', encoding='utf-8')" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "collapsed": true }, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.3" } }, "nbformat": 4, "nbformat_minor": 2 }