{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "Using TensorFlow backend.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n", "[nltk_data] Package punkt is already up-to-date!\n", "[nltk_data] Downloading package averaged_perceptron_tagger to\n", "[nltk_data] /home/jonas/nltk_data...\n", "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n", "[nltk_data] date!\n", "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n", "[nltk_data] Package wordnet is already up-to-date!\n" ] }, { "data": { "text/plain": [ "True" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "import pandas as pd\n", "from IPython.display import clear_output, Markdown, Math\n", "import ipywidgets as widgets\n", "import os\n", "import glob\n", "import json\n", "import numpy as np\n", "import itertools\n", "import sklearn.utils as sku\n", "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import MultiLabelBinarizer\n", "import nltk\n", "from keras.models import load_model\n", "from sklearn.externals import joblib\n", "import pickle\n", "import operator\n", "from sklearn.pipeline import Pipeline\n", "nltk.download('punkt')\n", "nltk.download('averaged_perceptron_tagger')\n", "nltk.download('wordnet')" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import sys\n", "sys.path.append(\"..\")\n", "\n", "from Tools.Emoji_Distance import sentiment_vector_to_emoji\n", "from Tools.Emoji_Distance import emoji_to_sentiment_vector\n", "\n", "def emoji2sent(emoji_arr, only_emoticons=True):\n", " return np.array([emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])\n", "\n", "def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):\n", " return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "SINGLE_LABEL = True" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## classes and functions we are using later:\n", "----" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* functions for selecting items from a set / list" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def latest(lst):\n", " return lst[-1] if len(lst) > 0 else 'X' \n", "def most_common(lst):\n", " # trying to find the most common used emoji in the given lst\n", " return max(set(lst), key=lst.count) if len(lst) > 0 else \"X\" # setting label to 'X' if there is an empty emoji list" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* our emoji blacklist (skin and sex modifiers)" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "# defining blacklist for modifier emojis:\n", "emoji_blacklist = set([\n", " chr(0x1F3FB),\n", " chr(0x1F3FC),\n", " chr(0x1F3FD),\n", " chr(0x1F3FE),\n", " chr(0x1F3FF),\n", " chr(0x2642),\n", " chr(0x2640)\n", "])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* lemmatization helper functions" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], 
"source": [ "from nltk.stem.snowball import SnowballStemmer\n", "from nltk.stem import WordNetLemmatizer\n", "from nltk import pos_tag\n", "from nltk import word_tokenize\n", "from nltk.corpus import wordnet\n", "\n", "def get_wordnet_pos(treebank_tag):\n", "\n", " if treebank_tag.startswith('J'):\n", " return wordnet.ADJ\n", " elif treebank_tag.startswith('V'):\n", " return wordnet.VERB\n", " elif treebank_tag.startswith('N'):\n", " return wordnet.NOUN\n", " elif treebank_tag.startswith('R'):\n", " return wordnet.ADV\n", " else:\n", " return wordnet.NOUN" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* the sample data manager loads and preprocesses data" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "class sample_data_manager(object):\n", " @staticmethod\n", " def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None):\n", " sdm = sample_data_manager(path)\n", " sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)\n", " if apply_stemming:\n", " sdm.apply_stemming_and_lemmatization()\n", " \n", " sdm.generate_emoji_count_and_weights()\n", " \n", " if n_top_emojis > 0:\n", " sdm.filter_by_top_emojis(n_top=n_top_emojis)\n", " \n", " return sdm\n", " \n", " \n", " def __init__(self, data_root_folder:str):\n", " self.data_root_folder = data_root_folder\n", " self.json_files = sorted(glob.glob(self.data_root_folder + \"/*.json\"))\n", " self.n_files = len(self.json_files)\n", " self.raw_data = None\n", " self.emojis = None\n", " self.plain_text = None\n", " self.labels = None\n", " self.emoji_count = None\n", " self.emoji_weights = None\n", " self.X = None\n", " self.y = None\n", " self.Xt = None\n", " self.yt = None\n", " self.top_emojis = None\n", " \n", " def read_files(self, file_index_range:list, only_emoticons=True):\n", " assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files\n", " for i in file_index_range:\n", " print(\"reading file: \" + self.json_files[i] + \"...\")\n", " if self.raw_data is None:\n", " self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n", " else:\n", " self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n", " \n", " self.emojis = self.raw_data['EMOJI']\n", " self.plain_text = self.raw_data['text']\n", " \n", " # replacing keywords. TODO: maybe these information can be extracted and used\n", " self.plain_text = self.plain_text.str.replace(\"(||)\",\"\").str.replace(\"[\" + \"\".join(list(emoji_blacklist)) + \"]\",\"\")\n", " \n", " # so far filtering for the latest emoji. 
TODO: maybe there are also better approaches\n", " self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons )\n", " \n", " # and filter out all samples we have no label for:\n", " wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1)) \n", "\n", " self.labels = self.labels[np.invert(wrong_labels)]\n", " self.plain_text = self.plain_text[np.invert(wrong_labels)]\n", " self.emojis = self.emojis[np.invert(wrong_labels)]\n", " \n", " print(\"imported \" + str(len(self.labels)) + \" samples\")\n", " \n", " def apply_stemming_and_lemmatization(self):\n", " stemmer = SnowballStemmer(\"english\")\n", " for key in self.plain_text.keys():\n", " stemmed_sent = []\n", " for word in self.plain_text[key].split(\" \"):\n", " word_stemmed = stemmer.stem(word)\n", " stemmed_sent.append(word_stemmed)\n", " stemmed_sent = (\" \").join(stemmed_sent)\n", " self.plain_text[key] = stemmed_sent\n", " \n", " lemmatizer = WordNetLemmatizer()\n", " for key in self.plain_text.keys():\n", " lemmatized_sent = []\n", " sent_pos = pos_tag(word_tokenize(self.plain_text[key]))\n", " for word in sent_pos:\n", " wordnet_pos = get_wordnet_pos(word[1].lower())\n", " word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)\n", " lemmatized_sent.append(word_lemmatized)\n", " lemmatized_sent = (\" \").join(lemmatized_sent)\n", " self.plain_text[key] = lemmatized_sent\n", " \n", " def generate_emoji_count_and_weights(self):\n", " self.emoji_count = {}\n", " for e_list in self.emojis:\n", " for e in set(e_list):\n", " if e not in self.emoji_count:\n", " self.emoji_count[e] = 0\n", " self.emoji_count[e] += 1\n", " \n", " emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])\n", "\n", " self.emoji_weights = {}\n", " for e in self.emoji_count:\n", " # tfidf for emojis\n", " self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))\n", "\n", " weights_sum= sum([self.emoji_weights[x] for x in self.emoji_weights])\n", "\n", " # normalize:\n", " for e in self.emoji_weights:\n", " self.emoji_weights[e] = self.emoji_weights[e] / weights_sum\n", "\n", " self.emoji_weights['X'] = 0 # dummy values\n", " self.emoji_count['X'] = 0\n", " \n", " def get_emoji_count(self):\n", " sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))\n", " #display(sorted_emoji_count)\n", " return sorted_emoji_count\n", " \n", " def filter_by_top_emojis(self,n_top = 20):\n", " self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]\n", " in_top = [sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n", " self.labels = self.labels[in_top]\n", " self.plain_text = self.plain_text[in_top]\n", " self.emojis = self.emojis[in_top]\n", " print(\"remaining samples after top emoji filtering: \", len(self.labels))\n", " \n", " def create_train_test_split(self, split = 0.1, random_state = 4222):\n", " self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state)\n", "\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* the pipeline manager saves and stores sklearn pipelines. 
Keras models are handled differently, so the have to be named explicitly during save and load operations" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "class pipeline_manager(object):\n", " @staticmethod\n", " def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n", " pm = pipeline_manager(keras_models=keras_models)\n", " pm.load(file_prefix, all_models)\n", " return pm\n", " \n", " @staticmethod\n", " def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager):\n", " '''\n", " creates pipeline with vectorizer and keras classifier\n", " '''\n", " from keras.models import Sequential\n", " from keras.layers import Dense\n", " \n", " if sdm.X is None:\n", " sdm.create_train_test_split()\n", " \n", " vec_train = vectorizer.fit_transform(sdm.X)\n", " vec_test = vectorizer.transform(sdm.Xt)\n", " # creating keras model:\n", " model=Sequential()\n", " \n", " keras_layers = []\n", " first_layer = True\n", " for layer in layers:\n", " if first_layer:\n", " model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([\" \"])[0]._shape[1]))\n", " first_layer = False\n", " else:\n", " model.add(Dense(units=layer[0], activation=layer[1]))\n", " \n", " model.compile(loss='mean_squared_error',\n", " optimizer='adam')\n", " \n", " pipeline = Pipeline([\n", " ('vectorizer',vectorizer),\n", " ('keras_model', model)\n", " ])\n", " \n", " return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])\n", " \n", " @staticmethod\n", " def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):\n", " '''\n", " creates a pipeline with vectorizer and classifier for non keras classifiers\n", " if sample data manager is given, the vectorizer will be also fitted!\n", " '''\n", " if sdm is not None:\n", " if sdm.X is None:\n", " sdm.create_train_test_split()\n", "\n", " vec_train = vectorizer.fit_transform(sdm.X)\n", " vec_test = vectorizer.transform(sdm.Xt)\n", " \n", " pipeline = Pipeline([\n", " ('vectorizer',vectorizer),\n", " ('classifier', classifier)\n", " ])\n", " \n", " return pipeline_manager(pipeline=pipeline, keras_models=[])\n", " \n", " def __init__(self, pipeline = None, keras_models = []):\n", " self.pipeline = pipeline\n", " self.additional_objects = {}\n", " self.keras_models = keras_models\n", " \n", " def save(self, prefix:str):\n", " print(self.keras_models)\n", " # doing this like explained here: https://stackoverflow.com/a/43415459\n", " for step in self.pipeline.named_steps:\n", " if step in self.keras_models:\n", " self.pipeline.named_steps[step].model.save(prefix + \".\" + step)\n", " else:\n", " joblib.dump(self.pipeline.named_steps[step], prefix + \".\" + str(step))\n", " \n", " load_command = \"pipeline_manager.load_pipeline_from_files( '\"\n", " load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n", " load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n", " \n", " import __main__ as main\n", " if not hasattr(main, '__file__'):\n", " display(\"saved pipeline. It can be loaded the following way:\")\n", " display(Markdown(\"> ```\\n\"+load_command+\"\\n```\"))\n", " else:\n", " print(\"saved pipeline. 
It can be loaded the following way:\")\n", " print(load_command)\n", " \n", " \n", " def load(self, prefix:str, models = []):\n", " self.pipeline = None\n", " model_list = []\n", " for model in models:\n", " if model in self.keras_models:\n", " model_list.append((model, load_model(prefix + \".\" + model)))\n", " else:\n", " model_list.append((model, joblib.load(prefix+\".\" + model)))\n", " self.pipeline = Pipeline(model_list)\n", " \n", " def fit(self,X,y):\n", " self.pipeline.fit(X,y)\n", " \n", " def predict(self,X):\n", " return self.pipeline.predict(X)\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* the trainer class passes Data from the sample manager to the pipeline manager" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "class trainer(object):\n", " def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):\n", " self.sdm = sdm\n", " self.pm = pm\n", " \n", " def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):\n", " # TODO: make batch fitting available here (eg: continous waiting for data and fitting them)\n", " if self.sdm.X is None:\n", " self.sdm.create_train_test_split()\n", " disabled_fits = {}\n", " disabled_fit_transforms = {}\n", " \n", " named_steps = self.pm.pipeline.named_steps\n", " \n", " for s in disabled_fit_steps:\n", " # now it gets a little bit dirty:\n", " # replace fit functions we don't want to call again (e.g. for vectorizers)\n", " disabled_fits[s] = named_steps[s].fit\n", " disabled_fit_transforms[s] = named_steps[s].fit_transform\n", " named_steps[s].fit = lambda self, X, y=None: self\n", " named_steps[s].fit_transform = named_steps[s].transform\n", " \n", " self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n", " \n", " # restore replaced fit functions:\n", " for s in disabled_fit_steps:\n", " named_steps[s].fit = disabled_fits[s]\n", " named_steps[s].fit_transform = disabled_fit_transforms[s]\n", " \n", " def test(self):\n", " '''\n", " return: prediction:list, teacher:list\n", " '''\n", " if self.sdm.X is None:\n", " self.sdm.create_train_test_split()\n", " return self.pm.predict(self.sdm.Xt), self.sdm.yt\n", "\n", " " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## Train" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* when in notebook environment: run the stuff below:" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "reading file: ./data_en/2017-11-01.json...\n", "imported 33368 samples\n", "remaining samples after top emoji filtering: 26197\n" ] } ], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", " # we are in an interactive environment (probably in jupyter)\n", " # load data:\n", " sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1))\n", " " ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/1\n", "10000/10000 [==============================] - 109s 11ms/step - loss: 0.0197\n" ] } ], "source": [ " #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n", " # layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\n", " pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n", " layers=[(2500, 
'relu'),(3,None)], sdm=sdm)\n", " \n", " tr = trainer(sdm=sdm, pm=pm)\n", " tr.fit(10000)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## save classifier" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['keras_model']\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/home/jonas/.local/lib/python3.6/site-packages/keras/engine/sequential.py:109: UserWarning: `Sequential.model` is deprecated. `Sequential` is a subclass of `Model`, you can just use your `Sequential` instance directly.\n", " warnings.warn('`Sequential.model` is deprecated. '\n" ] }, { "data": { "text/plain": [ "'saved pipeline. It can be loaded the following way:'" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/markdown": [ "> ```\n", "pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n", "```" ], "text/plain": [ "" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", " pm.save('custom_classifier')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## Prediction" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "array([[0.4423941 , 0.22976081, 0.26076168],\n", " [0.75167173, 0.2919423 , 0.3423372 ],\n", " [0.48943695, 0.21931192, 0.22773138],\n", " ...,\n", " [0.51003224, 0.26002786, 0.25588542],\n", " [0.5808168 , 0.30632192, 0.2964917 ],\n", " [0.39000767, 0.31723523, 0.24713083]], dtype=float32)" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/plain": [ "array([[0.46813021, 0.24716181, 0.28470797],\n", " [0.46813021, 0.24716181, 0.28470797],\n", " [0.70401758, 0.05932203, 0.23666039],\n", " ...,\n", " [0.46813021, 0.24716181, 0.28470797],\n", " [0.46813021, 0.24716181, 0.28470797],\n", " [0.46813021, 0.24716181, 0.28470797]])" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "prediction variance: 0.009700283\n", "teacher variance: 0.03341702104519965\n" ] }, { "data": { "text/html": [ "
(HTML rendering of the five-row test-result preview with columns text, teacher, teacher_sentiment, predict and predicted_sentiment; identical to the text/plain table below)
" ], "text/plain": [ " text teacher \\\n", "35671 i feel like i care so much more in everi situat πŸ˜‚ \n", "25683 i did not meat to add that 2 there ... hav see... πŸ˜‚ \n", "8985 never… 😊 \n", "5410 lmao on me ! ! ! wtf wa he suppos to say πŸ˜‚ \n", "62611 this dude alway help me get through my school ... 😊 \n", "\n", " teacher_sentiment predict \\\n", "35671 [0.46813021474490496, 0.24716181096977158, 0.2... πŸ˜‚ \n", "25683 [0.46813021474490496, 0.24716181096977158, 0.2... 😌 \n", "8985 [0.7040175768989329, 0.059322033898305086, 0.2... πŸ˜‚ \n", "5410 [0.46813021474490496, 0.24716181096977158, 0.2... 😒 \n", "62611 [0.7040175768989329, 0.059322033898305086, 0.2... πŸ˜‚ \n", "\n", " predicted_sentiment \n", "35671 [0.44239410758018494, 0.2297608107328415, 0.26... \n", "25683 [0.7516717314720154, 0.291942298412323, 0.3423... \n", "8985 [0.48943695425987244, 0.21931192278862, 0.2277... \n", "5410 [0.3661550283432007, 0.32579296827316284, 0.23... \n", "62611 [0.48689204454421997, 0.20729433000087738, 0.2... " ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "Mean Squared Error: [0.02340565 0.02344435 0.00374819]\n", "Variance teacher: [0.02183094 0.02513847 0.00285735]\n", "Variance prediction: [0.0083875 0.00473354 0.00115709]\n" ] } ], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", " pred, teacher = tr.test()\n", " \n", " display(pred)\n", " display(teacher)\n", " \n", " print('prediction variance: ', np.linalg.norm(np.var(pred, axis=0)))\n", " print('teacher variance: ', np.linalg.norm(np.var(teacher, axis=0)))\n", " \n", " # build a dataframe to visualize test results:\n", " testlist = pd.DataFrame({'text': sdm.Xt, \n", " 'teacher': sent2emoji(sdm.yt),\n", " 'teacher_sentiment': sdm.yt.tolist(),\n", " 'predict': sent2emoji(pred, custom_target_emojis=sdm.top_emojis),\n", " 'predicted_sentiment': pred.tolist()})\n", " # display:\n", " display(testlist.head())\n", " \n", " # mean squared error:\n", " teacher_sentiments = np.array([sample[1]['teacher_sentiment'] for sample in testlist.iterrows()])\n", " predicted_sentiments = np.array([sample[1]['predicted_sentiment'] for sample in testlist.iterrows()])\n", "\n", " mean_squared_error = ((teacher_sentiments - predicted_sentiments)**2).mean(axis=0)\n", " print(\"Mean Squared Error: \", mean_squared_error)\n", " print(\"Variance teacher: \", np.var(teacher_sentiments, axis=0))\n", " print(\"Variance prediction: \", np.var(predicted_sentiments, axis=0))\n", " \n", " # save to csv:\n", " testlist.to_csv('test.csv')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## Load classifier" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "2ca4e06fcd4f41c2bfd161f9f16ca594", "version_major": 2, "version_minor": 0 }, "text/plain": [ "Text(value='')" ] }, "metadata": {}, "output_type": "display_data" }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "a39abb79d70e4ae1952b2d928cfab174", "version_major": 2, "version_minor": 0 }, "text/plain": [ "VBox(children=(Button(description='get emoji', icon='check', style=ButtonStyle(), tooltip='Click me'), Output(…" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", " try:\n", " pm\n", " except NameError:\n", " pass\n", " else:\n", " del pm # delete existing pipeline manager if ther is one\n", "\n", " 
pm = pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n", " lookup_emojis = [#'πŸ˜‚',\n", " '😭',\n", " '😍',\n", " '😩',\n", " '😊',\n", " '😘',\n", " 'πŸ™',\n", " 'πŸ™Œ',\n", " 'πŸ˜‰',\n", " '😁',\n", " 'πŸ˜…',\n", " '😎',\n", " '😒',\n", " 'πŸ˜’',\n", " '😏',\n", " '😌',\n", " 'πŸ˜”',\n", " 'πŸ˜‹',\n", " 'πŸ˜€',\n", " '😀']\n", " out = widgets.Output()\n", "\n", " t = widgets.Text()\n", " b = widgets.Button(\n", " description='get emoji',\n", " disabled=False,\n", " button_style='', # 'success', 'info', 'warning', 'danger' or ''\n", " tooltip='Click me',\n", " icon='check'\n", " )\n", "\n", "\n", "\n", " def handle_submit(sender):\n", " with out:\n", " clear_output()\n", " with out:\n", " pred = pm.predict([t.value])\n", "\n", " display(Markdown(\"# Predicted Emoji \" + str(sent2emoji(pred, lookup_emojis)[0])))\n", " display(Markdown(\"# Sentiment Vector: $$ \\pmatrix{\" + str(pred[0,0]) +\n", " \"\\\\\\\\\" + str(pred[0,1]) + \"\\\\\\\\\" + str(pred[0,2]) + \"}$$\"))\n", "\n", " b.on_click(handle_submit)\n", "\n", " display(t)\n", " display(widgets.VBox([b, out])) " ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" } }, "nbformat": 4, "nbformat_minor": 2 }