updated notebook according to changes in the python file
@@ -2,43 +2,9 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "/home/jonas/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
-      "  from ._conv import register_converters as _register_converters\n",
-      "Using TensorFlow backend.\n"
-     ]
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
-      "[nltk_data]   Package punkt is already up-to-date!\n",
-      "[nltk_data] Downloading package averaged_perceptron_tagger to\n",
-      "[nltk_data]     /home/jonas/nltk_data...\n",
-      "[nltk_data]   Package averaged_perceptron_tagger is already up-to-\n",
-      "[nltk_data]       date!\n",
-      "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
-      "[nltk_data]   Package wordnet is already up-to-date!\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "True"
-      ]
-     },
-     "execution_count": 1,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
+   "outputs": [],
    "source": [
     "import pandas as pd\n",
     "from IPython.display import clear_output, Markdown, Math\n",
@@ -59,6 +25,7 @@
     "import pickle\n",
     "import operator\n",
     "from sklearn.pipeline import Pipeline\n",
+    "import json\n",
     "nltk.download('punkt')\n",
     "nltk.download('averaged_perceptron_tagger')\n",
     "nltk.download('wordnet')"
@@ -66,7 +33,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -84,7 +51,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -109,7 +76,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -129,7 +96,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -154,7 +121,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -194,13 +161,13 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "class sample_data_manager(object):\n",
     "    @staticmethod\n",
-    "    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):\n",
+    "    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):\n",
     "        \"\"\"\n",
     "        generate, read and process train data in one step.\n",
     "        \n",
@@ -214,7 +181,7 @@
     "        @return: sample_data_manager object\n",
     "        \"\"\"\n",
     "        sdm = sample_data_manager(path)\n",
-    "        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)\n",
+    "        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)\n",
     "        if apply_stemming:\n",
     "            sdm.apply_stemming_and_lemmatization()\n",
     "        \n",
@@ -254,7 +221,7 @@
     "        self.kmeans_cluster = None\n",
     "        self.label_binarizer = None\n",
     "    \n",
-    "    def read_files(self, file_index_range:list, only_emoticons=True):\n",
+    "    def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):\n",
     "        \"\"\"\n",
     "        reading (multiple) files to one panda table.\n",
     "        \n",
@@ -268,7 +235,8 @@
     "                self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n",
     "            else:\n",
     "                self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n",
-    "        \n",
+    "            if progress_callback is not None:\n",
+    "                progress_callback()\n",
     "        self.emojis = self.raw_data['EMOJI']\n",
     "        self.plain_text = self.raw_data['text']\n",
     "        \n",
@@ -396,8 +364,7 @@
     "        \n",
     "        labels = self.binary_labels if self.use_binary_labels else self.labels\n",
     "        assert labels is not None\n",
-    "        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n",
-    "\n"
+    "        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n"
    ]
   },
   {
@@ -409,12 +376,26 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
     "class pipeline_manager(object):\n",
     "    @staticmethod\n",
+    "    def load_from_pipeline_file(pipeline_file:str):\n",
+    "        \"\"\"\n",
+    "        loading a json configuration file and using it's paramters to call 'load_pipeline_from_files'\n",
+    "        \"\"\"\n",
+    "        with open(pipeline_file, 'r') as f:\n",
+    "            d = json.load(f)\n",
+    "        \n",
+    "        keras_models = d['keras_models']\n",
+    "        all_models = d['all_models']\n",
+    "        \n",
+    "        return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)\n",
+    "\n",
+    "\n",
+    "    @staticmethod\n",
     "    def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n",
     "        \"\"\"\n",
     "        load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'\n",
@@ -527,6 +508,7 @@
     "        @param prefix: file prefix for all models\n",
     "        \"\"\"\n",
     "        \n",
+    "\n",
     "        print(self.keras_models)\n",
     "        # doing this like explained here: https://stackoverflow.com/a/43415459\n",
     "        for step in self.pipeline.named_steps:\n",
@@ -538,6 +520,9 @@
     "        load_command = \"pipeline_manager.load_pipeline_from_files( '\"\n",
     "        load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n",
     "        load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n",
+    "\n",
+    "        with open(prefix + '.pipeline', 'w') as outfile:\n",
+    "            json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)\n",
     "        \n",
     "        import __main__ as main\n",
     "        if not hasattr(main, '__file__'):\n",
@@ -584,7 +569,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -594,7 +579,7 @@
     "        self.sdm = sdm\n",
     "        self.pm = pm\n",
     "    \n",
-    "    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):\n",
+    "    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):\n",
     "        \"\"\"\n",
     "        fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline models containing the vectorizer have to be named explicitly\n",
     "        \n",
@@ -607,22 +592,44 @@
     "        disabled_fits = {}\n",
     "        disabled_fit_transforms = {}\n",
     "        \n",
+    "        disabled_keras_fits = {}\n",
+    "        \n",
     "        named_steps = self.pm.pipeline.named_steps\n",
     "        \n",
     "        for s in disabled_fit_steps:\n",
-    "            # now it gets a little bit dirty:\n",
+    "            # now it gets really dirty:\n",
     "            # replace fit functions we don't want to call again (e.g. for vectorizers)\n",
     "            disabled_fits[s] = named_steps[s].fit\n",
     "            disabled_fit_transforms[s] = named_steps[s].fit_transform\n",
     "            named_steps[s].fit = lambda self, X, y=None: self\n",
     "            named_steps[s].fit_transform = named_steps[s].transform\n",
-    "            \n",
-    "        self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n",
+    "        \n",
+    "        for k in keras_batch_fitting_layer:\n",
+    "            # forcing batch fitting on keras\n",
+    "            disabled_keras_fits[k]=named_steps[k].fit\n",
+    "            named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch progressing!?!?!\n",
+    "        \n",
+    "        if batch_size is None:\n",
+    "            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n",
+    "        else:\n",
+    "            n = len(self.sdm.X) // batch_size\n",
+    "            for i in range(n_epochs):\n",
+    "                for j in range(n):\n",
+    "                    self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))\n",
+    "                    if progress_callback is not None:\n",
+    "                        progress_callback()\n",
+    "                    pred, yt = self.test()\n",
+    "                    mean_squared_error = ((pred - yt)**2).mean(axis=0)\n",
+    "                    print(\"#\" + str(j) + \": loss: \", mean_squared_error)\n",
+    "\n",
     "        \n",
     "        # restore replaced fit functions:\n",
     "        for s in disabled_fit_steps:\n",
     "            named_steps[s].fit = disabled_fits[s]\n",
     "            named_steps[s].fit_transform = disabled_fit_transforms[s]\n",
+    "        \n",
+    "        for k in keras_batch_fitting_layer:\n",
+    "            named_steps[k].fit = disabled_keras_fits[k]\n",
     "    \n",
     "    def test(self):\n",
     "        '''\n",
@@ -630,9 +637,7 @@
     "        '''\n",
     "        if self.sdm.X is None:\n",
     "            self.sdm.create_train_test_split()\n",
-    "        return self.pm.predict(self.sdm.Xt), self.sdm.yt\n",
-    "\n",
-    "    "
+    "        return self.pm.predict(self.sdm.Xt), self.sdm.yt\n"
    ]
   },
   {
@@ -652,21 +657,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 10,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "reading file: ./data_en/2017-11-01.json...\n",
-      "imported 33368 samples\n",
-      "remaining samples after top emoji filtering:  26197\n",
-      "Epoch 1/1\n",
-      "100/100 [==============================] - 3s 28ms/step - loss: 0.1230\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import __main__ as main\n",
     "if not hasattr(main, '__file__'):\n",
@@ -698,40 +691,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "['keras_model']\n"
-     ]
-    },
-    {
-     "data": {
-      "text/plain": [
-       "'saved pipeline. It can be loaded the following way:'"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "text/markdown": [
-       "> ```\n",
-       "pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
-       "```"
-      ],
-      "text/plain": [
-       "<IPython.core.display.Markdown object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "import __main__ as main\n",
     "if not hasattr(main, '__file__'):\n",
@@ -750,156 +712,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "array([[0.16062996, 0.08324276, 0.09433182],\n",
-       "       [0.16413   , 0.09421383, 0.07578427],\n",
-       "       [0.11994962, 0.05705731, 0.06310127],\n",
-       "       ...,\n",
-       "       [0.13887292, 0.08502828, 0.08176519],\n",
-       "       [0.18185864, 0.09223703, 0.10704609],\n",
-       "       [0.17687687, 0.09147045, 0.10650696]], dtype=float32)"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "text/plain": [
-       "array([[0.46813021, 0.24716181, 0.28470797],\n",
-       "       [0.46813021, 0.24716181, 0.28470797],\n",
-       "       [0.70401758, 0.05932203, 0.23666039],\n",
-       "       ...,\n",
-       "       [0.46813021, 0.24716181, 0.28470797],\n",
-       "       [0.46813021, 0.24716181, 0.28470797],\n",
-       "       [0.46813021, 0.24716181, 0.28470797]])"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "prediction variance:  0.0005431187\n",
-      "teacher variance:  0.03341702104519965\n"
-     ]
-    },
-    {
-     "data": {
-      "text/html": [
-       "<div>\n",
-       "<style scoped>\n",
-       "    .dataframe tbody tr th:only-of-type {\n",
-       "        vertical-align: middle;\n",
-       "    }\n",
-       "\n",
-       "    .dataframe tbody tr th {\n",
-       "        vertical-align: top;\n",
-       "    }\n",
-       "\n",
-       "    .dataframe thead th {\n",
-       "        text-align: right;\n",
-       "    }\n",
-       "</style>\n",
-       "<table border=\"1\" class=\"dataframe\">\n",
-       "  <thead>\n",
-       "    <tr style=\"text-align: right;\">\n",
-       "      <th></th>\n",
-       "      <th>predict</th>\n",
-       "      <th>predicted_sentiment</th>\n",
-       "      <th>teacher</th>\n",
-       "      <th>teacher_sentiment</th>\n",
-       "      <th>text</th>\n",
-       "    </tr>\n",
-       "  </thead>\n",
-       "  <tbody>\n",
-       "    <tr>\n",
-       "      <th>35671</th>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.16062995791435242, 0.0832427591085434, 0.09...</td>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
-       "      <td>i feel like i care so much more in everi situat</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>25683</th>\n",
-       "      <td>😢</td>\n",
-       "      <td>[0.16413000226020813, 0.0942138284444809, 0.07...</td>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
-       "      <td>i did not meat to add that 2 there ... hav see...</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>8985</th>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.11994962394237518, 0.05705730617046356, 0.0...</td>\n",
-       "      <td>😊</td>\n",
-       "      <td>[0.7040175768989329, 0.059322033898305086, 0.2...</td>\n",
-       "      <td>never…</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>5410</th>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.18114930391311646, 0.10199417173862457, 0.1...</td>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
-       "      <td>lmao on me ! ! ! wtf wa he suppos to say</td>\n",
-       "    </tr>\n",
-       "    <tr>\n",
-       "      <th>62611</th>\n",
-       "      <td>😂</td>\n",
-       "      <td>[0.16997836530208588, 0.08633847534656525, 0.0...</td>\n",
-       "      <td>😊</td>\n",
-       "      <td>[0.7040175768989329, 0.059322033898305086, 0.2...</td>\n",
-       "      <td>this dude alway help me get through my school ...</td>\n",
-       "    </tr>\n",
-       "  </tbody>\n",
-       "</table>\n",
-       "</div>"
-      ],
-      "text/plain": [
-       "      predict                                predicted_sentiment teacher  \\\n",
-       "35671       😂  [0.16062995791435242, 0.0832427591085434, 0.09...       😂   \n",
-       "25683       😢  [0.16413000226020813, 0.0942138284444809, 0.07...       😂   \n",
-       "8985        😂  [0.11994962394237518, 0.05705730617046356, 0.0...       😊   \n",
-       "5410        😂  [0.18114930391311646, 0.10199417173862457, 0.1...       😂   \n",
-       "62611       😂  [0.16997836530208588, 0.08633847534656525, 0.0...       😊   \n",
-       "\n",
-       "                                       teacher_sentiment  \\\n",
-       "35671  [0.46813021474490496, 0.24716181096977158, 0.2...   \n",
-       "25683  [0.46813021474490496, 0.24716181096977158, 0.2...   \n",
-       "8985   [0.7040175768989329, 0.059322033898305086, 0.2...   \n",
-       "5410   [0.46813021474490496, 0.24716181096977158, 0.2...   \n",
-       "62611  [0.7040175768989329, 0.059322033898305086, 0.2...   \n",
-       "\n",
-       "                                                    text  \n",
-       "35671    i feel like i care so much more in everi situat  \n",
-       "25683  i did not meat to add that 2 there ... hav see...  \n",
-       "8985                                              never…  \n",
-       "5410            lmao on me ! ! ! wtf wa he suppos to say  \n",
-       "62611  this dude alway help me get through my school ...  "
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Mean Squared Error:  [0.13877691 0.04682433 0.02937794]\n",
-      "Variance teacher:  [0.02183094 0.02513847 0.00285735]\n",
-      "Variance prediction:  [0.00046378 0.00019441 0.00020516]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import __main__ as main\n",
     "if not hasattr(main, '__file__'):\n",
@@ -945,68 +760,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "003ae16760b04c25bdc9f2fe2193747a",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/html": [
-       "<p>Failed to display Jupyter Widget of type <code>Text</code>.</p>\n",
-       "<p>\n",
-       "  If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
-       "  that the widgets JavaScript is still loading. If this message persists, it\n",
-       "  likely means that the widgets JavaScript library is either not installed or\n",
-       "  not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
-       "  Widgets Documentation</a> for setup instructions.\n",
-       "</p>\n",
-       "<p>\n",
-       "  If you're reading this message in another frontend (for example, a static\n",
-       "  rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
-       "  it may mean that your frontend doesn't currently support widgets.\n",
-       "</p>\n"
-      ],
-      "text/plain": [
-       "Text(value='')"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    },
-    {
-     "data": {
-      "application/vnd.jupyter.widget-view+json": {
-       "model_id": "4580af82b30545f197a41e4359010556",
-       "version_major": 2,
-       "version_minor": 0
-      },
-      "text/html": [
-       "<p>Failed to display Jupyter Widget of type <code>VBox</code>.</p>\n",
-       "<p>\n",
-       "  If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
-       "  that the widgets JavaScript is still loading. If this message persists, it\n",
-       "  likely means that the widgets JavaScript library is either not installed or\n",
-       "  not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
-       "  Widgets Documentation</a> for setup instructions.\n",
-       "</p>\n",
-       "<p>\n",
-       "  If you're reading this message in another frontend (for example, a static\n",
-       "  rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
-       "  it may mean that your frontend doesn't currently support widgets.\n",
-       "</p>\n"
-      ],
-      "text/plain": [
-       "VBox(children=(Button(description='get emoji', icon='check', style=ButtonStyle(), tooltip='Click me'), Output()))"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "import __main__ as main\n",
     "if not hasattr(main, '__file__'):\n",