From 10fd5817e708a2b837b6e0df8f9fa508834b1a35 Mon Sep 17 00:00:00 2001 From: Jonas Weinz Date: Tue, 26 Jun 2018 14:47:39 +0200 Subject: [PATCH] updated notebook according to changes in the python file --- .../simple_twitter_learning.ipynb | 384 ++++-------------- 1 file changed, 70 insertions(+), 314 deletions(-) diff --git a/Project/simple_approach/simple_twitter_learning.ipynb b/Project/simple_approach/simple_twitter_learning.ipynb index 277e2b8..52f4f5b 100644 --- a/Project/simple_approach/simple_twitter_learning.ipynb +++ b/Project/simple_approach/simple_twitter_learning.ipynb @@ -2,43 +2,9 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/home/jonas/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", - " from ._conv import register_converters as _register_converters\n", - "Using TensorFlow backend.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n", - "[nltk_data] Downloading package averaged_perceptron_tagger to\n", - "[nltk_data] /home/jonas/nltk_data...\n", - "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n", - "[nltk_data] date!\n", - "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n", - "[nltk_data] Package wordnet is already up-to-date!\n" - ] - }, - { - "data": { - "text/plain": [ - "True" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "import pandas as pd\n", "from IPython.display import clear_output, Markdown, Math\n", @@ -59,6 +25,7 @@ "import pickle\n", "import operator\n", "from sklearn.pipeline import Pipeline\n", + "import json\n", "nltk.download('punkt')\n", "nltk.download('averaged_perceptron_tagger')\n", "nltk.download('wordnet')" @@ -66,7 +33,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -84,7 +51,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -109,7 +76,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -129,7 +96,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -154,7 +121,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -194,13 +161,13 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class sample_data_manager(object):\n", " @staticmethod\n", - " def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):\n", + " def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):\n", " \"\"\"\n", " generate, read and process train data in one step.\n", " \n", @@ -214,7 +181,7 @@ " @return: sample_data_manager object\n", " \"\"\"\n", " sdm = 
sample_data_manager(path)\n", - " sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)\n", + " sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)\n", " if apply_stemming:\n", " sdm.apply_stemming_and_lemmatization()\n", " \n", @@ -254,7 +221,7 @@ " self.kmeans_cluster = None\n", " self.label_binarizer = None\n", " \n", - " def read_files(self, file_index_range:list, only_emoticons=True):\n", + " def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):\n", " \"\"\"\n", " reading (multiple) files to one panda table.\n", " \n", @@ -268,7 +235,8 @@ " self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n", " else:\n", " self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n", - " \n", + " if progress_callback is not None:\n", + " progress_callback()\n", " self.emojis = self.raw_data['EMOJI']\n", " self.plain_text = self.raw_data['text']\n", " \n", @@ -396,8 +364,7 @@ " \n", " labels = self.binary_labels if self.use_binary_labels else self.labels\n", " assert labels is not None\n", - " self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n", - "\n" + " self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n" ] }, { @@ -409,12 +376,26 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "class pipeline_manager(object):\n", " @staticmethod\n", + " def load_from_pipeline_file(pipeline_file:str):\n", + " \"\"\"\n", + " loading a json configuration file and using it's paramters to call 'load_pipeline_from_files'\n", + " \"\"\"\n", + " with open(pipeline_file, 'r') as f:\n", + " d = json.load(f)\n", + " \n", + " keras_models = d['keras_models']\n", + " all_models = d['all_models']\n", + " \n", + " return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)\n", + "\n", + "\n", + " @staticmethod\n", " def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n", " \"\"\"\n", " load a pipeline from files. 
A pipeline should be represented by multiple model files in the form '.'\n", @@ -527,6 +508,7 @@ " @param prefix: file prefix for all models\n", " \"\"\"\n", " \n", + "\n", " print(self.keras_models)\n", " # doing this like explained here: https://stackoverflow.com/a/43415459\n", " for step in self.pipeline.named_steps:\n", @@ -538,6 +520,9 @@ " load_command = \"pipeline_manager.load_pipeline_from_files( '\"\n", " load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n", " load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n", + "\n", + " with open(prefix + '.pipeline', 'w') as outfile:\n", + " json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)\n", " \n", " import __main__ as main\n", " if not hasattr(main, '__file__'):\n", @@ -584,7 +569,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -594,7 +579,7 @@ " self.sdm = sdm\n", " self.pm = pm\n", " \n", - " def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):\n", + " def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):\n", " \"\"\"\n", " fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline models containing the vectorizer have to be named explicitly\n", " \n", @@ -607,22 +592,44 @@ " disabled_fits = {}\n", " disabled_fit_transforms = {}\n", " \n", + " disabled_keras_fits = {}\n", + " \n", " named_steps = self.pm.pipeline.named_steps\n", " \n", " for s in disabled_fit_steps:\n", - " # now it gets a little bit dirty:\n", + " # now it gets really dirty:\n", " # replace fit functions we don't want to call again (e.g. 
for vectorizers)\n", " disabled_fits[s] = named_steps[s].fit\n", " disabled_fit_transforms[s] = named_steps[s].fit_transform\n", " named_steps[s].fit = lambda self, X, y=None: self\n", " named_steps[s].fit_transform = named_steps[s].transform\n", - " \n", - " self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n", + " \n", + " for k in keras_batch_fitting_layer:\n", + " # forcing batch fitting on keras\n", + " disabled_keras_fits[k]=named_steps[k].fit\n", + " named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch progressing!?!?!\n", + " \n", + " if batch_size is None:\n", + " self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n", + " else:\n", + " n = len(self.sdm.X) // batch_size\n", + " for i in range(n_epochs):\n", + " for j in range(n):\n", + " self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))\n", + " if progress_callback is not None:\n", + " progress_callback()\n", + " pred, yt = self.test()\n", + " mean_squared_error = ((pred - yt)**2).mean(axis=0)\n", + " print(\"#\" + str(j) + \": loss: \", mean_squared_error)\n", + "\n", " \n", " # restore replaced fit functions:\n", " for s in disabled_fit_steps:\n", " named_steps[s].fit = disabled_fits[s]\n", " named_steps[s].fit_transform = disabled_fit_transforms[s]\n", + " \n", + " for k in keras_batch_fitting_layer:\n", + " named_steps[k].fit = disabled_keras_fits[k]\n", " \n", " def test(self):\n", " '''\n", @@ -630,9 +637,7 @@ " '''\n", " if self.sdm.X is None:\n", " self.sdm.create_train_test_split()\n", - " return self.pm.predict(self.sdm.Xt), self.sdm.yt\n", - "\n", - " " + " return self.pm.predict(self.sdm.Xt), self.sdm.yt\n" ] }, { @@ -652,21 +657,9 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "reading file: ./data_en/2017-11-01.json...\n", - "imported 33368 samples\n", - "remaining samples after top emoji filtering: 26197\n", - "Epoch 1/1\n", - "100/100 [==============================] - 3s 28ms/step - loss: 0.1230\n" - ] - } - ], + "outputs": [], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", @@ -698,40 +691,9 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "['keras_model']\n" - ] - }, - { - "data": { - "text/plain": [ - "'saved pipeline. 
It can be loaded the following way:'" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/markdown": [ - "> ```\n", - "pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n", - "```" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", @@ -750,156 +712,9 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[0.16062996, 0.08324276, 0.09433182],\n", - " [0.16413 , 0.09421383, 0.07578427],\n", - " [0.11994962, 0.05705731, 0.06310127],\n", - " ...,\n", - " [0.13887292, 0.08502828, 0.08176519],\n", - " [0.18185864, 0.09223703, 0.10704609],\n", - " [0.17687687, 0.09147045, 0.10650696]], dtype=float32)" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/plain": [ - "array([[0.46813021, 0.24716181, 0.28470797],\n", - " [0.46813021, 0.24716181, 0.28470797],\n", - " [0.70401758, 0.05932203, 0.23666039],\n", - " ...,\n", - " [0.46813021, 0.24716181, 0.28470797],\n", - " [0.46813021, 0.24716181, 0.28470797],\n", - " [0.46813021, 0.24716181, 0.28470797]])" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "prediction variance: 0.0005431187\n", - "teacher variance: 0.03341702104519965\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
predictpredicted_sentimentteacherteacher_sentimenttext
35671πŸ˜‚[0.16062995791435242, 0.0832427591085434, 0.09...πŸ˜‚[0.46813021474490496, 0.24716181096977158, 0.2...i feel like i care so much more in everi situat
25683😒[0.16413000226020813, 0.0942138284444809, 0.07...πŸ˜‚[0.46813021474490496, 0.24716181096977158, 0.2...i did not meat to add that 2 there ... hav see...
8985πŸ˜‚[0.11994962394237518, 0.05705730617046356, 0.0...😊[0.7040175768989329, 0.059322033898305086, 0.2...never…
5410πŸ˜‚[0.18114930391311646, 0.10199417173862457, 0.1...πŸ˜‚[0.46813021474490496, 0.24716181096977158, 0.2...lmao on me ! ! ! wtf wa he suppos to say
62611πŸ˜‚[0.16997836530208588, 0.08633847534656525, 0.0...😊[0.7040175768989329, 0.059322033898305086, 0.2...this dude alway help me get through my school ...
\n", - "
" - ], - "text/plain": [ - " predict predicted_sentiment teacher \\\n", - "35671 πŸ˜‚ [0.16062995791435242, 0.0832427591085434, 0.09... πŸ˜‚ \n", - "25683 😒 [0.16413000226020813, 0.0942138284444809, 0.07... πŸ˜‚ \n", - "8985 πŸ˜‚ [0.11994962394237518, 0.05705730617046356, 0.0... 😊 \n", - "5410 πŸ˜‚ [0.18114930391311646, 0.10199417173862457, 0.1... πŸ˜‚ \n", - "62611 πŸ˜‚ [0.16997836530208588, 0.08633847534656525, 0.0... 😊 \n", - "\n", - " teacher_sentiment \\\n", - "35671 [0.46813021474490496, 0.24716181096977158, 0.2... \n", - "25683 [0.46813021474490496, 0.24716181096977158, 0.2... \n", - "8985 [0.7040175768989329, 0.059322033898305086, 0.2... \n", - "5410 [0.46813021474490496, 0.24716181096977158, 0.2... \n", - "62611 [0.7040175768989329, 0.059322033898305086, 0.2... \n", - "\n", - " text \n", - "35671 i feel like i care so much more in everi situat \n", - "25683 i did not meat to add that 2 there ... hav see... \n", - "8985 never… \n", - "5410 lmao on me ! ! ! wtf wa he suppos to say \n", - "62611 this dude alway help me get through my school ... " - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mean Squared Error: [0.13877691 0.04682433 0.02937794]\n", - "Variance teacher: [0.02183094 0.02513847 0.00285735]\n", - "Variance prediction: [0.00046378 0.00019441 0.00020516]\n" - ] - } - ], + "outputs": [], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n", @@ -945,68 +760,9 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "003ae16760b04c25bdc9f2fe2193747a", - "version_major": 2, - "version_minor": 0 - }, - "text/html": [ - "

\n" - ], - "text/plain": [ - "Text(value='')" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "4580af82b30545f197a41e4359010556", - "version_major": 2, - "version_minor": 0 - }, - "text/html": [ - "

\n" - ], - "text/plain": [ - "VBox(children=(Button(description='get emoji', icon='check', style=ButtonStyle(), tooltip='Click me'), Output()))" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "import __main__ as main\n", "if not hasattr(main, '__file__'):\n",