batch fitting with sparse matrices and keras is very uncomfortable *sigh*
Project/simple_approach/Continous_Learner.ipynb | 571 (new file)
Project/simple_approach/Continous_Learner.ipynb (new file):

@@ -0,0 +1,571 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Continuous Learner for Emoji classifier 🤓\n",
    "**usage:**\n",
    "run all cells, then go to the [user interface](#User-Interface)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
      "[nltk_data]   Package punkt is already up-to-date!\n",
      "[nltk_data] Downloading package averaged_perceptron_tagger to\n",
      "[nltk_data]     /home/jonas/nltk_data...\n",
      "[nltk_data]   Package averaged_perceptron_tagger is already up-to-\n",
      "[nltk_data]       date!\n",
      "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
      "[nltk_data]   Package wordnet is already up-to-date!\n"
     ]
    }
   ],
   "source": [
    "import simple_twitter_learning as stl\n",
    "import glob\n",
    "import sys\n",
    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## user interface area:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* UI helper functions and global states"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from IPython.display import clear_output, Markdown, Math\n",
    "import ipywidgets as widgets\n",
    "\n",
    "out_areas = {}\n",
    "shown_widgets = {}\n",
    "tab_manager = widgets.Tab()\n",
    "\n",
    "def mp(obj):\n",
    "    display(Markdown(obj))\n",
    "\n",
    "def set_widget_visibility(widget_names, visible=True):\n",
    "    for w in widget_names:\n",
    "        shown_widgets[w].disabled = not visible\n",
    "\n",
    "def create_area(area_name:str, list_widgets:list, out_name:str, tab=tab_manager):\n",
    "    \"\"\"\n",
    "    creates a table of widgets with a corresponding output area below\n",
    "    \n",
    "    @param area_name: title of the area\n",
    "    @param list_widgets: list of rows, each a list of tuples: (widget, name:str)\n",
    "    @param out_name: name for the output area (None means no output area)\n",
    "    @param tab: the Tab widget the new area is appended to\n",
    "    \"\"\"\n",
    "    if out_name is not None:\n",
    "        out = widgets.Output()\n",
    "        out_areas[out_name] = out\n",
    "    h_box_widgets = []\n",
    "    v_box_widgets = []\n",
    "    for v in list_widgets:\n",
    "        for h in v:\n",
    "            if 'description' in h[0].__dir__():\n",
    "                h[0].description = h[1]\n",
    "            shown_widgets[h[1]] = h[0]\n",
    "            h_box_widgets.append(h[0])\n",
    "        v_box_widgets.append(widgets.HBox(h_box_widgets))\n",
    "        h_box_widgets = []\n",
    "    \n",
    "    if out_name is not None:\n",
    "        v_box_widgets += [out]\n",
    "    tab.children = list(tab.children) + [widgets.VBox(v_box_widgets)]\n",
    "    tab.set_title(len(tab.children) - 1, area_name)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* build UI"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/markdown": [
       "----"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "text/markdown": [
       "## User Interface"
      ],
      "text/plain": [
       "<IPython.core.display.Markdown object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3c11801d12b643d9b059ba1058d66d5e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "Tab(children=(VBox(children=(HBox(children=(Text(value='./data_en/', description='root_path'), Button(descript…"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "mp(\"----\")\n",
    "mp(\"## User Interface\")\n",
    "# create widgets\n",
    "create_area(\"load dataset 💾\",\n",
    "           [\n",
    "               [\n",
    "                   (widgets.Text(value=\"./data_en/\"), \"root_path\"),\n",
    "                   (widgets.Button(), \"set_path\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n",
    "                   (widgets.Checkbox(disabled=True), \"only_emoticons\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.BoundedIntText(disabled=True, min=-1, max=10), \"k_means_cluster\"),\n",
    "                   (widgets.BoundedIntText(disabled=True, min=-1, max=10), \"n_top_emojis\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.Button(disabled=True), \"load_data\")\n",
    "               ]\n",
    "           ],\n",
    "           \"load\")\n",
    "\n",
    "classifier_tab = widgets.Tab()\n",
    "\n",
    "create_area(\"keras\",\n",
    "           [\n",
    "               [\n",
    "                   (widgets.IntSlider(min=0, max=10), \"n_keras_layer\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.HBox([]), \"n_keras_neurons\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.HBox([]), \"keras_funcs\")\n",
    "               ]\n",
    "           ],\n",
    "           None,\n",
    "           classifier_tab)\n",
    "\n",
    "create_area(\"create classifier\",\n",
    "           [\n",
    "               [\n",
    "                   (classifier_tab, \"classifier_tab\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.Button(), \"create_classifier\")\n",
    "               ],\n",
    "               [\n",
    "                   (widgets.Text(), \"classifier name\"),\n",
    "                   (widgets.Button(), \"save classifier\")\n",
    "               ]\n",
    "           ],\n",
    "           \"create\")\n",
    "\n",
    "create_area(\"train classifier 🎓\",\n",
    "            [\n",
    "                [\n",
    "                    (widgets.IntSlider(value=0, min=0, max=0), \"batch_size\"),\n",
    "                    (widgets.FloatSlider(value=0.15, min=0, max=1), \"val_split\"),\n",
    "                    (widgets.IntText(value=1), \"n_epochs\")\n",
    "                ],\n",
    "                [\n",
    "                    (widgets.Button(), \"train\")\n",
    "                ]\n",
    "            ],\n",
    "            \"train\")\n",
    "tab_manager"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "----\n",
    "## global variables:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
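    "# sdm: sample_data_manager, pm: pipeline_manager, tr: trainer (all set later by the UI callbacks)\n",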
    "sdm = None\n",
    "pm = None\n",
    "tr = None"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## pretty jupyter print"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "import collections\n",
    "import traceback\n",
    "from pprint import pprint as pp\n",
    "\n",
    "def jupyter_print(obj, cell_w=10, headers=None, p_type=True, ret_mdown=False, index_offset=0, list_horizontal=False):\n",
    "    \"\"\"\n",
    "    pretty hacky function to convert arrays, lists and matrices into\n",
    "    nicely readable markdown and render that in jupyter. If that is not possible,\n",
    "    it will fall back to pretty print\n",
    "    \"\"\"\n",
    "    try:\n",
    "        ts = \"**Type:** \" + str(type(obj)).strip(\"<>\") + \"\\n\\n\"\n",
    "        if type(obj) == str:\n",
    "            display(Markdown(obj))\n",
    "        elif isinstance(obj, collections.Iterable):\n",
    "            if isinstance(obj[0], collections.Iterable) and type(obj[0]) is not str:\n",
    "                # we have a table\n",
    "                \n",
    "                if headers is None:\n",
    "                    headers = [str(i) for i in range(len(obj[0]))]\n",
    "                \n",
    "                if len(headers) < len(obj[0]):\n",
    "                    headers += [\" \" for i in range(len(obj[0]) - len(headers))]\n",
    "                \n",
    "                s = \"|\" + \" \" * cell_w + \"|\"\n",
    "                \n",
    "                for h in headers:\n",
    "                    s += str(h) + \" \" * (cell_w - len(h)) + \"|\"\n",
    "                s += \"\\n|\" + \"-\" * (len(headers) + (len(headers) + 1) * cell_w) + \"|\\n\"\n",
    "                \n",
    "                #s = (\"|\" + (\" \" * (cell_w))) * len(obj[0]) + \"|\\n\" + \"|\" + (\"-\" * (cell_w + 1)) * len(obj[0])\n",
    "                #s += '|\\n'\n",
    "                \n",
    "                row = index_offset\n",
    "                \n",
    "                for o in obj:\n",
    "                    s += \"|**\" + str(row) + \"**\" + \" \" * (cell_w - (len(str(row)) + 4))\n",
    "                    row += 1\n",
    "                    for i in o:\n",
    "                        s += \"|\" + str(i) + \" \" * (cell_w - len(str(i)))\n",
    "                    s += \"|\" + '\\n'\n",
    "                s += ts\n",
    "                display(Markdown(s))\n",
    "                return s if ret_mdown else None\n",
    "            else:\n",
    "                # we have a list\n",
    "                \n",
    "                if headers is None:\n",
    "                    headers = [\"index\",\"value\"]\n",
    "                    \n",
    "                index_title = headers[0]\n",
    "                value_title = headers[1]\n",
    "                \n",
    "                s = \"|\" + index_title + \" \" * (cell_w - len(index_title)) + \"|\" + value_title + \" \" * (cell_w - len(value_title)) + \"|\" + '\\n'\n",
    "                s += \"|\" + \"-\" * (1 + 2 * cell_w) + '|\\n'\n",
    "                i = index_offset\n",
    "                for o in obj:\n",
    "                    s_i = str(i)\n",
    "                    s_o = str(o)\n",
    "                    s += \"|\" + s_i + \" \" * (cell_w - len(s_i)) + \"|\" + s_o + \" \" * (cell_w - len(s_o)) + \"|\" + '\\n'\n",
    "                    i += 1\n",
    "                s += ts\n",
    "                #print(s)\n",
    "                display(Markdown(s))\n",
    "                return s if ret_mdown else None\n",
    "        else:\n",
    "            jupyter_print([obj])\n",
    "    except Exception as e:\n",
    "        print(ts)\n",
    "        pp(obj)\n",
    "\n",
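    "# shorthand alias, e.g. jp([[1, 2], [3, 4]], headers=['a', 'b']) renders a small two-column table\n",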
    "jp = jupyter_print"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## output progress printing:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class progress_indicator(object):\n",
    "    \"\"\"thin wrapper around an IntProgress bar; update() advances it by dn steps\"\"\"\n",
    "    \n",
    "    def __init__(self, n, description=\"progress\"):\n",
    "        self.w = widgets.IntProgress(value=0, min=0, max=n, description=description)\n",
    "        display(self.w)\n",
    "    \n",
    "    def update(self, dn=1):\n",
    "        self.w.value += dn"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "----\n",
    "## load datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def set_path(b):\n",
    "    with out_areas[\"load\"]:\n",
    "        clear_output()\n",
    "        mp(\"----\")\n",
    "        files = sorted(glob.glob(shown_widgets[\"root_path\"].value + \"/*.json\"))\n",
    "        \n",
    "        if len(files) == 0:\n",
    "            sys.stderr.write(\"ERROR: no json files available in \" + shown_widgets[\"root_path\"].value + \"\\n\")\n",
    "            set_widget_visibility([\"file_range\",\n",
    "                                   \"only_emoticons\",\n",
    "                                   \"k_means_cluster\",\n",
    "                                   \"n_top_emojis\",\n",
    "                                   \"load_data\"], False)\n",
    "            return\n",
    "        \n",
    "        mp(\"**available files:**\")\n",
    "        jp(files, headers=[\"fileindex\", \"filepath\"])\n",
    "        set_widget_visibility([\"file_range\",\n",
    "                               \"only_emoticons\",\n",
    "                               \"k_means_cluster\",\n",
    "                               \"n_top_emojis\",\n",
    "                               \"load_data\"], True)\n",
    "        shown_widgets[\"file_range\"].min = 0\n",
    "        shown_widgets[\"file_range\"].max = len(files) - 1\n",
    "\n",
    "def load_data(b):\n",
    "    global sdm\n",
    "    with out_areas[\"load\"]:\n",
    "        clear_output()\n",
    "        mp(\"----\")\n",
    "        \n",
    "        r = shown_widgets[\"file_range\"].value\n",
    "        r = (r[0], r[1] + 1) # the slider's end value is inclusive, but range() needs an exclusive upper bound\n",
    "        \n",
    "        p = progress_indicator(r[1] - r[0], \"reading progress\")\n",
    "        \n",
    "        sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
    "                                                        n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
    "                                                        file_range=range(r[0], r[1]),\n",
    "                                                        n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
    "                                                        progress_callback=p.update)\n",
    "        shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
    "\n",
    "# linking functions with buttons:\n",
    "shown_widgets[\"set_path\"].on_click(set_path)\n",
    "shown_widgets[\"load_data\"].on_click(load_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## train"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(b):\n",
    "    global sdm\n",
    "    global pm\n",
    "    global tr\n",
    "    with out_areas[\"train\"]:\n",
    "        clear_output()\n",
    "        mp(\"----\")\n",
    "        if sdm is None or pm is None:\n",
    "            sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n",
    "            return\n",
    "        \n",
    "        batch_size = shown_widgets[\"batch_size\"].value\n",
    "        val_split = shown_widgets[\"val_split\"].value\n",
    "        n_epochs = shown_widgets[\"n_epochs\"].value\n",
    "        \n",
    "        if batch_size == 0:\n",
    "            sys.stderr.write(\"ERROR: batch size must be greater than zero!\\n\")\n",
    "            return\n",
    "        \n",
    "        print(\"update train test split:\")\n",
    "        sdm.create_train_test_split(split=val_split)\n",
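    "        # number of full batches per epoch; a trailing partial batch is dropped\n",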
    "        batch_n = len(sdm.X) // batch_size\n",
    "        \n",
    "        print(\"fit\")\n",
    "        \n",
    "        p = progress_indicator(batch_n * n_epochs)\n",
    "        \n",
    "        tr = stl.trainer(sdm=sdm, pm=pm)\n",
    "        tr.fit(progress_callback=p.update, batch_size=batch_size, n_epochs=n_epochs)\n",
    "\n",
    "# linking:\n",
    "shown_widgets[\"train\"].on_click(train)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## create classifier"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "keras_activations = [\n",
    "    \"softmax\",\n",
    "    \"elu\",\n",
    "    \"selu\",\n",
    "    \"softplus\",\n",
    "    \"softsign\",\n",
    "    \"relu\",\n",
    "    \"tanh\",\n",
    "    \"sigmoid\",\n",
    "    \"hard_sigmoid\",\n",
    "    \"linear\",\n",
    "    \"None\"\n",
    "]\n",
    "\n",
    "def populate_keras_options(b):\n",
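    "    # rebuild one neuron-count IntText and one activation Dropdown per requested layer\n",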
    "    n_layers = shown_widgets[\"n_keras_layer\"].value\n",
    "    hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n",
    "    hbox_funcs = shown_widgets[\"keras_funcs\"]\n",
    "    \n",
    "    hbox_neurons.children = [widgets.IntText(description=str(i)) for i in range(n_layers)]\n",
    "    hbox_funcs.children = [widgets.Dropdown(options=keras_activations, description=str(i)) for i in range(n_layers)]\n",
    "    \n",
    "    #hbox_neurons.children[-1].disabled = True\n",
    "\n",
    "def create_classifier(b):\n",
    "    global sdm\n",
    "    global pm\n",
    "    global tr\n",
    "    with out_areas[\"create\"]:\n",
    "        clear_output()\n",
    "        mp(\"----\")\n",
    "        if sdm is None:\n",
    "            sys.stderr.write(\"ERROR: load a dataset first!\\n\")\n",
    "            return\n",
    "        \n",
    "        chosen_classifier = classifier_tab.get_title(classifier_tab.selected_index)\n",
    "        \n",
    "        mp(\"**chosen classifier**: `\" + chosen_classifier + \"`\")\n",
    "        \n",
    "        # TODO: add more classifier options here:\n",
    "        if chosen_classifier == 'keras':\n",
    "            sdm.create_train_test_split()\n",
    "            \n",
    "            n_layers = shown_widgets[\"n_keras_layer\"].value\n",
    "            hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n",
    "            hbox_funcs = shown_widgets[\"keras_funcs\"]\n",
    "\n",
    "            layers = []\n",
    "            for i in range(n_layers):\n",
    "                func = hbox_funcs.children[i].value\n",
    "                if func == 'None':\n",
    "                    func = None\n",
    "                layers.append((hbox_neurons.children[i].value, func))\n",
    "            \n",
    "            # force the output layer to match the number of label classes:\n",
    "            layers[-1] = (sdm.y.shape[1], layers[-1][1])\n",
    "            \n",
    "            mp(\"**layers:** \")\n",
    "            jp(layers, headers=['#neurons', 'activation_func'])\n",
    "\n",
    "            pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
    "                                                                            layers=layers, sdm=sdm)\n",
    "\n",
    "# link\n",
    "shown_widgets[\"n_keras_layer\"].observe(populate_keras_options)\n",
    "shown_widgets[\"create_classifier\"].on_click(create_classifier)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
simple_twitter_learning.py:

@@ -122,7 +122,7 @@ def get_wordnet_pos(treebank_tag):
 
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):
+    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):
         """
         generate, read and process train data in one step.
         
@@ -136,7 +136,7 @@ class sample_data_manager(object):
         @return: sample_data_manager object
         """
         sdm = sample_data_manager(path)
-        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)
+        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)
         if apply_stemming:
             sdm.apply_stemming_and_lemmatization()
         
@@ -176,7 +176,7 @@ class sample_data_manager(object):
         self.kmeans_cluster = None
         self.label_binarizer = None
     
-    def read_files(self, file_index_range:list, only_emoticons=True):
+    def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):
         """
         reading (multiple) files into one pandas table.
         
@@ -190,7 +190,8 @@ class sample_data_manager(object):
                 self.raw_data = pd.read_json(self.json_files[i], encoding="utf-8")
             else:
                 self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding="utf-8"))
-        
+            if progress_callback is not None:
+                progress_callback()
         self.emojis = self.raw_data['EMOJI']
         self.plain_text = self.raw_data['text']
         
@@ -500,7 +501,7 @@ class trainer(object):
         self.sdm = sdm
         self.pm = pm
     
-    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):
+    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):
         """
         fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline steps containing the vectorizer have to be named explicitly
         
@@ -513,22 +514,44 @@ class trainer(object):
         disabled_fits = {}
         disabled_fit_transforms = {}
         
+        disabled_keras_fits = {}
+        
         named_steps = self.pm.pipeline.named_steps
         
         for s in disabled_fit_steps:
-            # now it gets a little bit dirty:
+            # now it gets really dirty:
             # replace fit functions we don't want to call again (e.g. for vectorizers)
             disabled_fits[s] = named_steps[s].fit
             disabled_fit_transforms[s] = named_steps[s].fit_transform
             named_steps[s].fit = lambda self, X, y=None: self
             named_steps[s].fit_transform = named_steps[s].transform
-        
-        self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
-        
+        
+        for k in keras_batch_fitting_layer:
+            # forcing batch fitting on keras
+            disabled_keras_fits[k] = named_steps[k].fit
+            # bind the step as a default argument so the lambda doesn't close over the loop variable;
+            # keras can't fit sparse matrices batch-wise, so every batch is densified first:
+            named_steps[k].fit = lambda X, y, step=named_steps[k]: step.train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch processing!?!?!
+        
+        if batch_size is None:
+            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
+        else:
+            n = len(self.sdm.X) // batch_size
+            for i in range(n_epochs):
+                for j in range(n):
+                    self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))
+                    if progress_callback is not None:
+                        progress_callback()
+                    pred, yt = self.test()
+                    mean_squared_error = ((pred - yt)**2).mean(axis=0)
+                    print("#" + str(j) + ": loss: ", mean_squared_error)
+        
         # restore replaced fit functions:
         for s in disabled_fit_steps:
             named_steps[s].fit = disabled_fits[s]
             named_steps[s].fit_transform = disabled_fit_transforms[s]
+        
+        for k in keras_batch_fitting_layer:
+            named_steps[k].fit = disabled_keras_fits[k]
     
     def test(self):
         '''
 
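----

Side note: the whole batch-fitting detour above exists because this Keras version cannot consume scipy sparse matrices in train_on_batch. Below is a minimal, self-contained sketch of the same workaround outside the sklearn pipeline; the toy data, model shape and all names are illustrative assumptions, not taken from this repo:

import numpy as np
from scipy.sparse import random as sparse_random
from keras.models import Sequential
from keras.layers import Dense

# toy sparse matrix standing in for the TF-IDF feature matrix
X = sparse_random(1000, 20, density=0.1, format='csr')
y = np.random.randint(0, 2, size=(1000, 1))

model = Sequential()
model.add(Dense(8, input_dim=20, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam')

batch_size = 100
n_batches = X.shape[0] // batch_size
for epoch in range(2):
    for j in range(n_batches):
        # densify only the current slice, so peak memory stays at one dense batch
        X_batch = np.asarray(X[j * batch_size:(j + 1) * batch_size].todense())
        y_batch = y[j * batch_size:(j + 1) * batch_size]
        loss = model.train_on_batch(X_batch, y_batch)
        print("epoch %d, batch #%d: loss %.4f" % (epoch, j, loss))

The same idea could also feed a generator to fit_generator instead of monkey-patching the pipeline step, at the cost of stepping outside the sklearn API.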