diff --git a/Project/advanced_approach/Continous_Learner.ipynb b/Project/advanced_approach/Continous_Learner.ipynb deleted file mode 100644 index 41753db..0000000 --- a/Project/advanced_approach/Continous_Learner.ipynb +++ /dev/null @@ -1,847 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Learner for Emoji classifier πŸ€“\n", - "**usage:**\n", - "run all cells, then go to the [user interface](#User-Interface)" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "%matplotlib inline" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Using TensorFlow backend.\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n", - "[nltk_data] Package punkt is already up-to-date!\n", - "[nltk_data] Downloading package averaged_perceptron_tagger to\n", - "[nltk_data] /home/jonas/nltk_data...\n", - "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n", - "[nltk_data] date!\n", - "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n", - "[nltk_data] Package wordnet is already up-to-date!\n" - ] - } - ], - "source": [ - "import twitter_learning as twl\n", - "import glob\n", - "import sys\n", - "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n", - "import pickle\n", - "import matplotlib.pyplot as plt\n", - "import matplotlib\n", - "import numpy as np" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## user interface area:" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* UI helper functions and global states" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from IPython.display import clear_output, Markdown, Math\n", - "import ipywidgets as widgets\n", - "\n", - "out_areas = {}\n", - "shown_widgets = {}\n", - "tab_manager = widgets.Tab()\n", - "\n", - "def mp(obj):\n", - " display(Markdown(obj))\n", - "\n", - "def set_widget_visibility(widget_names, visible=True):\n", - " for w in widget_names:\n", - " shown_widgets[w].disabled = not visible\n", - "\n", - "def create_area(area_name:str, list_widgets:list, out_name:str, tab=tab_manager):\n", - " \"\"\"\n", - " creates a table of widgets with corresponding output area below\n", - " \n", - " @param area_name: title of the area\n", - " @param list_widgets: list of tuples: (widget, name:str)\n", - " @param out_name: name for the output area\n", - " \"\"\"\n", - " if out_name is not None:\n", - " out = widgets.Output()\n", - " out_areas[out_name] = out\n", - " h_box_widgets = []\n", - " v_box_widgets = []\n", - " for v in list_widgets:\n", - " for h in v:\n", - " if 'description' in h[0].__dir__() and h[1] is not None:\n", - " h[0].description = h[1]\n", - " if h[1] is not None:\n", - " shown_widgets[h[1]] = h[0]\n", - " h_box_widgets.append(h[0])\n", - " v_box_widgets.append(widgets.HBox(h_box_widgets))\n", - " h_box_widgets = []\n", - " \n", - " if out_name is not None:\n", - " v_box_widgets += [out]\n", - " tab.children = list(tab.children) + [widgets.VBox(v_box_widgets)]\n", - " tab.set_title(len(tab.children) - 1, area_name)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "* build UI" - ] - }, - { - "cell_type": "code", - "execution_count": 
4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/markdown": [ - "----" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/markdown": [ - "## User Interface" - ], - "text/plain": [ - "" - ] - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "application/vnd.jupyter.widget-view+json": { - "model_id": "e94b33b8493a48798d3adda091986a78", - "version_major": 2, - "version_minor": 0 - }, - "text/plain": [ - "Tab(children=(VBox(children=(HBox(children=(HTML(value=' Data Root Folder:
setup the folder conta…" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "mp(\"----\")\n", - "mp(\"## User Interface\")\n", - "# create widgets\n", - "create_area(\"load dataset πŸ’Ύ\",\n", - " [\n", - " [\n", - " (widgets.HTML(\" Data Root Folder:
setup the folder containing *.json train data \"), None)\n", - " ],\n", - " [\n", - " (widgets.Text(value=\"./data_en/\"), \"root_path\"),\n", - " (widgets.Button(), \"set_path\")\n", - " ],\n", - " [\n", - " (widgets.HTML(\" Loading and preprocessing options:
setup the range of files to load. Only_emoticons will filter out 'non-smiley' emojis, min_words is the minimum amount of words for one document. Also you can setup top-emoji filtering or only load samples containing a custom emoji set\"), None)\n", - " ],\n", - " [\n", - " (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n", - " (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\"),\n", - " (widgets.Checkbox(value=False,disabled=True), \"apply_lemmatization_and_stemming\"),\n", - " (widgets.BoundedIntText(value=5,min=0, max=10), \"min_words\")\n", - " ],\n", - " [\n", - " #(widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\")\n", - " (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n", - " (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\"),\n", - " (widgets.Text(value=\"\"), \"custom_emojis\")\n", - " ],\n", - " [\n", - " (widgets.Button(disabled=True),\"load_data\")\n", - " ]\n", - " ],\n", - " \"load\")\n", - "\n", - "classifier_tab = widgets.Tab()\n", - "\n", - "create_area(\"keras\",\n", - " [\n", - " [\n", - " (widgets.IntSlider(min=0, max=10), \"n_keras_layer\")\n", - " ],\n", - " [\n", - " (widgets.HBox([]), \"n_keras_neurons\")\n", - " ],\n", - " [\n", - " (widgets.HBox([]), \"keras_funcs\")\n", - " ]\n", - " ],\n", - " None,\n", - " classifier_tab)\n", - "\n", - "create_area(\"create/save/load classifier\",\n", - " [\n", - " [\n", - " (classifier_tab, \"classifier_tab\")\n", - " ],\n", - " [\n", - " (widgets.HTML(\" Create new Classifier:
create a new keras classifier with layer options from above. Also a vectorizer will be trained on loaded sample data. If doc2vec is disabled, TFIDF is used\"), None)\n", - " ],\n", - " [\n", - " (widgets.Checkbox(value=True),\"use_doc2vec\"),\n", - " (widgets.Checkbox(value=True),\"d2v_use_pretrained\"),\n", - " (widgets.IntText(value=100),\"d2v_size\"),\n", - " (widgets.IntText(value=8), \"d2v_window\"),\n", - " (widgets.IntSlider(value=5, min=0, max=32), \"d2v_min_count\")\n", - " ],\n", - " [\n", - " (widgets.Button(), \"create_classifier\")\n", - " ],\n", - " [\n", - " (widgets.HTML(\" Save Classifier: \"), None)\n", - " ],\n", - " [\n", - " (widgets.Text(), \"classifier_name\"),\n", - " (widgets.Button(), \"save_classifier\")\n", - " ],\n", - " [\n", - " (widgets.HTML(\" Load Classifier: \"), None)\n", - " ],\n", - " [\n", - " (widgets.Select(options=sorted(glob.glob(\"./*.pipeline\"))), \"clf_file_selector\"),\n", - " (widgets.Text(), \"clf_file\"),\n", - " (widgets.Button(), \"load_classifier\")\n", - " ]\n", - " ],\n", - " \"create\")\n", - "\n", - "create_area(\"train classifier πŸŽ“\", \n", - " [\n", - " [\n", - " (widgets.HTML(\" Custom Batch Settings:
(Ignored if batch_size is 0)\"), None)\n", - " ],\n", - " [\n", - " (widgets.IntSlider(value=0,min=0,max=0), \"batch_size\"),\n", - " (widgets.FloatSlider(value=0.15, min=0, max=1), \"val_split\")\n", - " ],\n", - " [\n", - " (widgets.HTML(\" Train: \"), None)\n", - " ],\n", - " [\n", - " (widgets.IntText(value=1), \"n_epochs\"),\n", - " (widgets.Button(),\"train\")\n", - " ]\n", - " ], \n", - " \"train\" )\n", - "create_area(\"playground 😎\",\n", - " [\n", - " [\n", - " (widgets.HTML(\" predict single sentence
(uses min distance to given emojis in prediction_ground_set)\"), None)\n", - " ],\n", - " [\n", - " (widgets.Text(),\"test_input\"),\n", - " (widgets.Text(value=\"πŸ˜³πŸ˜‹πŸ˜€πŸ˜ŒπŸ˜πŸ˜”πŸ˜’πŸ˜ŽπŸ˜’πŸ˜…πŸ˜πŸ˜‰πŸ™ŒπŸ™πŸ˜˜πŸ˜ŠπŸ˜©πŸ˜πŸ˜­πŸ˜‚\"),\"prediction_ground_set\"),\n", - " (widgets.HTML(\"

∅

\"),\"prediction\"),\n", - " ],\n", - " [\n", - " (widgets.Checkbox(),\"show_sorted_list\"),\n", - " (widgets.Button(),\"show_plot\")\n", - " ],\n", - " [\n", - " (widgets.HTML(\" Test on loaded validation set:
(performs prediction plot on all validation samples that are labeled with given emojis)\"), None)\n", - " ],\n", - " [\n", - " (widgets.Text(value=\"πŸ˜³πŸ˜‹πŸ˜€πŸ˜ŒπŸ˜πŸ˜”πŸ˜’πŸ˜ŽπŸ˜’πŸ˜…πŸ˜πŸ˜‰πŸ™ŒπŸ™πŸ˜˜πŸ˜ŠπŸ˜©πŸ˜πŸ˜­πŸ˜‚\"), \"validation_emojis\"),\n", - " (widgets.Button(),\"show_validation_plot\")\n", - " ]\n", - " ],\n", - " \"playground\")\n", - "\n", - "tab_manager" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "----\n", - "## global variables:" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "sdm = None\n", - "pm = None\n", - "tr = None" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## pretty jupyter print" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "import collections\n", - "import traceback\n", - "from pprint import pprint as pp\n", - "\n", - "def jupyter_print(obj, cell_w = 10, headers=None, p_type=True, ret_mdown=False, index_offset=0, list_horizontal=False):\n", - " \"\"\"\n", - " pretty hacky function to convert arrays, lists and matrices into\n", - " nice readable markdown code and render that in jupyter. if that is not possible\n", - " it will use pretty print instead\n", - " \"\"\"\n", - " try:\n", - " ts = \"**Type:** \" + str(type(obj)).strip(\"<>\") + \"\\n\\n\"\n", - " if type(obj) == str:\n", - " display(Markdown(obj))\n", - " elif isinstance(obj, collections.Iterable):\n", - " if isinstance(obj[0], collections.Iterable) and type(obj[0]) is not str:\n", - " # we have a table\n", - " \n", - " if headers is None:\n", - " headers = [str(i) for i in range(len(obj[0]))]\n", - " \n", - " if len(headers) < len(obj[0]):\n", - " headers += [\" \" for i in range(len(obj[0]) - len(headers))]\n", - " \n", - " s = \"|\" + \" \" * cell_w + \"|\"\n", - " \n", - " for h in headers:\n", - " s += str(h) + \" \" * (cell_w - len(h)) + \"|\"\n", - " s += \"\\n|\" + \"-\" * (len(headers) + (len(headers) + 1) * cell_w) + \"|\\n\"\n", - " \n", - " #s = (\"|\" + (\" \" * (cell_w))) * len(obj[0]) + \"|\\n\" + \"|\" + (\"-\" * (cell_w + 1)) * len(obj[0])\n", - " #s += '|\\n'\n", - " \n", - " row = index_offset\n", - " \n", - " for o in obj:\n", - " s += \"|**\" + str(row) + \"**\" + \" \" * (cell_w - (len(str(row))+4))\n", - " row += 1\n", - " for i in o:\n", - " s += \"|\" + str(i) + \" \" * (cell_w - len(str(i)))\n", - " s+=\"|\" + '\\n'\n", - " s += ts\n", - " display(Markdown(s))\n", - " return s if ret_mdown else None\n", - " else:\n", - " # we have a list\n", - " \n", - " \n", - " if headers is None:\n", - " headers = [\"index\",\"value\"]\n", - " \n", - " index_title = headers[0]\n", - " value_title = headers[1]\n", - " \n", - " s = \"|\" + index_title + \" \" * (cell_w - len(value_title)) + \"|\" + value_title + \" \" * (cell_w - len(value_title)) + \"|\" + '\\n'\n", - " s += \"|\" + \"-\" * (1 + 2 * cell_w) + '|\\n'\n", - " i = index_offset\n", - " for o in obj:\n", - " s_i = str(i)\n", - " s_o = str(o)\n", - " s += \"|\" + s_i + \" \" * (cell_w - len(s_i)) + \"|\" + s_o + \" \" * (cell_w - len(s_o)) + \"|\" + '\\n'\n", - " i+=1\n", - " s += ts\n", - " #print(s)\n", - " display(Markdown(s))\n", - " return s if ret_mdown else None\n", - " else:\n", - " jupyter_print([obj])\n", - " except Exception as e:\n", - " print(ts)\n", - " pp(obj) \n", - "\n", - "jp = jupyter_print" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## output progress printing:" - ] - }, - { - 
"cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "class progress_indicator(object):\n", - " \n", - " def __init__(self, description=\"progress\"):\n", - " self.w = widgets.FloatProgress(value=0, min=0,max=1, description = description)\n", - " display(self.w)\n", - " def update(self, val):\n", - " self.w.value = val\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "----\n", - "## load datasets" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "def set_path(b):\n", - " with out_areas[\"load\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " files = sorted(glob.glob(shown_widgets[\"root_path\"].value + \"/*.json\"))\n", - " \n", - " if len(files) == 0:\n", - " sys.stderr.write(\"ERROR: no json files available in \" + shown_widgets[\"root_path\"].value + \"\\n\")\n", - " set_widget_visibility([\"file_range\",\n", - " \"only_emoticons\",\n", - " \"n_top_emojis\",\n", - " \"apply_lemmatization_and_stemming\",\n", - " \"load_data\"], False)\n", - " return\n", - " \n", - " mp(\"**available files:**\")\n", - " jp(files, headers=[\"fileindex\",\"filepath\"])\n", - " set_widget_visibility([\"file_range\",\n", - " \"only_emoticons\",\n", - " \"n_top_emojis\",\n", - " \"apply_lemmatization_and_stemming\",\n", - " \"load_data\"], True)\n", - " shown_widgets[\"file_range\"].min=0\n", - " shown_widgets[\"file_range\"].max=len(files) -1\n", - "\n", - "def load_data(b):\n", - " global sdm\n", - " with out_areas[\"load\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " \n", - " r = shown_widgets[\"file_range\"].value\n", - " r = (r[0], r[1] + 1) # range has to be exclusive according to the last element!\n", - " \n", - " p_r = progress_indicator(\"reading progress\")\n", - " \n", - " lemm_and_stemm = shown_widgets[\"apply_lemmatization_and_stemming\"].value\n", - " \n", - " if lemm_and_stemm:\n", - " p_s = progress_indicator(\"stemming progress\")\n", - " \n", - " emoji_mean = shown_widgets[\"label_criteria\"].value == \"mean\"\n", - " \n", - " custom_emojis = list(shown_widgets[\"custom_emojis\"].value)\n", - " \n", - " min_words = shown_widgets[\"min_words\"].value\n", - " \n", - " sdm = twl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n", - " n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n", - " file_range=range(r[0], r[1]),\n", - " n_kmeans_cluster=-1,\n", - " read_progress_callback=p_r.update,\n", - " stem_progress_callback=p_s.update if lemm_and_stemm else None,\n", - " apply_stemming = lemm_and_stemm,\n", - " emoji_mean=emoji_mean,\n", - " custom_target_emojis=custom_emojis if len(custom_emojis) > 0 else None,\n", - " min_words=min_words)\n", - " shown_widgets[\"batch_size\"].max = len(sdm.labels)\n", - " \n", - " \n", - "# linking functions with buttons:\n", - "shown_widgets[\"set_path\"].on_click(set_path)\n", - "shown_widgets[\"load_data\"].on_click(load_data)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## train" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "def train(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " with out_areas[\"train\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " if sdm is None or pm is None:\n", - " sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n", - " return\n", - " \n", - " batch_size = shown_widgets[\"batch_size\"].value\n", - " 
val_split = shown_widgets[\"val_split\"].value\n", - " n_epochs = shown_widgets[\"n_epochs\"].value\n", - " \n", - " print(\"update train test split:\")\n", - " sdm.create_train_test_split(split=val_split)\n", - " \n", - " print(\"fit\")\n", - " \n", - " p = progress_indicator()\n", - " \n", - " tr = twl.trainer(sdm=sdm, pm=pm)\n", - " tr.fit(progress_callback=p.update, batch_size=batch_size if batch_size > 0 else None, n_epochs=n_epochs)\n", - " \n", - "\n", - "# linking:\n", - "shown_widgets[\"train\"].on_click(train)\n", - " " - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## create classifier" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "keras_acivations = [\n", - " \"softmax\",\n", - " \"elu\",\n", - " \"selu\",\n", - " \"softplus\",\n", - " \"softsign\",\n", - " \"relu\",\n", - " \"tanh\",\n", - " \"sigmoid\",\n", - " \"hard_sigmoid\",\n", - " \"linear\",\n", - " \"None\"\n", - "]\n", - "\n", - "def populate_keras_options(b):\n", - " n_layers = shown_widgets[\"n_keras_layer\"].value\n", - " hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n", - " hbox_funcs = shown_widgets[\"keras_funcs\"]\n", - " \n", - " hbox_neurons.children = [widgets.IntText(description = str(i)) for i in range(n_layers)]\n", - " hbox_funcs.children = [widgets.Dropdown(options=keras_acivations,description = str(i)) for i in range(n_layers)]\n", - " \n", - " #hbox_neurons.children[-1].disabled = True\n", - "\n", - "def create_classifier(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " with out_areas[\"create\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " if sdm is None:\n", - " sys.stderr.write(\"load a dataset first!\\n\")\n", - " return\n", - " \n", - " chosen_classifier = classifier_tab.get_title(classifier_tab.selected_index)\n", - " \n", - " mp(\"**chosen classifier**: `\" + chosen_classifier + \"`\")\n", - " \n", - " # creating the vectorizer\n", - " vectorizer = None\n", - " if shown_widgets[\"use_doc2vec\"].value:\n", - " if shown_widgets[\"d2v_use_pretrained\"].value:\n", - " vectorizer = pickle.load( open( \"doc2VecModel.p\", \"rb\" ) )\n", - " else:\n", - " vectorizer = twl.skd2v.Doc2VecTransformer(size=shown_widgets[\"d2v_size\"].value,\n", - " window=shown_widgets[\"d2v_window\"].value,\n", - " min_count=shown_widgets[\"d2v_min_count\"].value)\n", - " else:\n", - " vectorizer=TfidfVectorizer(stop_words='english')\n", - " \n", - " # TODO: add more classifier options here:\n", - " if chosen_classifier is 'keras':\n", - " sdm.create_train_test_split()\n", - " \n", - " n_layers = shown_widgets[\"n_keras_layer\"].value\n", - " hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n", - " hbox_funcs = shown_widgets[\"keras_funcs\"]\n", - "\n", - " layers = []\n", - " for i in range(n_layers):\n", - " func = hbox_funcs.children[i].value\n", - " if func == 'None':\n", - " func = None\n", - " layers.append((hbox_neurons.children[i].value, func))\n", - " \n", - " # modify last layer:\n", - " layers[-1] = (sdm.y.shape[1], layers[-1][1])\n", - " \n", - " mp(\"**layers:** \")\n", - " jp(layers, headers=['#neurons', 'activation_func'])\n", - "\n", - " pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer, layers=layers, sdm=sdm, fit_vectorizer=not shown_widgets[\"d2v_use_pretrained\"].value)\n", - "\n", - "def save_classifier(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " with out_areas[\"create\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " if pm 
is None:\n", - " sys.stderr.write(\"ERROR: create classifier first\")\n", - " return\n", - " \n", - " pm.save(shown_widgets[\"classifier_name\"].value)\n", - "\n", - "def load_classifier(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " with out_areas[\"create\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - "\n", - "def update_file_selector(b):\n", - " shown_widgets[\"clf_file_selector\"].options = sorted(glob.glob(\"./*.pipeline\"))\n", - "\n", - "def clf_file_selector(b):\n", - " shown_widgets[\"clf_file\"].value = shown_widgets[\"clf_file_selector\"].value\n", - " update_file_selector(b)\n", - "\n", - "def load_classifier(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " with out_areas[\"create\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " clf_file = shown_widgets[\"clf_file\"].value\n", - " pm = stl.pipeline_manager.load_from_pipeline_file(clf_file)\n", - " \n", - "\n", - "# link\n", - "shown_widgets[\"n_keras_layer\"].observe(populate_keras_options)\n", - "shown_widgets[\"create_classifier\"].on_click(create_classifier)\n", - "shown_widgets[\"save_classifier\"].on_click(save_classifier)\n", - "shown_widgets[\"load_classifier\"].on_click(load_classifier)\n", - "shown_widgets[\"clf_file_selector\"].observe(clf_file_selector)\n", - "\n", - "\n", - "\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## plotting stuff for testing area" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [], - "source": [ - "def sentiment_score(s):\n", - " #(pos, neg, neu)^T\n", - " return s[0] - s[1]\n", - "\n", - "def plot_sentiment_space(predicted_sentiment_vectors, top_sentiments, top_emojis):\n", - " # sentiment score axis\n", - " top_X = np.array([sentiment_score(x) for x in top_sentiments])\n", - " pred_X = np.array([sentiment_score(x) for x in predicted_sentiment_vectors])\n", - " \n", - " # neutral axis:\n", - " top_Y = np.array([x[2] for x in top_sentiments])\n", - " pred_Y = np.array([x[2] for x in predicted_sentiment_vectors])\n", - " \n", - " fig_1, ax_1 = plt.subplots()#figsize=(15,10))\n", - " plt.title(\"sentiment-score-plot\")\n", - " plt.xlabel(\"sentiment score\")\n", - " plt.ylabel(\"neutrality\")\n", - " plt.xlim([-1,1])\n", - " plt.ylim([0,1])\n", - " for i in range(len(top_X)):\n", - " plt.text(top_X[i], top_Y[i], top_emojis[i])\n", - " plt.plot(pred_X, pred_Y, 'bo')\n", - " #plt.savefig(title + \" -- sentiment-plot.png\", bbox_inches='tight')\n", - " \n", - " # sentiment score axis\n", - " top_X = np.array([x[0] for x in top_sentiments])\n", - " pred_X = np.array([x[0] for x in predicted_sentiment_vectors])\n", - " \n", - " # neutral axis:\n", - " top_Y = np.array([x[1] for x in top_sentiments])\n", - " pred_Y = np.array([x[1] for x in predicted_sentiment_vectors])\n", - " \n", - " fig_2, ax_2 = plt.subplots()#figsize=(15,10))\n", - " plt.title(\"positive-negative-plot\")\n", - " plt.xlabel(\"positive\")\n", - " plt.ylabel(\"negative\")\n", - " plt.xlim([0,1])\n", - " plt.ylim([0,1])\n", - " for i in range(len(top_X)):\n", - " plt.text(top_X[i], top_Y[i], top_emojis[i])\n", - " plt.plot(pred_X, pred_Y, 'bo')\n", - " #plt.savefig(title + \" -- positive-negative-plot.png\", bbox_inches='tight')\n", - " plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## testing area" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "top_20 = 
list(\"πŸ˜³πŸ˜‹πŸ˜€πŸ˜ŒπŸ˜πŸ˜”πŸ˜’πŸ˜ŽπŸ˜’πŸ˜…πŸ˜πŸ˜‰πŸ™ŒπŸ™πŸ˜˜πŸ˜ŠπŸ˜©πŸ˜πŸ˜­πŸ˜‚\")\n", - "top_20_sents = twl.emoji2sent(top_20)\n", - "\n", - "pred = None\n", - "\n", - "def test_input(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " global pred\n", - " with out_areas[\"playground\"]:\n", - " clear_output()\n", - " mp(\"----\")\n", - " if pm is None:\n", - " sys.stderr.write(\"ERROR: load or create classifier first\")\n", - " return\n", - " X = shown_widgets[\"test_input\"].value\n", - " pred = pm.predict([X])\n", - " target_list=list(shown_widgets[\"prediction_ground_set\"].value)\n", - " shown_widgets[\"prediction\"].value = \"

\" + str(twl.sent2emoji(pred,custom_target_emojis=target_list)[0]) + \"

\"\n", - " if shown_widgets[\"show_sorted_list\"].value:\n", - " mp(\"## \" + \"\".join(twl.edist.sentiment_vector_to_emoji(pred, only_emoticons=True, n_results=100, custom_target_emojis=target_list)))\n", - " \n", - "\n", - "\n", - "def plot_pred(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " global pred\n", - " with out_areas[\"playground\"]:\n", - " plot_sentiment_space(pred, top_20_sents, top_20)\n", - " \n", - " \n", - "def plot_subset_pred(b):\n", - " global sdm\n", - " global pm\n", - " global tr\n", - " global pred\n", - " with out_areas[\"playground\"]:\n", - " clear_output()\n", - " \n", - " if sdm is None or pm is None:\n", - " sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n", - " return\n", - " \n", - " if tr is None:\n", - " tr = twl.trainer(sdm=sdm, pm=pm)\n", - " \n", - " pred, y = tr.test(emoji_subset=list(shown_widgets[\"validation_emojis\"].value))\n", - " print(len(pred))\n", - " plot_sentiment_space(pred, top_20_sents, top_20)\n", - "\n", - "#link\n", - "shown_widgets[\"test_input\"].observe(test_input)\n", - "shown_widgets[\"show_plot\"].on_click(plot_pred)\n", - "shown_widgets[\"show_validation_plot\"].on_click(plot_subset_pred)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}
diff --git a/Project/advanced_approach/README.md b/Project/advanced_approach/README.md
index 746b789..b5a0a07 100644
--- a/Project/advanced_approach/README.md
+++ b/Project/advanced_approach/README.md
@@ -4,32 +4,64 @@
 
 ## Folder Overview
 
-| Filename | short_description |
-| -------------------------------------------- | ------------------------------------------------------------ |
-| twitter_learning.py / twitter_learning.ipynb | module containing the main classes for the learning process |
-| Learner.ipynb | notebook containing a user interface to control the learn process |
-| Evaluation_sentiment_dataset.ipynb | notebook creating an evaluation on the sentiment dataset |
+| Filename | short_description |
+| ------------------------------------------------------------ | ------------------------------------------------------------ |
+| [twitter_learning.py](twitter_learning.py) | module containing the main classes for the learning process |
+| [Learner.ipynb](Learner.ipynb) | notebook containing a user interface to control the learning process |
+| [Evaluation_sentiment_dataset.ipynb](Evaluation_sentiment_dataset.ipynb) | notebook creating an evaluation on the sentiment dataset |
 
 ----
 
 ## twitter_learning.py
 
-TODO
+We wanted to be able to load and save classifiers and to continue the training process of a loaded classifier on other datasets. Since this functionality doesn't fit well into a single, linearly processed notebook, we wrote this module. It contains three classes that manage the training data, the classifier and the training process:
+
+### sample_data_manager
+
+class for preprocessing twitter data provided as json files. It creates samples with sentiment labels and provides train and validation samples.
+
+### pipeline_manager
+
+creates, saves or loads an sklearn pipeline consisting of a vectorizer and a keras classifier. When a new classifier is created, the vectorizer is fit on all samples that a given [sample_data_manager](#sample_data_manager) has stored at that moment.
+
+### trainer
+
+expects a [pipeline_manager](#pipeline_manager) and a [sample_data_manager](#sample_data_manager). It controls the training process and feeds the pipeline with data from the [sample_data_manager](#sample_data_manager). Since we want to be able to continue training on loaded classifiers, this class modifies the pipeline at runtime to prevent the vectorizer from being reset while fitting new data. A short usage sketch of all three classes follows below.
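+
+A minimal sketch of how these three classes play together, assembled from the calls made in [Learner.ipynb](Learner.ipynb). The concrete argument values are only illustrative, and optional arguments (e.g. the progress callbacks) are omitted:
+
+```python
+import twitter_learning as twl
+from sklearn.feature_extraction.text import TfidfVectorizer
+
+# read and preprocess the twitter json files, keeping only samples
+# labeled with one of the 20 most frequent emojis:
+sdm = twl.sample_data_manager.generate_and_read(path="./data_en/",
+                                                n_top_emojis=20,
+                                                file_range=range(0, 1),
+                                                n_kmeans_cluster=-1,
+                                                apply_stemming=False,
+                                                emoji_mean=False,
+                                                min_words=5)
+sdm.create_train_test_split()
+
+# build the pipeline of vectorizer and keras classifier. layers is a list of
+# (n_neurons, activation) tuples; the last layer must match the label dimension:
+vectorizer = TfidfVectorizer(stop_words='english')
+pm = twl.pipeline_manager.create_keras_pipeline_with_vectorizer(
+    vectorizer, layers=[(2500, 'relu'), (sdm.y.shape[1], None)], sdm=sdm)
+
+# train on the loaded samples and save the whole pipeline as a *.pipeline file:
+tr = twl.trainer(sdm=sdm, pm=pm)
+tr.fit(batch_size=None, n_epochs=1)
+pm.save("my_classifier")
+
+# later: restore the pipeline and continue training on other data:
+pm = twl.pipeline_manager.load_from_pipeline_file("./my_classifier.pipeline")
+```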
 
 ----
 
 ## Learner.ipynb
 
-This file provides all controls for the train process and feeds the classifier with data. To use it just run all cells and jump to the user interface Part. Some explanations for the options:
+In short: the user interface for the methods provided in [twitter_learning.py](#twitter_learning.py). This notebook provides all controls for the training process and feeds the classifier with data. To use it, just run all cells and jump to the user interface part. In order to train on a dataset, use it in the following way:
 
 ### load datasets
 
-![1532531542185](README.assets/1532531542185.png)
+
+* set up the root folder for the json files containing our twitter samples
+* then you can set the range of files that will be loaded. You can also set up further preprocessing steps, like filtering by top-emoji usage or loading only samples that contain a specific set of emojis (given as a string)
+
+### create/save/load classifier
+
+
+
+* create a new keras pipeline including a vectorizer. You can set up the number of neurons and the activation function per layer (the last layer is automatically adjusted to fit the label dimension). If doc2vec is deactivated, tfidf is used. The image above shows the configuration we used for our final classifier.
+* you can also save a trained classifier and load it again later from a `.pipeline` configuration file (either by selecting it in the file selector box or by giving the full path in the `clf_file` box)
+
+### train classifier
+
+
+
+* if sample data and a classifier are loaded, the classifier can be trained here
+
+### Test single predictions
+
+
+
+* in the playground tab you can predict single sentences and get the nearest emoji in sentiment space out of a given emoji set (see the sketch at the end of this README). You can also plot the predictions of all validation samples of the currently loaded twitter data that are labeled with given emojis
 
 ----
 
 ## Evaluation_sentiment_dataset.ipynb
 
-TODO
\ No newline at end of file
+this is just a notebook that performs and plots the predictions on our hand-labeled validation set
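+
+Such a single prediction boils down to the following calls (a rough sketch: `pm` is a trained pipeline as created above; the input sentence and the emoji target set are only examples):
+
+```python
+import twitter_learning as twl
+
+# pm.predict returns one sentiment vector (pos, neg, neu) per document:
+pred = pm.predict(["I love this!"])
+
+# map the predicted sentiment vector to the closest emoji
+# out of a custom target set:
+target_list = list("😀😭😂")
+print(twl.sent2emoji(pred, custom_target_emojis=target_list)[0])
+```
\ No newline at end of file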