{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Continous Learner for Emoji classifier 🤓\n",
"**usage:**\n",
"run all cells, then go to the [user interface](#User-Interface)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n",
"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
"[nltk_data] /home/jonas/nltk_data...\n",
"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
"[nltk_data] date!\n",
"[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n"
]
},
{
"ename": "NameError",
"evalue": "name 'min_words' is not defined",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNameError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-2-ce00b6a80bda>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0msimple_twitter_learning\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mstl\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mglob\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0msklearn\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfeature_extraction\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtext\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mCountVectorizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTfidfVectorizer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mHashingVectorizer\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpickle\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Dokumente/gitRepos/NLP-LAB/Project/simple_approach/simple_twitter_learning.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 164\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 165\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 166\u001b[0;31m \u001b[0;32mclass\u001b[0m \u001b[0msample_data_manager\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobject\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 167\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mstaticmethod\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 168\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mgenerate_and_read\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0monly_emoticons\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mapply_stemming\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_top_emojis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfile_range\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn_kmeans_cluster\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mread_progress_callback\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstem_progress_callback\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0memoji_mean\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcustom_target_emojis\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmin_words\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Dokumente/gitRepos/NLP-LAB/Project/simple_approach/simple_twitter_learning.py\u001b[0m in \u001b[0;36msample_data_manager\u001b[0;34m()\u001b[0m\n\u001b[1;32m 412\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"remaining samples after custom emoji filtering: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mlabels\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 413\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 414\u001b[0;31m \u001b[0;32mdef\u001b[0m \u001b[0mfilter_by_sentence_length\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmin_words\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmin_words\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 415\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mplain_text\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 416\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNameError\u001b[0m: name 'min_words' is not defined"
]
}
],
"source": [
"import simple_twitter_learning as stl\n",
"import glob\n",
"import sys\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"import pickle"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## user interface area:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* UI helper functions and global states"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import clear_output, Markdown, Math\n",
"import ipywidgets as widgets\n",
"\n",
"out_areas = {}\n",
"shown_widgets = {}\n",
"tab_manager = widgets.Tab()\n",
"\n",
"def mp(obj):\n",
" display(Markdown(obj))\n",
"\n",
"def set_widget_visibility(widget_names, visible=True):\n",
" for w in widget_names:\n",
" shown_widgets[w].disabled = not visible\n",
"\n",
"def create_area(area_name:str, list_widgets:list, out_name:str, tab=tab_manager):\n",
" \"\"\"\n",
" creates a table of widgets with corresponding output area below\n",
" \n",
" @param area_name: title of the area\n",
" @param list_widgets: list of tuples: (widget, name:str)\n",
" @param out_name: name for the output area\n",
" \"\"\"\n",
" if out_name is not None:\n",
" out = widgets.Output()\n",
" out_areas[out_name] = out\n",
" h_box_widgets = []\n",
" v_box_widgets = []\n",
" for v in list_widgets:\n",
" for h in v:\n",
" if 'description' in h[0].__dir__():\n",
" h[0].description = h[1]\n",
" shown_widgets[h[1]] = h[0]\n",
" h_box_widgets.append(h[0])\n",
" v_box_widgets.append(widgets.HBox(h_box_widgets))\n",
" h_box_widgets = []\n",
" \n",
" if out_name is not None:\n",
" v_box_widgets += [out]\n",
" tab.children = list(tab.children) + [widgets.VBox(v_box_widgets)]\n",
" tab.set_title(len(tab.children) - 1, area_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* build UI"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"mp(\"----\")\n",
"mp(\"## User Interface\")\n",
"# create widgets\n",
"create_area(\"load dataset 💾\",\n",
" [\n",
" [\n",
" (widgets.Text(value=\"./data_en/\"), \"root_path\"),\n",
" (widgets.Button(), \"set_path\")\n",
" ],\n",
" [\n",
" (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n",
" (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\"),\n",
" (widgets.Checkbox(value=False,disabled=True), \"apply_lemmatization_and_stemming\"),\n",
" (widgets.BoundedIntText(value=5,min=0, max=10), \"min_words\")\n",
" ],\n",
" [\n",
" (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
" (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n",
" (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\"),\n",
" (widgets.Text(value=\"\"), \"custom_emojis\")\n",
" ],\n",
" [\n",
" (widgets.Button(disabled=True),\"load_data\")\n",
" ]\n",
" ],\n",
" \"load\")\n",
"\n",
"classifier_tab = widgets.Tab()\n",
"\n",
"create_area(\"keras\",\n",
" [\n",
" [\n",
" (widgets.IntSlider(min=0, max=10), \"n_keras_layer\")\n",
" ],\n",
" [\n",
" (widgets.HBox([]), \"n_keras_neurons\")\n",
" ],\n",
" [\n",
" (widgets.HBox([]), \"keras_funcs\")\n",
" ]\n",
" ],\n",
" None,\n",
" classifier_tab)\n",
"\n",
"create_area(\"create/save/load classifier\",\n",
" [\n",
" [\n",
" (classifier_tab, \"classifier_tab\")\n",
" ],\n",
" [\n",
" (widgets.Checkbox(value=True),\"use_doc2vec\"),\n",
" (widgets.Checkbox(value=True),\"d2v_use_pretrained\"),\n",
" (widgets.IntText(value=100),\"d2v_size\"),\n",
" (widgets.IntText(value=8), \"d2v_window\"),\n",
" (widgets.IntSlider(value=5, min=0, max=32), \"d2v_min_count\")\n",
" ],\n",
" [\n",
" (widgets.Button(), \"create_classifier\")\n",
" ],\n",
" [\n",
" (widgets.Label(\"save_area:\"), \"save_area:\")\n",
" ],\n",
" [\n",
" (widgets.Text(), \"classifier_name\"),\n",
" (widgets.Button(), \"save_classifier\")\n",
" ],\n",
" [\n",
" (widgets.Label(\"load_area:\"), \"load_area:\")\n",
" ],\n",
" [\n",
" (widgets.Select(options=sorted(glob.glob(\"./*.pipeline\"))), \"clf_file_selector\"),\n",
" (widgets.Text(), \"clf_file\"),\n",
" (widgets.Button(), \"load_classifier\")\n",
" ]\n",
" ],\n",
" \"create\")\n",
"\n",
"create_area(\"train classifier 🎓\", \n",
" [\n",
" [\n",
" (widgets.IntSlider(value=0,min=0,max=0), \"batch_size\"),\n",
" (widgets.FloatSlider(value=0.15, min=0, max=1), \"val_split\"),\n",
" (widgets.IntText(value=1), \"n_epochs\")\n",
" ],\n",
" [\n",
" (widgets.Button(),\"train\")\n",
" ]\n",
" ], \n",
" \"train\" )\n",
"create_area(\"playground 😎\",\n",
" [\n",
" [\n",
" (widgets.Text(),\"test_input\"),\n",
" (widgets.HTML(),\"prediction\")\n",
" ],\n",
" [\n",
" (widgets.Checkbox(),\"show_sorted_list\")\n",
" ]\n",
" ],\n",
" \"playground\")\n",
"\n",
"tab_manager"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## global variables:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sdm = None\n",
"pm = None\n",
"tr = None"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## pretty jupyter print"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import collections\n",
"import traceback\n",
"from pprint import pprint as pp\n",
"\n",
"def jupyter_print(obj, cell_w = 10, headers=None, p_type=True, ret_mdown=False, index_offset=0, list_horizontal=False):\n",
" \"\"\"\n",
" pretty hacky function to convert arrays, lists and matrices into\n",
" nice readable markdown code and render that in jupyter. if that is not possible\n",
" it will use pretty print instead\n",
" \"\"\"\n",
" try:\n",
" ts = \"**Type:** \" + str(type(obj)).strip(\"<>\") + \"\\n\\n\"\n",
" if type(obj) == str:\n",
" display(Markdown(obj))\n",
" elif isinstance(obj, collections.Iterable):\n",
" if isinstance(obj[0], collections.Iterable) and type(obj[0]) is not str:\n",
" # we have a table\n",
" \n",
" if headers is None:\n",
" headers = [str(i) for i in range(len(obj[0]))]\n",
" \n",
" if len(headers) < len(obj[0]):\n",
" headers += [\" \" for i in range(len(obj[0]) - len(headers))]\n",
" \n",
" s = \"|\" + \" \" * cell_w + \"|\"\n",
" \n",
" for h in headers:\n",
" s += str(h) + \" \" * (cell_w - len(h)) + \"|\"\n",
" s += \"\\n|\" + \"-\" * (len(headers) + (len(headers) + 1) * cell_w) + \"|\\n\"\n",
" \n",
" #s = (\"|\" + (\" \" * (cell_w))) * len(obj[0]) + \"|\\n\" + \"|\" + (\"-\" * (cell_w + 1)) * len(obj[0])\n",
" #s += '|\\n'\n",
" \n",
" row = index_offset\n",
" \n",
" for o in obj:\n",
" s += \"|**\" + str(row) + \"**\" + \" \" * (cell_w - (len(str(row))+4))\n",
" row += 1\n",
" for i in o:\n",
" s += \"|\" + str(i) + \" \" * (cell_w - len(str(i)))\n",
" s+=\"|\" + '\\n'\n",
" s += ts\n",
" display(Markdown(s))\n",
" return s if ret_mdown else None\n",
" else:\n",
" # we have a list\n",
" \n",
" \n",
" if headers is None:\n",
" headers = [\"index\",\"value\"]\n",
" \n",
" index_title = headers[0]\n",
" value_title = headers[1]\n",
" \n",
" s = \"|\" + index_title + \" \" * (cell_w - len(value_title)) + \"|\" + value_title + \" \" * (cell_w - len(value_title)) + \"|\" + '\\n'\n",
" s += \"|\" + \"-\" * (1 + 2 * cell_w) + '|\\n'\n",
" i = index_offset\n",
" for o in obj:\n",
" s_i = str(i)\n",
" s_o = str(o)\n",
" s += \"|\" + s_i + \" \" * (cell_w - len(s_i)) + \"|\" + s_o + \" \" * (cell_w - len(s_o)) + \"|\" + '\\n'\n",
" i+=1\n",
" s += ts\n",
" #print(s)\n",
" display(Markdown(s))\n",
" return s if ret_mdown else None\n",
" else:\n",
" jupyter_print([obj])\n",
" except Exception as e:\n",
" print(ts)\n",
" pp(obj) \n",
"\n",
"jp = jupyter_print"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## output progress printing:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class progress_indicator(object):\n",
" \n",
" def __init__(self, description=\"progress\"):\n",
" self.w = widgets.FloatProgress(value=0, min=0,max=1, description = description)\n",
" display(self.w)\n",
" def update(self, val):\n",
" self.w.value = val\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## load datasets"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def set_path(b):\n",
" with out_areas[\"load\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" files = sorted(glob.glob(shown_widgets[\"root_path\"].value + \"/*.json\"))\n",
" \n",
" if len(files) == 0:\n",
" sys.stderr.write(\"ERROR: no json files available in \" + shown_widgets[\"root_path\"].value + \"\\n\")\n",
" set_widget_visibility([\"file_range\",\n",
" \"only_emoticons\",\n",
" \"k_means_cluster\",\n",
" \"n_top_emojis\",\n",
" \"apply_lemmatization_and_stemming\",\n",
" \"load_data\"], False)\n",
" return\n",
" \n",
" mp(\"**available files:**\")\n",
" jp(files, headers=[\"fileindex\",\"filepath\"])\n",
" set_widget_visibility([\"file_range\",\n",
" \"only_emoticons\",\n",
" \"k_means_cluster\",\n",
" \"n_top_emojis\",\n",
" \"apply_lemmatization_and_stemming\",\n",
" \"load_data\"], True)\n",
" shown_widgets[\"file_range\"].min=0\n",
" shown_widgets[\"file_range\"].max=len(files) -1\n",
"\n",
"def load_data(b):\n",
" global sdm\n",
" with out_areas[\"load\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" \n",
" r = shown_widgets[\"file_range\"].value\n",
" r = (r[0], r[1] + 1) # range has to be exclusive according to the last element!\n",
" \n",
" p_r = progress_indicator(\"reading progress\")\n",
" \n",
" lemm_and_stemm = shown_widgets[\"apply_lemmatization_and_stemming\"].value\n",
" \n",
" if lemm_and_stemm:\n",
" p_s = progress_indicator(\"stemming progress\")\n",
" \n",
" emoji_mean = shown_widgets[\"label_criteria\"].value == \"mean\"\n",
" \n",
" custom_emojis = list(shown_widgets[\"custom_emojis\"].value)\n",
" \n",
" min_words = shown_widgets[\"min_words\"].value\n",
" \n",
" sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
" n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
" file_range=range(r[0], r[1]),\n",
" n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
" read_progress_callback=p_r.update,\n",
" stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
" apply_stemming = lemm_and_stemm,\n",
" emoji_mean=emoji_mean,\n",
" custom_target_emojis=custom_emojis if len(custom_emojis) > 0 else None,\n",
" min_words=min_words)\n",
" shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
" \n",
" \n",
"# linking functions with buttons:\n",
"shown_widgets[\"set_path\"].on_click(set_path)\n",
"shown_widgets[\"load_data\"].on_click(load_data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## train"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def train(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"train\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if sdm is None or pm is None:\n",
" sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n",
" return\n",
" \n",
" batch_size = shown_widgets[\"batch_size\"].value\n",
" val_split = shown_widgets[\"val_split\"].value\n",
" n_epochs = shown_widgets[\"n_epochs\"].value\n",
" \n",
" print(\"update train test split:\")\n",
" sdm.create_train_test_split(split=val_split)\n",
" \n",
" print(\"fit\")\n",
" \n",
" p = progress_indicator()\n",
" \n",
" tr = stl.trainer(sdm=sdm, pm=pm)\n",
" tr.fit(progress_callback=p.update, batch_size=batch_size if batch_size > 0 else None, n_epochs=n_epochs)\n",
" \n",
"\n",
"# linking:\n",
"shown_widgets[\"train\"].on_click(train)\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## create classifier"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"keras_acivations = [\n",
" \"softmax\",\n",
" \"elu\",\n",
" \"selu\",\n",
" \"softplus\",\n",
" \"softsign\",\n",
" \"relu\",\n",
" \"tanh\",\n",
" \"sigmoid\",\n",
" \"hard_sigmoid\",\n",
" \"linear\",\n",
" \"None\"\n",
"]\n",
"\n",
"def populate_keras_options(b):\n",
" n_layers = shown_widgets[\"n_keras_layer\"].value\n",
" hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n",
" hbox_funcs = shown_widgets[\"keras_funcs\"]\n",
" \n",
" hbox_neurons.children = [widgets.IntText(description = str(i)) for i in range(n_layers)]\n",
" hbox_funcs.children = [widgets.Dropdown(options=keras_acivations,description = str(i)) for i in range(n_layers)]\n",
" \n",
" #hbox_neurons.children[-1].disabled = True\n",
"\n",
"def create_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if sdm is None:\n",
" sys.stderr.write(\"load a dataset first!\\n\")\n",
" return\n",
" \n",
" chosen_classifier = classifier_tab.get_title(classifier_tab.selected_index)\n",
" \n",
" mp(\"**chosen classifier**: `\" + chosen_classifier + \"`\")\n",
" \n",
" # creating the vectorizer\n",
" vectorizer = None\n",
" if shown_widgets[\"use_doc2vec\"].value:\n",
" if shown_widgets[\"d2v_use_pretrained\"].value:\n",
" vectorizer = pickle.load( open( \"doc2VecModel.p\", \"rb\" ) )\n",
" else:\n",
" vectorizer = stl.skd2v.Doc2VecTransformer(size=shown_widgets[\"d2v_size\"].value,\n",
" window=shown_widgets[\"d2v_window\"].value,\n",
" min_count=shown_widgets[\"d2v_min_count\"].value)\n",
" else:\n",
" vectorizer=TfidfVectorizer(stop_words='english')\n",
" \n",
" # TODO: add more classifier options here:\n",
" if chosen_classifier is 'keras':\n",
" sdm.create_train_test_split()\n",
" \n",
" n_layers = shown_widgets[\"n_keras_layer\"].value\n",
" hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n",
" hbox_funcs = shown_widgets[\"keras_funcs\"]\n",
"\n",
" layers = []\n",
" for i in range(n_layers):\n",
" func = hbox_funcs.children[i].value\n",
" if func == 'None':\n",
" func = None\n",
" layers.append((hbox_neurons.children[i].value, func))\n",
" \n",
" # modify last layer:\n",
" layers[-1] = (sdm.y.shape[1], layers[-1][1])\n",
" \n",
" mp(\"**layers:** \")\n",
" jp(layers, headers=['#neurons', 'activation_func'])\n",
"\n",
" pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer, layers=layers, sdm=sdm, fit_vectorizer=not shown_widgets[\"d2v_use_pretrained\"].value)\n",
"\n",
"def save_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if pm is None:\n",
" sys.stderr.write(\"ERROR: create classifier first\")\n",
" return\n",
" \n",
" pm.save(shown_widgets[\"classifier_name\"].value)\n",
"\n",
"def load_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
"\n",
"def update_file_selector(b):\n",
" shown_widgets[\"clf_file_selector\"].options = sorted(glob.glob(\"./*.pipeline\"))\n",
"\n",
"def clf_file_selector(b):\n",
" shown_widgets[\"clf_file\"].value = shown_widgets[\"clf_file_selector\"].value\n",
" update_file_selector(b)\n",
"\n",
"def load_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" clf_file = shown_widgets[\"clf_file\"].value\n",
" pm = stl.pipeline_manager.load_from_pipeline_file(clf_file)\n",
" \n",
"\n",
"# link\n",
"shown_widgets[\"n_keras_layer\"].observe(populate_keras_options)\n",
"shown_widgets[\"create_classifier\"].on_click(create_classifier)\n",
"shown_widgets[\"save_classifier\"].on_click(save_classifier)\n",
"shown_widgets[\"load_classifier\"].on_click(load_classifier)\n",
"shown_widgets[\"clf_file_selector\"].observe(clf_file_selector)\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## testing area"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def test_input(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"playground\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if pm is None:\n",
" sys.stderr.write(\"ERROR: load or create classifier first\")\n",
" return\n",
" X = shown_widgets[\"test_input\"].value\n",
" pred = pm.predict([X])\n",
" shown_widgets[\"prediction\"].value = \"<h1> \" + str(stl.sent2emoji(pred)[0]) + \"</h1>\"\n",
" if shown_widgets[\"show_sorted_list\"].value:\n",
" mp(\"## \" + \"\".join(stl.edist.sentiment_vector_to_emoji(pred, only_emoticons=True, n_results=100)))\n",
"\n",
"#link\n",
"shown_widgets[\"test_input\"].observe(test_input)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}