annotated widgets in user interface

Jonas Weinz 2018-07-26 16:52:51 +02:00
parent 051c34ad5d
commit 89e4122187

@@ -105,9 +105,10 @@
" v_box_widgets = []\n",
" for v in list_widgets:\n",
" for h in v:\n",
" if 'description' in h[0].__dir__():\n",
" if 'description' in h[0].__dir__() and h[1] is not None:\n",
" h[0].description = h[1]\n",
" shown_widgets[h[1]] = h[0]\n",
" if h[1] is not None:\n",
" shown_widgets[h[1]] = h[0]\n",
" h_box_widgets.append(h[0])\n",
" v_box_widgets.append(widgets.HBox(h_box_widgets))\n",
" h_box_widgets = []\n",
@@ -157,12 +158,12 @@
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e508c9a1f8634b7ba790cf2f291e215a",
"model_id": "9035abacb17b41e4ac3875663fb23014",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Tab(children=(VBox(children=(HBox(children=(Text(value='./data_en/', description='root_path'), Button(descript…"
"Tab(children=(VBox(children=(HBox(children=(HTML(value='<b> Data Root Folder: </b> <br> setup the folder conta…"
]
},
"metadata": {},
@@ -176,17 +177,23 @@
"create_area(\"load dataset 💾\",\n",
" [\n",
" [\n",
" (widgets.HTML(\"<b> Data Root Folder: </b> <br> setup the folder containing *.json train data \"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(value=\"./data_en/\"), \"root_path\"),\n",
" (widgets.Button(), \"set_path\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Loading and preprocessing options: </b> <br> setup the range of files to load. Only_emoticons will filter out 'non-smiley' emojis, min_words is the minimum amount of words for one document. Also you can setup top-emoji filtering or only load samples containing a custom emoji set\"), None)\n",
" ],\n",
" [\n",
" (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n",
" (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\"),\n",
" (widgets.Checkbox(value=False,disabled=True), \"apply_lemmatization_and_stemming\"),\n",
" (widgets.BoundedIntText(value=5,min=0, max=10), \"min_words\")\n",
" ],\n",
" [\n",
" (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
" #(widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\")\n",
" (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n",
" (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\"),\n",
" (widgets.Text(value=\"\"), \"custom_emojis\")\n",
@@ -220,6 +227,9 @@
" (classifier_tab, \"classifier_tab\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Create new Classifier: </b> <br> create a new keras classifier with layer options from above. Also a vectorizer will be trained on loaded sample data. If doc2vec is disabled, TFIDF is used\"), None)\n",
" ],\n",
" [\n",
" (widgets.Checkbox(value=True),\"use_doc2vec\"),\n",
" (widgets.Checkbox(value=True),\"d2v_use_pretrained\"),\n",
" (widgets.IntText(value=100),\"d2v_size\"),\n",
@@ -230,14 +240,14 @@
" (widgets.Button(), \"create_classifier\")\n",
" ],\n",
" [\n",
" (widgets.Label(\"save_area:\"), \"save_area:\")\n",
" (widgets.HTML(\"<b> Save Classifier: </b>\"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(), \"classifier_name\"),\n",
" (widgets.Button(), \"save_classifier\")\n",
" ],\n",
" [\n",
" (widgets.Label(\"load_area:\"), \"load_area:\")\n",
" (widgets.HTML(\"<b> Load Classifier: </b>\"), None)\n",
" ],\n",
" [\n",
" (widgets.Select(options=sorted(glob.glob(\"./*.pipeline\"))), \"clf_file_selector\"),\n",
@@ -250,11 +260,17 @@
"create_area(\"train classifier 🎓\", \n",
" [\n",
" [\n",
" (widgets.IntSlider(value=0,min=0,max=0), \"batch_size\"),\n",
" (widgets.FloatSlider(value=0.15, min=0, max=1), \"val_split\"),\n",
" (widgets.IntText(value=1), \"n_epochs\")\n",
" (widgets.HTML(\"<b> Custom Batch Settings: </b> <br> (Ignored if batch_size is 0)\"), None)\n",
" ],\n",
" [\n",
" (widgets.IntSlider(value=0,min=0,max=0), \"batch_size\"),\n",
" (widgets.FloatSlider(value=0.15, min=0, max=1), \"val_split\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Train: </b>\"), None)\n",
" ],\n",
" [\n",
" (widgets.IntText(value=1), \"n_epochs\"),\n",
" (widgets.Button(),\"train\")\n",
" ]\n",
" ], \n",
@@ -262,16 +278,22 @@
"create_area(\"playground 😎\",\n",
" [\n",
" [\n",
" (widgets.HTML(\"<b> predict single sentence </b> <br> (uses min distance to given emojis in prediction_ground_set)\"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(),\"test_input\"),\n",
" (widgets.HTML(),\"prediction\"),\n",
" (widgets.Text(value=\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\"),\"prediction_ground_set\")\n",
" (widgets.Text(value=\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\"),\"prediction_ground_set\"),\n",
" (widgets.HTML(\"<h1>∅</h1>\"),\"prediction\"),\n",
" ],\n",
" [\n",
" (widgets.Checkbox(),\"show_sorted_list\"),\n",
" (widgets.Button(),\"show_plot\")\n",
" ],\n",
" [\n",
" (widgets.Text(), \"validation_emojis\"),\n",
" (widgets.HTML(\"<b> Test on loaded validation set: </b> <br> (performs prediction plot on all validation samples that are labeled with given emojis)\"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(value=\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\"), \"validation_emojis\"),\n",
" (widgets.Button(),\"show_validation_plot\")\n",
" ]\n",
" ],\n",
@@ -434,7 +456,6 @@
" sys.stderr.write(\"ERROR: no json files available in \" + shown_widgets[\"root_path\"].value + \"\\n\")\n",
" set_widget_visibility([\"file_range\",\n",
" \"only_emoticons\",\n",
" \"k_means_cluster\",\n",
" \"n_top_emojis\",\n",
" \"apply_lemmatization_and_stemming\",\n",
" \"load_data\"], False)\n",
@@ -444,7 +465,6 @@
" jp(files, headers=[\"fileindex\",\"filepath\"])\n",
" set_widget_visibility([\"file_range\",\n",
" \"only_emoticons\",\n",
" \"k_means_cluster\",\n",
" \"n_top_emojis\",\n",
" \"apply_lemmatization_and_stemming\",\n",
" \"load_data\"], True)\n",
@@ -476,7 +496,7 @@
" sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
" n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
" file_range=range(r[0], r[1]),\n",
" n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
" n_kmeans_cluster=-1,\n",
" read_progress_callback=p_r.update,\n",
" stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
" apply_stemming = lemm_and_stemm,\n",
@@ -801,13 +821,6 @@
"shown_widgets[\"show_plot\"].on_click(plot_pred)\n",
"shown_widgets[\"show_validation_plot\"].on_click(plot_subset_pred)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {