nlp-lab/Project/Tools/User_Interface.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# User Interface Configuration and Information\n",
"We want to create a small user interface for our prototype in emoji prediction"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Needed\n",
"We want to define needed components for this UI"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"import ipywidgets as widgets\n",
"from IPython.display import display, clear_output\n",
"import math\n",
"import datetime"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Trigger refresh of prediction\n",
"each action of typing and sending should yield a new updated prediction for best fitting emojis"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Initial definition of emojis used later"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#locally defined based on the first analysis of parts of our twitter data: resulting in the 20 most used emojis\n",
"#we used them for our first approaches of prediction\n",
"\n",
"top_emojis = list(\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\")\n",
"#possible initial set of predictions, only used in naive test cases\n",
"predictions = [\"🤐\",\"🤑\",\"🤒\",\"🤓\",\"🤔\",\"🤕\",\"🤗\",\"🤘\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Advanced Approach\n",
"define the classifier for advanced prediction, used for the sentiment prediction"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n",
"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
"[nltk_data] /home/jonas/nltk_data...\n",
"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
"[nltk_data] date!\n",
"[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n"
]
}
],
"source": [
"#navigation into right path and generating classifier\n",
"import sys\n",
"sys.path.append(\"..\")\n",
"sys.path.append(\"../naive_approach\")\n",
"\n",
"import advanced_approach.twitter_learning as twl\n",
"#clf_advanced = stl.pipeline_manager.load_pipeline_from_files( '\"/Users/Carsten/DataSets/NLP_LAB/tfidf_final/final_epoch01.pipeline', ['keras_model'], ['vectorizer', 'keras_model'])\n",
"clf_advanced = twl.pipeline_manager.load_from_pipeline_file(\"/home/jonas/Dokumente/NLP_DATA/python_dumps/pipelines/tfidf_final/final_epoch01.pipeline\")\n",
"import Tools.Emoji_Distance as ed"
]
},
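{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small, hedged sanity check of the loaded pipeline: it uses the same `predict` and `sentiment_vector_to_emoji` calls as the merged prediction further below; the example sentence is arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#illustrative sketch only: predict a sentiment vector for an arbitrary sentence\n",
"#and map it back to the closest emojis (assumes the pipeline above loaded successfully)\n",
"example_sentence = \"what a wonderful day\"\n",
"example_sentiment = clf_advanced.predict([example_sentence])\n",
"example_emojis = ed.sentiment_vector_to_emoji(example_sentiment, n_results = 3, custom_target_emojis = top_emojis)\n",
"print(example_emojis)"
]
},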
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Generate new Sample for online learning / reinforcement learning"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def generate_new_training_sample (msg, emoji):\n",
" sentiment = ed.emoji_to_sentiment_vector(emoji)\n",
" \n",
" #TODO message msg could be filtred\n",
" text = msg\n",
" return text, sentiment"
]
},
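{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal illustrative call (the example message and emoji are arbitrary; `emoji_to_sentiment_vector` from `Emoji_Distance` is assumed to know the given emoji):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#illustrative sketch only: build one (text, sentiment) training pair from a chat message\n",
"example_text, example_sentiment = generate_new_training_sample(\"that was a great day\", \"😂\")\n",
"print(example_text, example_sentiment)"
]
},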
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Naive Approach\n",
"for topic related emoji prediction"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#sys.path.append(\"..\")\n",
"#print(sys.path)\n",
"\n",
"import naive_approach as clf_naive"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"tmp_dict = clf_naive.prepareData()"
]
},
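{
"cell_type": "markdown",
"metadata": {},
"source": [
"An illustrative call of the naive predictor, using the same call signature as in the merged prediction below (the example sentence is arbitrary):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#illustrative sketch only: topic-related emoji prediction with the naive approach\n",
"example_emojis, example_values = clf_naive.predict(sentence = \"happy birthday to you\", lookup = tmp_dict, n = 3)\n",
"print(example_emojis, example_values)"
]
},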
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Merge Predictions\n",
"combine the predictions of both approaches"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def merged_prediction(msg , split = 0.5 , number = 8, target_emojis = top_emojis):\n",
" \n",
" #calc ratio of prediction splitted between advanced aprroach and naive approach\n",
" number_advanced = round(split*number)\n",
" number_naive = round((1-split)*number)\n",
" \n",
" #predict emojis with the naive approach\n",
" prediction_naive , prediction_naive_values = clf_naive.predict(sentence = msg, lookup= tmp_dict, n = number_naive)\n",
"\n",
" #filter 0 values\n",
" tmp1 = []\n",
" tmp2 = []\n",
" epsilon = 0.0001\n",
"\n",
" for i in range(len(prediction_naive)):\n",
" if(abs(prediction_naive_values[i]) > epsilon):\n",
" tmp1.append(prediction_naive[i])\n",
" tmp2.append(prediction_naive[i])\n",
"\n",
" prediction_naive = tmp1\n",
" prediction_naive_values = tmp2\n",
" \n",
" if(len(prediction_naive) < number_naive):\n",
" #print(\"only few matches\")\n",
" number_advanced = number - len(prediction_naive)\n",
" \n",
" #print(number, number_advanced, number_naive)\n",
" \n",
" #predict the advanced approach\n",
" sentiment = clf_advanced.predict([msg])\n",
" prediction_advanced = ed.sentiment_vector_to_emoji(sentiment,n_results = number_advanced, custom_target_emojis=target_emojis)\n",
" \n",
" #concat both predictions\n",
" prediction = list(prediction_advanced)+list(prediction_naive)\n",
" \n",
" return prediction[:number], len(tmp1)"
]
},
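{
"cell_type": "markdown",
"metadata": {},
"source": [
"A hedged usage sketch of the merged prediction (assumes both classifiers above are loaded; the example message is arbitrary):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#illustrative sketch only: half of the 8 suggestions come from the advanced approach,\n",
"#half from the naive approach (missing naive matches are filled up with advanced ones)\n",
"example_prediction, n_naive_matches = merged_prediction(msg = \"I love sunny days at the beach\", split = 0.5, number = 8, target_emojis = top_emojis)\n",
"print(example_prediction, n_naive_matches)"
]
},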
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Actions triggered when something is changed"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def trigger_new_prediction(all_chat, current_message):\n",
" global predictions\n",
" \n",
" #random prediction for initial test\n",
" #random.shuffle(predictions)\n",
" \n",
" #first prediction only using advanced approach\n",
" #sent = clf_advanced.predict([current_message])\n",
" #p = ed.sentiment_vector_to_emoji(sent,n_results = 8, custom_target_emojis=top_emojis)\n",
" \n",
" #merged prediction\n",
" if(current_message != \"\"):\n",
" p,n = merged_prediction(msg = current_message, target_emojis=top_emojis)\n",
"\n",
" predictions = p\n",
" update_descriptions()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### User Output\n",
"the wiritten text as an overview or list of text"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"all_text = \"no text yet \\n\"\n",
"\n",
"out = widgets.Output(layout = widgets.Layout(max_height = \"500px\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### User Input\n",
"the user has to interact with our UI so hee needs:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Text Input field\n",
"a simple line for text input on the bottom of UI"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"text_input = widgets.Text()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def submit_new_message(p):\n",
" global all_text\n",
" bar = \"----------- \\n\"\n",
" time = str(datetime.datetime.now())+\"\\n\"\n",
" msg = text_input.value +\"\\n\"\n",
" new_message = bar + time + msg\n",
" all_text += new_message \n",
" \n",
" with out:\n",
" clear_output()\n",
" print(all_text)\n",
" \n",
" trigger_new_prediction(all_text, text_input.value)\n",
" update_descriptions()\n",
" text_input.value = \"\""
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"text_input.on_submit(submit_new_message)\n",
"\n",
"#text_input.observe(lambda b: trigger_new_prediction(None, text_input.value))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### A sent button to enter yout typed in message\n",
"alternatively it should be also possible to simply type enter"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"sent_button = widgets.Button(description = \"Sent\")\n",
"\n",
"sent_button.on_click(submit_new_message)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### A list of buttons for selecting predicted emojis\n",
"a set of fixed size of buttons with a dynamic changeable labeling replaced by the unicode emoji"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p0_button = widgets.Button(description = \"p0\")\n",
"\n",
"def on_p0_button_click(p):\n",
" update_descriptions() \n",
" #with out:\n",
" text_input.value += \" \"+predictions[0]\n",
"\n",
"p0_button.on_click(on_p0_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p1_button = widgets.Button(description = \"p1\")\n",
"\n",
"def on_p1_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[1]\n",
"\n",
"p1_button.on_click(on_p1_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p2_button = widgets.Button(description = \"p2\")\n",
"\n",
"def on_p2_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[2]\n",
"\n",
"p2_button.on_click(on_p2_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p3_button = widgets.Button(description = \"p3\")\n",
"\n",
"def on_p3_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[3]\n",
"\n",
"p3_button.on_click(on_p3_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p4_button = widgets.Button(description = \"p4\")\n",
"\n",
"def on_p4_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[4]\n",
"\n",
"p4_button.on_click(on_p4_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p5_button = widgets.Button(description = \"p5\")\n",
"\n",
"def on_p5_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[5]\n",
"\n",
"p5_button.on_click(on_p5_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p6_button = widgets.Button(description = \"p6\")\n",
"\n",
"def on_p6_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[6]\n",
"\n",
"p6_button.on_click(on_p6_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"p7_button = widgets.Button(description = \"p7\")\n",
"\n",
"def on_p7_button_click(p):\n",
" update_descriptions()\n",
" with out:\n",
" text_input.value += \" \"+predictions[7]\n",
"\n",
"p7_button.on_click(on_p7_button_click)"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def update_descriptions():\n",
" global predictions\n",
" p0_button.description = predictions[0]\n",
" p1_button.description = predictions[1]\n",
" p2_button.description = predictions[2]\n",
" p3_button.description = predictions[3]\n",
" p4_button.description = predictions[4]\n",
" p5_button.description = predictions[5]\n",
" p6_button.description = predictions[6]\n",
" p7_button.description = predictions[7]"
]
},
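{
"cell_type": "markdown",
"metadata": {},
"source": [
"The eight button cells above could equivalently be created in a loop; a minimal sketch, not used by the UI below, just shown for comparison:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#alternative sketch: build the prediction buttons in a loop instead of one cell per button\n",
"#a factory function is used so that each click handler captures its own index\n",
"def make_prediction_button(index):\n",
"    button = widgets.Button(description = \"p\" + str(index))\n",
"    def on_click(b):\n",
"        update_descriptions()\n",
"        text_input.value += \" \" + predictions[index]\n",
"    button.on_click(on_click)\n",
"    return button\n",
"\n",
"prediction_buttons = [make_prediction_button(i) for i in range(8)]"
]
},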
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Additional\n",
"Developer Information\n",
"#### Output of in and out commands\n",
"#### Prob distribution or whole list of sorted emojis\n",
"#### configuration information"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Prototype UI"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "de02918c53e745d7b1b8fd47226464db",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"VBox(children=(VBox(children=(Output(layout=Layout(max_height='500px')),)), HBox(children=(Button(description=…"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"text_output = widgets.VBox([out])\n",
"all_prediction_buttons = widgets.HBox([p0_button,p1_button,p2_button,p3_button,p4_button,p5_button,p6_button,p7_button])\n",
"user_input = widgets.HBox([text_input,sent_button])\n",
"total_layout = widgets.VBox([text_output,all_prediction_buttons,user_input],layout = widgets.Layout(max_width = \"450px\"))\n",
"display(total_layout)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"improve naive approach\n",
"word2vec\n",
"\n",
"longer sentences\n",
"own training set"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## I Am Groot"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"text_input_groot = widgets.Text()\n",
"out_groot = widgets.Output(layout = widgets.Layout(max_height = \"500px\"))\n",
"all_text_groot = \"no text yet \\n\"\n",
"text_output_groot = widgets.VBox([out_groot])\n",
"user_input_groot = widgets.HBox([text_input_groot])\n",
"total_layout_groot = widgets.VBox([text_output_groot,user_input_groot],layout = widgets.Layout(max_width = \"450px\"))"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [],
"source": [
"def submit_new_message_groot(p):\n",
" global all_text_groot\n",
" bar = \"----------- \\n\"\n",
" time = str(datetime.datetime.now())+\"\\n\"\n",
" msg = \"User: \"+text_input_groot.value +\"\\n\"\n",
" new_message = bar + time + msg\n",
" all_text_groot += new_message \n",
" \n",
" mp, n_topic = merged_prediction(msg)\n",
" \n",
" answer = \"ChatGROOT: I AM GROOT\" + mp[0]\n",
" \n",
" if n_topic > 0:\n",
" answer += mp[8 - n_topic]\n",
" \n",
" all_text_groot += answer + \"\\n\"\n",
" \n",
" with out_groot:\n",
" clear_output()\n",
" print(all_text_groot)\n",
" \n",
" text_input_groot.value = \"\""
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "4e56d0b43b594e34b799365a468301ee",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"VBox(children=(VBox(children=(Output(layout=Layout(max_height='500px')),)), HBox(children=(Text(value=''),))),…"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"text_input_groot.on_submit(submit_new_message_groot)\n",
"display(total_layout_groot)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}