
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Evaluation\n",
"We want to evaluate our approach"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Needed\n",
"We want to define needed components for this UI"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import random\n",
"import ipywidgets as widgets\n",
"from IPython.display import display, clear_output\n",
"import math\n",
"import datetime"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Trigger refresh of prediction\n",
"each action of typing and sending should yield a new updated prediction for best fitting emojis"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"Initial definition of emojis used later"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#locally defined based on the first analysis of parts of our twitter data: resulting in the 20 most used emojis\n",
"#we used them for our first approaches of prediction\n",
"top_emojis = ['😂','😭','😍','😩','😊','😘','🙏','🙌','😉','😁','😅','😎','😢','😒','😏','😌','😔','😋','😀','😤']\n",
"#possible initial set of predictions, only used in naive test cases\n",
"predictions = [\"🤐\",\"🤑\",\"🤒\",\"🤓\",\"🤔\",\"🤕\",\"🤗\",\"🤘\"]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Advanced Approach\n",
"define the classifier for advanced prediction, used for the sentiment prediction"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to /Users/Carsten/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n",
"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
"[nltk_data] /Users/Carsten/nltk_data...\n",
"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
"[nltk_data] date!\n",
"[nltk_data] Downloading package wordnet to /Users/Carsten/nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n"
]
}
],
"source": [
"#navigation into right path and generating classifier\n",
"import sys\n",
"sys.path.append(\"..\")\n",
"\n",
"import simple_approach.simple_twitter_learning as stl\n",
"clf_advanced = stl.pipeline_manager.load_pipeline_from_files( '../simple_approach/custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
"\n",
"import Tools.Emoji_Distance as ed"
]
},
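{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check of the advanced classifier (a minimal sketch: the example sentence is made up, and the resulting emojis depend on the trained model):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#minimal sanity-check sketch: map a made-up sentence to a sentiment vector,\n",
"#then to the closest emojis among our top_emojis\n",
"sentiment = clf_advanced.predict([\"I am so happy today\"])\n",
"ed.sentiment_vector_to_emoji(sentiment, n_results=4, custom_target_emojis=top_emojis)"
]
},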
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Generate new Sample for online learning / reinforcement learning"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def generate_new_training_sample (msg, emoji):\n",
" sentiment = ed.emoji_to_sentiment_vector(emoji)\n",
" \n",
" #TODO message msg could be filtred\n",
" text = msg\n",
" return text, sentiment"
]
},
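{
"cell_type": "markdown",
"metadata": {},
"source": [
"A short usage sketch (the message and emoji are made-up examples; the sentiment is the vector representation that Emoji_Distance assigns to the emoji):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hedged usage sketch: turn a message/emoji pair into a training sample\n",
"text, sentiment = generate_new_training_sample(\"hello world\", \"😂\")\n",
"print(text, sentiment)"
]
},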
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Naive Approach\n",
"for topic related emoji prediction"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#sys.path.append(\"..\")\n",
"#print(sys.path)\n",
"\n",
"import naive_approach.naive_approach as clf_naive"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"tmp_dict = clf_naive.prepareData()"
]
},
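{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal example call (a sketch: the sentence is made up, and `naive_approach` loads `word2vec.model` from the current working directory, so this only runs where that model file is present, as the error further below shows):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hedged sketch: top-4 naive predictions for a made-up sentence\n",
"#note: naive_approach.predict loads 'word2vec.model' from the current\n",
"#working directory, so this cell needs that model file to be present\n",
"emojis, values = clf_naive.predict(sentence=\"I love pizza\", lookup=tmp_dict, n=4)\n",
"print(emojis, values)"
]
},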
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Merge Predictions\n",
"combine the predictions of both approaches"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def merged_prediction(msg , split = 0.5 , number = 8, target_emojis = top_emojis):\n",
" \n",
" #calc ratio of prediction splitted between advanced aprroach and naive approach\n",
" number_advanced = round(split*number)\n",
" number_naive = round((1-split)*number)\n",
" \n",
" #predict emojis with the naive approach\n",
" prediction_naive , prediction_naive_values = clf_naive.predict(sentence = msg, lookup= tmp_dict, n = number_naive)\n",
"\n",
" #filter 0 values\n",
" tmp1 = []\n",
" tmp2 = []\n",
" epsilon = 0.0001\n",
"\n",
" for i in range(len(prediction_naive)):\n",
" if(abs(prediction_naive_values[i]) > epsilon):\n",
" tmp1.append(prediction_naive[i])\n",
" tmp2.append(prediction_naive[i])\n",
"\n",
" prediction_naive = tmp1\n",
" prediction_naive_values = tmp2\n",
" \n",
" if(len(prediction_naive) < number_naive):\n",
" #print(\"only few matches\")\n",
" number_advanced = number - len(prediction_naive)\n",
" \n",
" #print(number, number_advanced, number_naive)\n",
" \n",
" #predict the advanced approach\n",
" sentiment = clf_advanced.predict([msg])\n",
" prediction_advanced = ed.sentiment_vector_to_emoji(sentiment,n_results = number_advanced, custom_target_emojis=target_emojis)\n",
" \n",
" #concat both predictions\n",
" prediction = list(prediction_advanced)+list(prediction_naive)\n",
" \n",
" return prediction[:number]"
]
},
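{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal example call (a sketch: it needs both classifiers from above plus the word2vec model used by the naive approach; the sentence is taken from the evaluation table below):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hedged example: merge predictions, half advanced and half naive, for one message\n",
"merged_prediction(\"I go out for party tonight\", split=0.5, number=8, target_emojis=top_emojis)"
]
},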
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Actions triggered when something is changed"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def trigger_new_prediction(all_chat, current_message):\n",
" global predictions\n",
" \n",
" #random prediction for initial test\n",
" #random.shuffle(predictions)\n",
" \n",
" #first prediction only using advanced approach\n",
" #sent = clf_advanced.predict([current_message])\n",
" #p = ed.sentiment_vector_to_emoji(sent,n_results = 8, custom_target_emojis=top_emojis)\n",
" \n",
" #merged prediction\n",
" if(current_message != \"\"):\n",
" p = merged_prediction(msg = current_message, target_emojis=top_emojis)\n",
"\n",
" predictions = p\n",
" update_descriptions()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Trigger Prediction for CSV Table"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style>\n",
" .dataframe thead tr:only-child th {\n",
" text-align: right;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: left;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Sentence</th>\n",
" <th>prediction</th>\n",
" <th>topic hit</th>\n",
" <th>sentiment hit</th>\n",
" <th>both</th>\n",
" <th>ranked</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>Hi how are you?</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>do you've got time</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>I go out for party tonight</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>I'll take the bus or train</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>You look gorgeous in this dress</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Sentence prediction topic hit sentiment hit \\\n",
"0 Hi how are you? NaN NaN NaN \n",
"1 do you've got time NaN NaN NaN \n",
"2 I go out for party tonight NaN NaN NaN \n",
"3 I'll take the bus or train NaN NaN NaN \n",
"4 You look gorgeous in this dress NaN NaN NaN \n",
"\n",
" both ranked \n",
"0 NaN NaN \n",
"1 NaN NaN \n",
"2 NaN NaN \n",
"3 NaN NaN \n",
"4 NaN NaN "
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# get table\n",
"import pandas as pd\n",
"df = pd.read_csv(\"Evaluation Sentences - Tabellenblatt1.csv\")\n",
"df.head()"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hi how are you?\n"
]
},
{
"ename": "FileNotFoundError",
"evalue": "[Errno 2] No such file or directory: 'word2vec.model'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mFileNotFoundError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-11-22a65efd4496>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msentence\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mtrigger_new_prediction\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mall_chat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcurrent_message\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msentence\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 6\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprediction\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-8-20fe10f899eb>\u001b[0m in \u001b[0;36mtrigger_new_prediction\u001b[0;34m(all_chat, current_message)\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;31m#merged prediction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;32mif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcurrent_message\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;34m\"\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 13\u001b[0;31m \u001b[0mp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmerged_prediction\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcurrent_message\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_emojis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtop_emojis\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 15\u001b[0m \u001b[0mpredictions\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-7-5ed291336bae>\u001b[0m in \u001b[0;36mmerged_prediction\u001b[0;34m(msg, split, number, target_emojis)\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;31m#predict emojis with the naive approach\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0mprediction_naive\u001b[0m \u001b[0;34m,\u001b[0m \u001b[0mprediction_naive_values\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mclf_naive\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msentence\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmsg\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlookup\u001b[0m\u001b[0;34m=\u001b[0m \u001b[0mtmp_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnumber_naive\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;31m#filter 0 values\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/GitRepos/NLP-LAB/Project/naive_approach/naive_approach.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(sentence, lookup, emojis_to_consider, criteria, lang, n, t)\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msentence\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlookup\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0memojis_to_consider\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"all\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcriteria\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"threshold\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlang\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'eng'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mn\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0.9\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 100\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mevaluate_sentence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msentence\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlang\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0memojis_to_consider\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0memojis_to_consider\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 101\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/GitRepos/NLP-LAB/Project/naive_approach/naive_approach.py\u001b[0m in \u001b[0;36mevaluate_sentence\u001b[0;34m(sentence, description_key, lang, emojis_to_consider, stem)\u001b[0m\n\u001b[1;32m 44\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mevaluate_sentence\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0msentence\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdescription_key\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'description'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlang\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'eng'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0memojis_to_consider\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"all\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstem\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 45\u001b[0m \u001b[0;31m# assumes there is a trained w2v model stored in the same directory!\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 46\u001b[0;31m \u001b[0mwv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mKeyedVectors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"word2vec.model\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmmap\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'r'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 47\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mstem\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/gensim/models/keyedvectors.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(cls, fname_or_handle, **kwargs)\u001b[0m\n\u001b[1;32m 120\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mclassmethod\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 121\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcls\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfname_or_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 122\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mBaseKeyedVectors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mload\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname_or_handle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 123\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 124\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0msimilarity\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mentity1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mentity2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/gensim/utils.py\u001b[0m in \u001b[0;36mload\u001b[0;34m(cls, fname, mmap)\u001b[0m\n\u001b[1;32m 423\u001b[0m \u001b[0mcompress\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msubname\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mSaveLoad\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_adapt_by_suffix\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 424\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 425\u001b[0;31m \u001b[0mobj\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0munpickle\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 426\u001b[0m \u001b[0mobj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_load_specials\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmmap\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcompress\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msubname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 427\u001b[0m \u001b[0mlogger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minfo\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"loaded %s\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/gensim/utils.py\u001b[0m in \u001b[0;36munpickle\u001b[0;34m(fname)\u001b[0m\n\u001b[1;32m 1327\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1328\u001b[0m \"\"\"\n\u001b[0;32m-> 1329\u001b[0;31m \u001b[0;32mwith\u001b[0m \u001b[0msmart_open\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfname\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'rb'\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1330\u001b[0m \u001b[0;31m# Because of loading from S3 load can't be used (missing readline in smart_open)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1331\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0msys\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mversion_info\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/smart_open/smart_open_lib.py\u001b[0m in \u001b[0;36msmart_open\u001b[0;34m(uri, mode, **kw)\u001b[0m\n\u001b[1;32m 179\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mTypeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'mode should be a string'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 180\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 181\u001b[0;31m \u001b[0mfobj\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_shortcut_open\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0muri\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkw\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 182\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfobj\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 183\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mfobj\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/smart_open/smart_open_lib.py\u001b[0m in \u001b[0;36m_shortcut_open\u001b[0;34m(uri, mode, **kw)\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[0mmode\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreplace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'b'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m''\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 286\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 287\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mio\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mparsed_uri\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0muri_path\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmode\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mopen_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 288\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 289\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'word2vec.model'"
]
}
],
"source": [
"for index, row in df.iterrows():\n",
" sentence = row[\"Sentence\"]\n",
" print(sentence)\n",
"\n",
" trigger_new_prediction(all_chat=\"\", current_message = sentence)\n",
" print(prediction)\n",
" \n",
" "
]
},
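{
"cell_type": "markdown",
"metadata": {},
"source": [
"The `prediction` column of the table is still empty (NaN). A minimal sketch for filling it once the word2vec model is available; the output filename is an assumption:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#hedged sketch: store the merged predictions in the table's 'prediction' column\n",
"#(assumes merged_prediction succeeds, i.e. the word2vec model is available;\n",
"#the output filename is an assumption)\n",
"for index, row in df.iterrows():\n",
"    df.at[index, \"prediction\"] = \"\".join(merged_prediction(msg=row[\"Sentence\"], target_emojis=top_emojis))\n",
"df.to_csv(\"Evaluation Sentences - predicted.csv\", index=False)"
]
},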
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}