stemming as a parameter and adjustments for evaluation

Carsten 2018-07-23 12:09:36 +02:00
parent b99b2006f7
commit d899e2f069
2 changed files with 158 additions and 51 deletions

View File

@@ -147,34 +147,7 @@
"cell_type": "code", "cell_type": "code",
"execution_count": 5, "execution_count": 5,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
{
"ename": "KeyError",
"evalue": "'character'",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py\u001b[0m in \u001b[0;36mget_value\u001b[0;34m(self, series, key)\u001b[0m\n\u001b[1;32m 2482\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2483\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mlibts\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_value_box\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2484\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mIndexError\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32mpandas/_libs/tslib.pyx\u001b[0m in \u001b[0;36mpandas._libs.tslib.get_value_box (pandas/_libs/tslib.c:18843)\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/tslib.pyx\u001b[0m in \u001b[0;36mpandas._libs.tslib.get_value_box (pandas/_libs/tslib.c:18477)\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mTypeError\u001b[0m: 'str' object cannot be interpreted as an integer",
"\nDuring handling of the above exception, another exception occurred:\n",
"\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-2e408a3beaf0>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;31m#print(sys.path)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 4\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mnaive_approach\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mclf_naive\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/GitRepos/NLP-LAB/Project/naive_approach/naive_approach.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 25\u001b[0m \u001b[0mtableDict\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mindex\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrow\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mtable\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0miterrows\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 27\u001b[0;31m \u001b[0mtableDict\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mindex\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'character'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrow\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'description'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 28\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0;31m#######################\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/core/series.py\u001b[0m in \u001b[0;36m__getitem__\u001b[0;34m(self, key)\u001b[0m\n\u001b[1;32m 599\u001b[0m \u001b[0mkey\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcom\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_apply_if_callable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 600\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 601\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 602\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 603\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mis_scalar\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py\u001b[0m in \u001b[0;36mget_value\u001b[0;34m(self, series, key)\u001b[0m\n\u001b[1;32m 2489\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mInvalidIndexError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkey\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2490\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 2491\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2492\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mException\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;31m# pragma: no cover\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2493\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/anaconda3/lib/python3.6/site-packages/pandas/core/indexes/base.py\u001b[0m in \u001b[0;36mget_value\u001b[0;34m(self, series, key)\u001b[0m\n\u001b[1;32m 2475\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2476\u001b[0m return self._engine.get_value(s, k,\n\u001b[0;32m-> 2477\u001b[0;31m tz=getattr(series.dtype, 'tz', None))\n\u001b[0m\u001b[1;32m 2478\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mKeyError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2479\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m0\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minferred_type\u001b[0m \u001b[0;32min\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m'integer'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'boolean'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32mpandas/_libs/index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_value\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_value\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/index.pyx\u001b[0m in \u001b[0;36mpandas._libs.index.IndexEngine.get_loc\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/hashtable_class_helper.pxi\u001b[0m in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[0;34m()\u001b[0m\n",
"\u001b[0;32mpandas/_libs/hashtable_class_helper.pxi\u001b[0m in \u001b[0;36mpandas._libs.hashtable.PyObjectHashTable.get_item\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mKeyError\u001b[0m: 'character'"
]
}
],
"source": [ "source": [
"#sys.path.append(\"..\")\n", "#sys.path.append(\"..\")\n",
"#print(sys.path)\n", "#print(sys.path)\n",
@@ -184,13 +157,13 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 6,
"metadata": { "metadata": {
"collapsed": true "collapsed": true
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"tmp_dict = clf_naive.prepareData()" "tmp_dict = clf_naive.prepareData(stem=True)"
] ]
}, },
{ {
@@ -203,10 +176,8 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 7,
"metadata": { "metadata": {},
"collapsed": true
},
"outputs": [], "outputs": [],
"source": [ "source": [
"def merged_prediction(msg , split = 0.5 , number = 8, target_emojis = top_emojis):\n", "def merged_prediction(msg , split = 0.5 , number = 8, target_emojis = top_emojis):\n",
@@ -216,7 +187,7 @@
" number_naive = round((1-split)*number)\n", " number_naive = round((1-split)*number)\n",
" \n", " \n",
" #predict emojis with the naive approach\n", " #predict emojis with the naive approach\n",
" prediction_naive , prediction_naive_values = clf_naive.predict(sentence = msg, lookup= tmp_dict, n = number_naive, em)\n", " prediction_naive , prediction_naive_values = clf_naive.predict(sentence = msg, lookup= tmp_dict, n = number_naive, embeddings = \"wordnet\", stem = True)\n",
"\n", "\n",
" #filter 0 values\n", " #filter 0 values\n",
" tmp1 = []\n", " tmp1 = []\n",
@@ -256,7 +227,7 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 8,
"metadata": { "metadata": {
"collapsed": true "collapsed": true
}, },
@@ -288,22 +259,89 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 9,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style>\n",
" .dataframe thead tr:only-child th {\n",
" text-align: right;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: left;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Sentence</th>\n",
" <th>prediction</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>i like computer games</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>it is great weather for using the swimming pool</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>old cars are this loud</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>i hear a plane above our house</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>these are really cute pets</td>\n",
" <td>NaN</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Sentence prediction\n",
"0 i like computer games NaN\n",
"1 it is great weather for using the swimming pool NaN\n",
"2 old cars are this loud NaN\n",
"3 i hear a plane above our house NaN\n",
"4 these are really cute pets NaN"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"# get table\n", "# get table\n",
"import pandas as pd\n", "import pandas as pd\n",
"df = pd.read_csv(\"Evaluation Sentences - Tabellenblatt1.csv\", sep=\"\\t\")\n", "df = pd.read_csv(\"Evaluation Sentences - Topic related sentences.csv\")#, sep=\"\\t\")\n",
"df.head()" "df.head()"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 10,
"metadata": { "metadata": {},
"collapsed": true
},
"outputs": [], "outputs": [],
"source": [ "source": [
"all_predictions = []\n", "all_predictions = []\n",
@@ -325,9 +363,78 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 11,
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style>\n",
" .dataframe thead tr:only-child th {\n",
" text-align: right;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: left;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>Sentence</th>\n",
" <th>prediction</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>i like computer games</td>\n",
" <td>😅😂😢😭😁😌🎮🎲</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>it is great weather for using the swimming pool</td>\n",
" <td>😌😁😎🙌😀😉🎐🍃</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>old cars are this loud</td>\n",
" <td>😅😂😢😭🚕🚃🚚🚋</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>i hear a plane above our house</td>\n",
" <td>😅😂😢😭😁😔😌😉</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>these are really cute pets</td>\n",
" <td>😂😅😁😌😎😉🙌😀</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" Sentence prediction\n",
"0 i like computer games 😅😂😢😭😁😌🎮🎲\n",
"1 it is great weather for using the swimming pool 😌😁😎🙌😀😉🎐🍃\n",
"2 old cars are this loud 😅😂😢😭🚕🚃🚚🚋\n",
"3 i hear a plane above our house 😅😂😢😭😁😔😌😉\n",
"4 these are really cute pets 😂😅😁😌😎😉🙌😀"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"df[\"prediction\"] = all_predictions\n", "df[\"prediction\"] = all_predictions\n",
"\n", "\n",
@@ -337,13 +444,13 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": 12,
"metadata": { "metadata": {
"collapsed": true "collapsed": true
}, },
"outputs": [], "outputs": [],
"source": [ "source": [
"df.to_csv(\"Evaluation Sentences - Wordnet - newClf.csv\", sep='\\t', encoding='utf-8')" "df.to_csv(\"E_S - topic - wordnet - stemming.csv\", sep='\\t', encoding='utf-8')"
] ]
}, },
{ {

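The KeyError: 'character' traceback removed from the notebook output above appears to be the symptom that the second file fixes: emoji_descriptions_preprocessed.csv is semicolon-separated, so reading it without delimiter=";" leaves the header as one unsplit column and row['character'] fails. A minimal sketch of the lookup construction under that assumption (the column names 'character' and 'description' are taken from the traceback, not invented here):

import pandas as pd

# With the semicolon delimiter the table gets separate columns; without it the
# whole header ends up as a single column and row['character'] raises KeyError.
table = pd.read_csv('../Tools/emoji_descriptions_preprocessed.csv', delimiter=';')

# Lookup in the format { index: [emoji, description] }, as noted in the module.
table_dict = {}
for index, row in table.iterrows():
    table_dict.update({index: [row['character'], row['description']]})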
View File

@@ -18,7 +18,7 @@ import pprint
from gensim.models import Word2Vec, KeyedVectors from gensim.models import Word2Vec, KeyedVectors
# # Naive Approach # # Naive Approach
table = pd.read_csv('../Tools/emoji_descriptions_preprocessed.csv') table = pd.read_csv('../Tools/emoji_descriptions_preprocessed.csv', delimiter = ";")
##Store table in the format: ##Store table in the format:
## { index: [emoji, description]} ## { index: [emoji, description]}
@@ -49,7 +49,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
if embeddings=="word2Vec": if embeddings=="word2Vec":
wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r') wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
elif embeddings=="fastText": elif embeddings=="fastText":
wv = KeyedVectors.load("/fastTextVectors.kv", mmap='r') wv = KeyedVectors.load(str(__location__)+"/fastTextVectors.kv", mmap='r')
if (stem): if (stem):
sentence = stemming(sentence) sentence = stemming(sentence)
@@ -118,9 +118,9 @@ def prepareData(stem=True, lower=True):
# make a prediction for an input sentence # make a prediction for an input sentence
# embeddings = ["wordnet", "word2Vec", "fastText"] # embeddings = ["wordnet", "word2Vec", "fastText"]
def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng',\ def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng',\
embeddings="wordnet", n=10, t=0.9): embeddings="wordnet", n=10, t=0.9, stem = True):
result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings) result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings, stem = stem)
try: try:
if(criteria=="summed"): if(criteria=="summed"):