master-thesis/Tagging/CRF_evaluation.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# CRF entity recognition evaluation"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from IPython.core.display import Markdown, HTML, display\n",
"\n",
"import sys\n",
"sys.path.insert(0, '..') # noqa\n",
"import settings # noqa\n",
"\n",
"import crf_data_generator as cdg\n",
"import pycrfsuite\n",
"\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"#data = cdg.ConlluCRFReader(\"../\" + settings.gzipped_conllu_data_root + \"recipes2.conllu.gz\")\n",
"data = cdg.ConlluCRFReader(\"filtered_recipes.conllu\")\n",
"\n",
"data_iterator = iter(data)"
]
},
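{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check (not part of the evaluation itself), the next cell peeks at a single item from `data_iterator`. It assumes the reader yields `(features, labels, tokens)` triples per sentence, as the evaluation loops below do."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: peek at one sentence from the reader (assumes the same\n",
"# (features, labels, tokens) layout that the evaluation loops below rely on)\n",
"features, labels, tokens = next(data_iterator)\n",
"print(len(tokens), \"tokens:\", tokens[:10])\n",
"print(\"labels:\", labels[:10])"
]
},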
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def sentence_as_markdown_table( tokens, labels = None, predictions = None):\n",
" n = len(tokens)\n",
" s = \"<table>\\n<tr>\\n<th>Sentence:</th>\\n\"\n",
" for t in tokens:\n",
" s += f\"<th>{t}</th>\"\n",
" \n",
" s += \"<tr>\\n\"\n",
" \n",
" if labels is not None:\n",
" s += \"<th>labels:</th>\" + \"\".join([f\"<th>{l}</th>\" for l in labels])\n",
" s += \"</tr>\\n\"\n",
" \n",
" if predictions is not None:\n",
" s+= \"<th>Predicitions:</th>\" + \"\".join([f\"<th>{p}</th>\" for p in predictions])\n",
" \n",
" display(HTML(s + \"</tr>\\n</table>\\n\\n\"))\n",
"\n",
" \n",
" "
]
},
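{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal usage example of the helper above, with hand-written tokens and labels (purely illustrative, not taken from the corpus):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative only: render a made-up example sentence; the filler tag\n",
"# \"other\" is invented and need not match the corpus tagset\n",
"sentence_as_markdown_table(\n",
"    [\"chop\", \"the\", \"onions\"],\n",
"    labels=[\"action\", \"other\", \"ingredient\"],\n",
"    predictions=[\"action\", \"other\", \"ingredient\"],\n",
")"
]
},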
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<contextlib.closing at 0x7f8427ce2c88>"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tagger = pycrfsuite.Tagger()\n",
"tagger.open('test.crfsuite')"
]
},
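{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, the label set stored in the loaded model can be inspected via `Tagger.labels()`; this is only a sanity check that `action` and `ingredient` are among the model's labels before evaluating."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sanity check: list the label set stored in the trained model\n",
"print(tagger.labels())"
]
},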
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Error Rate:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"ename": "IndexError",
"evalue": "list index out of range",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-5-df3e3a31d984>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mpred_ingredients\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0;32mfor\u001b[0m \u001b[0mfeatures\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabels\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtokens\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 7\u001b[0m \u001b[0mpred\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtagger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfeatures\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtokens\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/Dokumente/gitRepos/master_thesis/Tagging/crf_data_generator.py\u001b[0m in \u001b[0;36m__next__\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_parent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_iter_documents\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 109\u001b[0;31m \u001b[0mnext_sent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_iter\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__next__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 110\u001b[0m \u001b[0mfeatures\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msent2features\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnext_sent\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 111\u001b[0m \u001b[0mlabels\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msent2labels\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnext_sent\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mIndexError\u001b[0m: list index out of range"
]
}
],
"source": [
"label_actions = []\n",
"label_ingredients = []\n",
"pred_actions = []\n",
"pred_ingredients = []\n",
"\n",
"for features, labels, tokens in data:\n",
" pred = tagger.tag(features)\n",
" for i in range(len(tokens)):\n",
" label_actions.append(labels[i] == \"action\")\n",
" label_ingredients.append(labels[i] == \"ingredient\")\n",
" pred_actions.append(pred[i] == \"action\")\n",
" pred_ingredients.append(pred[i] == \"ingredient\")\n",
" "
]
},
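{
"cell_type": "markdown",
"metadata": {},
"source": [
"The loop above aborts with an `IndexError` raised inside the reader's `__next__`, so the lists only cover part of the corpus. A defensive variant is sketched below: it drives the iterator manually and skips sentences on which the reader fails. This assumes the iterator can still be advanced after such an error; if it cannot, the underlying issue in `crf_data_generator` has to be fixed instead."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: same statistics as above, but tolerant of reader errors\n",
"# (assumes the iterator can be advanced past a failing sentence)\n",
"label_actions, label_ingredients = [], []\n",
"pred_actions, pred_ingredients = [], []\n",
"\n",
"it = iter(data)\n",
"while True:\n",
"    try:\n",
"        features, labels, tokens = next(it)\n",
"    except StopIteration:\n",
"        break\n",
"    except IndexError:\n",
"        continue  # skip sentences the reader cannot parse\n",
"    pred = tagger.tag(features)\n",
"    for i in range(len(tokens)):\n",
"        label_actions.append(labels[i] == \"action\")\n",
"        label_ingredients.append(labels[i] == \"ingredient\")\n",
"        pred_actions.append(pred[i] == \"action\")\n",
"        pred_ingredients.append(pred[i] == \"ingredient\")"
]
},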
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"label_actions = np.array(label_actions, dtype=np.bool)\n",
"label_ingredients = np.array(label_ingredients, dtype=np.bool)\n",
"pred_actions = np.array(pred_actions, dtype=np.bool)\n",
"pred_ingredients = np.array(pred_ingredients, dtype=np.bool)\n",
"\n",
"len(label_actions)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"a_tp = np.sum(label_actions & pred_actions)\n",
"i_tp = np.sum(label_ingredients & pred_ingredients)\n",
"\n",
"a_fp = np.sum(np.logical_not(label_actions) & pred_actions)\n",
"i_fp = np.sum(np.logical_not(label_ingredients) & pred_ingredients)\n",
"\n",
"Markdown(f\"\"\"\n",
"* **\\# all tokens**: {len(label_ingredients)}\n",
"\n",
"\n",
"* **\\# real actions**: {np.sum(label_actions)}\n",
"* **\\# predicted actions**: {np.sum(pred_actions)}\n",
"\n",
"\n",
"* **\\# real ingredients**: {np.sum(label_ingredients)}\n",
"* **\\# predicted ingredients**: {np.sum(pred_ingredients)} \n",
"\n",
"\n",
"* **action error**: {1 - np.sum(label_actions == pred_actions) / len(label_ingredients)}\n",
"* **ingredient error**: {1 - np.sum(label_ingredients == pred_ingredients) / len(label_ingredients)}\n",
"\n",
"* **action true positives**: {a_tp} of {np.sum(label_actions)} ({a_tp / np.sum(label_actions)})\n",
"* **ingredient true positives**: {i_tp} of {np.sum(label_ingredients)} ({i_tp / np.sum(label_ingredients)})\n",
"\n",
"* **action false positives**: {a_fp} of {np.sum(label_actions)} ({a_fp / np.sum(label_actions)})\n",
"* **ingredient false positives**: {i_fp} of {np.sum(label_ingredients)} ({i_fp / np.sum(label_ingredients)})\n",
"\n",
"\n",
"\n",
"\"\"\")"
]
},
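{
"cell_type": "markdown",
"metadata": {},
"source": [
"For completeness, the token-level counts above can also be summarized as precision and recall per class. This is a small sketch over the same boolean arrays, not an additional evaluation on new data."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# precision / recall per class, derived from the same boolean arrays\n",
"def precision_recall(y_true, y_pred):\n",
"    tp = np.sum(y_true & y_pred)\n",
"    precision = tp / max(np.sum(y_pred), 1)\n",
"    recall = tp / max(np.sum(y_true), 1)\n",
"    return precision, recall\n",
"\n",
"for name, y_true, y_pred in [(\"action\", label_actions, pred_actions),\n",
"                             (\"ingredient\", label_ingredients, pred_ingredients)]:\n",
"    p, r = precision_recall(y_true, y_pred)\n",
"    print(f\"{name}: precision={p:.3f} recall={r:.3f}\")"
]
},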
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Evaluation Example"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"for i, (features, labels, tokens) in enumerate(data):\n",
" if i > 100:\n",
" break\n",
" \n",
" prediction = tagger.tag(features)\n",
" \n",
" sentence_as_markdown_table(tokens, labels, prediction)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}