master-thesis/Tagging/CRF_training.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import conllu_batch_generator as cbg"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"cr = cbg.ConlluReader(\"filtered_recipes.conllu\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"TokenList<Dissolve, Jello, in, boiling, water, .>"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"next(iter(cr))[0]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def word2features(sent, i):\n",
" word = sent[i]['form']\n",
" postag = sent[i]['upostag']\n",
" features = [\n",
" 'bias',\n",
" #'word.lower=' + word.lower(),\n",
" 'word[-3:]=' + word[-3:],\n",
" 'word[-2:]=' + word[-2:],\n",
" 'word.isupper=%s' % word.isupper(),\n",
" 'word.istitle=%s' % word.istitle(),\n",
" 'word.isdigit=%s' % word.isdigit(),\n",
" 'postag=' + postag,\n",
" 'postag[:2]=' + postag[:2],\n",
" ]\n",
" if i > 0:\n",
" word1 = sent[i-1]['form']\n",
" postag1 = sent[i-1]['upostag']\n",
" features.extend([\n",
" '-1:word.lower=' + word1.lower(),\n",
" '-1:word.istitle=%s' % word1.istitle(),\n",
" '-1:word.isupper=%s' % word1.isupper(),\n",
" '-1:postag=' + postag1,\n",
" '-1:postag[:2]=' + postag1[:2],\n",
" ])\n",
" if i > 1:\n",
" word1 = sent[i-2]['form']\n",
" postag1 = sent[i-2]['upostag']\n",
" features.extend([\n",
" '-2:word.lower=' + word1.lower(),\n",
" '-2:word.istitle=%s' % word1.istitle(),\n",
" '-2:word.isupper=%s' % word1.isupper(),\n",
" '-2:postag=' + postag1,\n",
" '-2:postag[:2]=' + postag1[:2],\n",
" ])\n",
" else:\n",
" features.append('BOS')\n",
" \n",
" if i < len(sent)-1:\n",
" word1 = sent[i+1]['form']\n",
" postag1 = sent[i+1]['upostag']\n",
" features.extend([\n",
" '+1:word.lower=' + word1.lower(),\n",
" '+1:word.istitle=%s' % word1.istitle(),\n",
" '+1:word.isupper=%s' % word1.isupper(),\n",
" '+1:postag=' + postag1,\n",
" '+1:postag[:2]=' + postag1[:2],\n",
" ])\n",
" if i < len(sent)-2:\n",
" word1 = sent[i+1]['form']\n",
" postag1 = sent[i+1]['upostag']\n",
" features.extend([\n",
" '+2:word.lower=' + word1.lower(),\n",
" '+2:word.istitle=%s' % word1.istitle(),\n",
" '+2:word.isupper=%s' % word1.isupper(),\n",
" '+2:postag=' + postag1,\n",
" '+2:postag[:2]=' + postag1[:2],\n",
" ])\n",
" else:\n",
" features.append('EOS')\n",
" \n",
" return features"
]
},
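{
"cell_type": "markdown",
"metadata": {},
"source": [
"* quick sanity check (a sketch added for illustration, not part of the original run): inspect the feature list produced for the first token of one sentence. It re-creates an iterator over `cr`, just as the cell above with `next(iter(cr))[0]` does."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# grab one parsed sentence and show the features generated for its first token\n",
"example_sent = next(iter(cr))[0]\n",
"word2features(example_sent, 0)"
]
},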
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def sent2labels(sent):\n",
" labels = []\n",
" for token in sent:\n",
" if token['misc'] is not None and 'food_type' in token['misc']:\n",
" labels.append(token['misc']['food_type'])\n",
" else:\n",
" labels.append(\"0\")\n",
" return labels"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def sent2features(sent):\n",
" return [word2features(sent, i) for i in range(len(sent))]"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def sent2tokens(sent):\n",
" return [token['form'] for token in sent]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def feature2tokens(sent):\n",
" return [t[1].split(\"=\")[1] for t in sent]"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def conllu2tokens(sent):\n",
" return [t['form'] for t in sent]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* create test dataset:"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"# read 50000 samples:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"n_train = 50000\n",
"n_test = 1000\n",
"\n",
"X_train = []\n",
"Y_train = []\n",
"t_train = []\n",
"\n",
"X_test = []\n",
"Y_test = []\n",
"t_test = []\n",
"\n",
"\n",
"i = 0\n",
"for sample in cr:\n",
" if len(sample) == 0:\n",
" continue\n",
" i += 1\n",
" if i < n_train:\n",
" X_train.append(sent2features(sample[0]))\n",
" Y_train.append(sent2labels(sample[0]))\n",
" t_train.append(conllu2tokens(sample[0]))\n",
" else:\n",
" X_test.append(sent2features(sample[0]))\n",
" Y_test.append(sent2labels(sample[0]))\n",
" t_test.append(conllu2tokens(sample[0]))\n",
" \n",
" if i >= n_train + n_test:\n",
" break\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"47538"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(X_train)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* train with crfsuite"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"import pycrfsuite"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
"trainer = pycrfsuite.Trainer(verbose=False)\n",
"\n",
"for xseq, yseq in zip(X_train, Y_train):\n",
" trainer.append(xseq, yseq)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"trainer.set_params({\n",
" 'c1': 1.0, # coefficient for L1 penalty\n",
" 'c2': 1e-3, # coefficient for L2 penalty\n",
" #'max_iterations': 50, # stop earlier\n",
"\n",
" # include transitions that are possible, but not observed\n",
" 'feature.possible_transitions': True\n",
"})"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['feature.minfreq',\n",
" 'feature.possible_states',\n",
" 'feature.possible_transitions',\n",
" 'c1',\n",
" 'c2',\n",
" 'max_iterations',\n",
" 'num_memories',\n",
" 'epsilon',\n",
" 'period',\n",
" 'delta',\n",
" 'linesearch',\n",
" 'max_linesearch']"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"trainer.params()"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
"trainer.train('test.crfsuite')"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'num': 830,\n",
" 'scores': {},\n",
" 'loss': 41171.669638,\n",
" 'feature_norm': 126.341894,\n",
" 'error_norm': 85.690855,\n",
" 'active_features': 6055,\n",
" 'linesearch_trials': 2,\n",
" 'linesearch_step': 0.5,\n",
" 'time': 0.724}"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"trainer.logparser.last_iteration\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* test:"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<contextlib.closing at 0x7f26d79813c8>"
]
},
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tagger = pycrfsuite.Tagger()\n",
"tagger.open('test.crfsuite')"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"ename": "IndexError",
"evalue": "list index out of range",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-20-a88100b49642>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m130\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;31m#print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Predicted:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtagger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Correct: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mY_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mIndexError\u001b[0m: list index out of range"
]
}
],
"source": [
"for i in range(100,130):\n",
" print(' '.join(t_test[i]))\n",
" #print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\n",
" print(\"Predicted:\", ' '.join(tagger.tag(X_test[i])))\n",
" print(\"Correct: \", ' '.join(Y_test[i]))\n",
" \n",
" print(\"\\n\")"
]
},
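{
"cell_type": "markdown",
"metadata": {},
"source": [
"* possible next step (a sketch, not part of the original run): token-level precision/recall/F1 over the held-out sentences. Assumes scikit-learn is installed and reuses `tagger`, `X_test` and `Y_test` from above; note that with the data read above the test split is small."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from itertools import chain\n",
"\n",
"from sklearn.metrics import classification_report\n",
"\n",
"# tag every test sentence, then flatten gold and predicted label sequences token-wise\n",
"y_pred = [tagger.tag(xseq) for xseq in X_test]\n",
"flat_true = list(chain.from_iterable(Y_test))\n",
"flat_pred = list(chain.from_iterable(y_pred))\n",
"\n",
"# per-label report; the background class \"0\" dominates, so the food labels are the interesting rows\n",
"print(classification_report(flat_true, flat_pred))"
]
},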
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}