first CRF results for simple entity recognition
and also synced other stuff
Tagging/CRF_evaluation.ipynb | 1680 (new file)
    File diff suppressed because it is too large.
Tagging/CRF_training.ipynb | 576 (new file)
@@ -0,0 +1,576 @@
|  | { | ||||||
|  |  "cells": [ | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 1, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "import conllu_batch_generator as cbg" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 2, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "cr = cbg.ConlluReader(\"recipes0.conllu\")" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 3, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "t = cr.__iter__().__next__()" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 4, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "OrderedDict([('id', 2),\n", | ||||||
|  |        "             ('form', 'oven'),\n", | ||||||
|  |        "             ('lemma', 'oven'),\n", | ||||||
|  |        "             ('upostag', 'ADV'),\n", | ||||||
|  |        "             ('xpostag', 'RB'),\n", | ||||||
|  |        "             ('feats', None),\n", | ||||||
|  |        "             ('head', None),\n", | ||||||
|  |        "             ('deprel', '_'),\n", | ||||||
|  |        "             ('deps', None),\n", | ||||||
|  |        "             ('misc', None)])" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 4, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "t[0][1]" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 5, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "def word2features(sent, i):\n", | ||||||
|  |     "    word = sent[i]['form']\n", | ||||||
|  |     "    postag = sent[i]['upostag']\n", | ||||||
|  |     "    features = [\n", | ||||||
|  |     "        'bias',\n", | ||||||
|  |     "        #'word.lower=' + word.lower(),\n", | ||||||
|  |     "        'word[-3:]=' + word[-3:],\n", | ||||||
|  |     "        'word[-2:]=' + word[-2:],\n", | ||||||
|  |     "        'word.isupper=%s' % word.isupper(),\n", | ||||||
|  |     "        'word.istitle=%s' % word.istitle(),\n", | ||||||
|  |     "        'word.isdigit=%s' % word.isdigit(),\n", | ||||||
|  |     "        'postag=' + postag,\n", | ||||||
|  |     "        'postag[:2]=' + postag[:2],\n", | ||||||
|  |     "    ]\n", | ||||||
|  |     "    if i > 0:\n", | ||||||
|  |     "        word1 = sent[i-1]['form']\n", | ||||||
|  |     "        postag1 = sent[i-1]['upostag']\n", | ||||||
|  |     "        features.extend([\n", | ||||||
|  |     "            '-1:word.lower=' + word1.lower(),\n", | ||||||
|  |     "            '-1:word.istitle=%s' % word1.istitle(),\n", | ||||||
|  |     "            '-1:word.isupper=%s' % word1.isupper(),\n", | ||||||
|  |     "            '-1:postag=' + postag1,\n", | ||||||
|  |     "            '-1:postag[:2]=' + postag1[:2],\n", | ||||||
|  |     "        ])\n", | ||||||
|  |     "        if i > 1:\n", | ||||||
|  |     "            word1 = sent[i-2]['form']\n", | ||||||
|  |     "            postag1 = sent[i-2]['upostag']\n", | ||||||
|  |     "            features.extend([\n", | ||||||
|  |     "                '-2:word.lower=' + word1.lower(),\n", | ||||||
|  |     "                '-2:word.istitle=%s' % word1.istitle(),\n", | ||||||
|  |     "                '-2:word.isupper=%s' % word1.isupper(),\n", | ||||||
|  |     "                '-2:postag=' + postag1,\n", | ||||||
|  |     "                '-2:postag[:2]=' + postag1[:2],\n", | ||||||
|  |     "            ])\n", | ||||||
|  |     "    else:\n", | ||||||
|  |     "        features.append('BOS')\n", | ||||||
|  |     "        \n", | ||||||
|  |     "    if i < len(sent)-1:\n", | ||||||
|  |     "        word1 = sent[i+1]['form']\n", | ||||||
|  |     "        postag1 = sent[i+1]['upostag']\n", | ||||||
|  |     "        features.extend([\n", | ||||||
|  |     "            '+1:word.lower=' + word1.lower(),\n", | ||||||
|  |     "            '+1:word.istitle=%s' % word1.istitle(),\n", | ||||||
|  |     "            '+1:word.isupper=%s' % word1.isupper(),\n", | ||||||
|  |     "            '+1:postag=' + postag1,\n", | ||||||
|  |     "            '+1:postag[:2]=' + postag1[:2],\n", | ||||||
|  |     "        ])\n", | ||||||
|  |     "        if i < len(sent)-2:\n", | ||||||
|  |     "            word1 = sent[i+1]['form']\n", | ||||||
|  |     "            postag1 = sent[i+1]['upostag']\n", | ||||||
|  |     "            features.extend([\n", | ||||||
|  |     "                '+2:word.lower=' + word1.lower(),\n", | ||||||
|  |     "                '+2:word.istitle=%s' % word1.istitle(),\n", | ||||||
|  |     "                '+2:word.isupper=%s' % word1.isupper(),\n", | ||||||
|  |     "                '+2:postag=' + postag1,\n", | ||||||
|  |     "                '+2:postag[:2]=' + postag1[:2],\n", | ||||||
|  |     "            ])\n", | ||||||
|  |     "    else:\n", | ||||||
|  |     "        features.append('EOS')\n", | ||||||
|  |     "                \n", | ||||||
|  |     "    return features" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 6, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "def sent2labels(sent):\n", | ||||||
|  |     "    labels = []\n", | ||||||
|  |     "    for token in sent:\n", | ||||||
|  |     "        if token['misc'] is not None and 'food_type' in token['misc']:\n", | ||||||
|  |     "            labels.append(token['misc']['food_type'])\n", | ||||||
|  |     "        else:\n", | ||||||
|  |     "            labels.append(\"0\")\n", | ||||||
|  |     "    return labels" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 7, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "def sent2features(sent):\n", | ||||||
|  |     "    return [word2features(sent, i) for i in range(len(sent))]" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 8, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "def sent2tokens(sent):\n", | ||||||
|  |     "    return [token['form'] for token in sent]" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 9, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "def feature2tokens(sent):\n", | ||||||
|  |     "    return [t[1].split(\"=\")[1] for t in sent]" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 10, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "def conllu2tokens(sent):\n", | ||||||
|  |     "    return [t['form'] for t in sent]" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "markdown", | ||||||
|  |    "metadata": {}, | ||||||
|  |    "source": [ | ||||||
|  |     "* create test dataset:" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 11, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "# read 50000 samples:" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 12, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "n_train = 50000\n", | ||||||
|  |     "n_test = 1000\n", | ||||||
|  |     "\n", | ||||||
|  |     "X_train = []\n", | ||||||
|  |     "Y_train = []\n", | ||||||
|  |     "t_train = []\n", | ||||||
|  |     "\n", | ||||||
|  |     "X_test = []\n", | ||||||
|  |     "Y_test = []\n", | ||||||
|  |     "t_test = []\n", | ||||||
|  |     "\n", | ||||||
|  |     "\n", | ||||||
|  |     "\n", | ||||||
|  |     "for i,sample in enumerate(cr):\n", | ||||||
|  |     "    \n", | ||||||
|  |     "    if i < n_train:\n", | ||||||
|  |     "        X_train.append(sent2features(sample[0]))\n", | ||||||
|  |     "        Y_train.append(sent2labels(sample[0]))\n", | ||||||
|  |     "        t_train.append(conllu2tokens(sample[0]))\n", | ||||||
|  |     "    else:\n", | ||||||
|  |     "        X_test.append(sent2features(sample[0]))\n", | ||||||
|  |     "        Y_test.append(sent2labels(sample[0]))\n", | ||||||
|  |     "        t_test.append(conllu2tokens(sample[0]))\n", | ||||||
|  |     "    \n", | ||||||
|  |     "    if i >= n_train + n_test:\n", | ||||||
|  |     "        break\n", | ||||||
|  |     "\n" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "markdown", | ||||||
|  |    "metadata": {}, | ||||||
|  |    "source": [ | ||||||
|  |     "* train with crfsuite" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 13, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "import pycrfsuite" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 14, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "trainer = pycrfsuite.Trainer(verbose=False)\n", | ||||||
|  |     "\n", | ||||||
|  |     "for xseq, yseq in zip(X_train, Y_train):\n", | ||||||
|  |     "    trainer.append(xseq, yseq)" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 15, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "trainer.set_params({\n", | ||||||
|  |     "    'c1': 1.0,   # coefficient for L1 penalty\n", | ||||||
|  |     "    'c2': 1e-3,  # coefficient for L2 penalty\n", | ||||||
|  |     "    #'max_iterations': 50,  # stop earlier\n", | ||||||
|  |     "\n", | ||||||
|  |     "    # include transitions that are possible, but not observed\n", | ||||||
|  |     "    'feature.possible_transitions': True\n", | ||||||
|  |     "})" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 16, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "['feature.minfreq',\n", | ||||||
|  |        " 'feature.possible_states',\n", | ||||||
|  |        " 'feature.possible_transitions',\n", | ||||||
|  |        " 'c1',\n", | ||||||
|  |        " 'c2',\n", | ||||||
|  |        " 'max_iterations',\n", | ||||||
|  |        " 'num_memories',\n", | ||||||
|  |        " 'epsilon',\n", | ||||||
|  |        " 'period',\n", | ||||||
|  |        " 'delta',\n", | ||||||
|  |        " 'linesearch',\n", | ||||||
|  |        " 'max_linesearch']" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 16, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "trainer.params()" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 17, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "trainer.train('test.crfsuite')" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 21, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "{'num': 688,\n", | ||||||
|  |        " 'scores': {},\n", | ||||||
|  |        " 'loss': 72969.779861,\n", | ||||||
|  |        " 'feature_norm': 130.969535,\n", | ||||||
|  |        " 'error_norm': 157.007119,\n", | ||||||
|  |        " 'active_features': 8435,\n", | ||||||
|  |        " 'linesearch_trials': 1,\n", | ||||||
|  |        " 'linesearch_step': 1.0,\n", | ||||||
|  |        " 'time': 0.346}" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 21, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "trainer.logparser.last_iteration\n" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "markdown", | ||||||
|  |    "metadata": {}, | ||||||
|  |    "source": [ | ||||||
|  |     "* test:" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 22, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "<contextlib.closing at 0x7f347332cc88>" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 22, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "tagger = pycrfsuite.Tagger()\n", | ||||||
|  |     "tagger.open('test.crfsuite')" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 23, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "name": "stdout", | ||||||
|  |      "output_type": "stream", | ||||||
|  |      "text": [ | ||||||
|  |       "\n", | ||||||
|  |       "Predicted: \n", | ||||||
|  |       "Correct:   \n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Prepare mudding as directed on package using 3 cups milk .\n", | ||||||
|  |       "Predicted: action ingredient 0 0 0 0 0 0 0 ingredient 0\n", | ||||||
|  |       "Correct:   action 0 0 0 0 0 0 0 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Remove from heat ; stir in orange_peel .\n", | ||||||
|  |       "Predicted: action 0 action 0 action 0 ingredient 0\n", | ||||||
|  |       "Correct:   action 0 action 0 action 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Cover surface of pudding with waxed paper or plastic wrap and cool 15 minutes .\n", | ||||||
|  |       "Predicted: action action 0 0 0 0 0 0 0 0 0 action 0 0 0\n", | ||||||
|  |       "Correct:   action 0 0 ingredient 0 0 0 0 0 0 0 action 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Line bottom of trifle dish or glass bowl with 1/3 of the cake cubes ; .\n", | ||||||
|  |       "Predicted: 0 0 0 0 0 0 0 0 0 0 0 0 ingredient 0 0 0\n", | ||||||
|  |       "Correct:   0 0 0 0 0 0 0 0 0 0 0 0 ingredient 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Drizzle with 1 tablespoon of the orange_juice_concentrate .\n", | ||||||
|  |       "Predicted: 0 0 0 0 0 0 ingredient 0\n", | ||||||
|  |       "Correct:   0 0 0 0 0 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Spoon 1/3 of pudding over top .\n", | ||||||
|  |       "Predicted: action 0 0 0 0 ingredient 0\n", | ||||||
|  |       "Correct:   action 0 0 ingredient 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Spoon 1/3 of strawberry filling over pudding .\n", | ||||||
|  |       "Predicted: action 0 0 ingredient 0 0 ingredient 0\n", | ||||||
|  |       "Correct:   action 0 0 ingredient ingredient 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Top with 1/3 of orange_segments .\n", | ||||||
|  |       "Predicted: ingredient 0 0 0 0 0\n", | ||||||
|  |       "Correct:   ingredient 0 0 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Repeat layers 2 more times .\n", | ||||||
|  |       "Predicted: 0 0 0 0 0 0\n", | ||||||
|  |       "Correct:   0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Cover and refrigerate 3 hours or overnight .\n", | ||||||
|  |       "Predicted: action 0 action 0 0 0 0 0\n", | ||||||
|  |       "Correct:   action 0 action 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Store in refrigerator .\n", | ||||||
|  |       "Predicted: 0 0 action 0\n", | ||||||
|  |       "Correct:   0 0 action 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Predicted: \n", | ||||||
|  |       "Correct:   \n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Meanwhile , whisk the vinegar , lemon_zest and juice , honey , dried_oregano , 1/2 teaspoon salt and 1/4 teaspoon pepper in a large bowl .\n", | ||||||
|  |       "Predicted: 0 0 action 0 action 0 ingredient 0 ingredient 0 ingredient 0 ingredient 0 0 0 action 0 0 0 ingredient 0 0 0 0 0\n", | ||||||
|  |       "Correct:   0 0 action 0 ingredient 0 ingredient 0 ingredient 0 ingredient 0 ingredient 0 0 0 action 0 0 0 ingredient 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Whisk in the olive_oil in a slow , steady stream until emulsified .\n", | ||||||
|  |       "Predicted: action 0 0 ingredient 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "Correct:   action 0 0 ingredient 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Add the tomatoes and olives and toss .\n", | ||||||
|  |       "Predicted: action 0 ingredient 0 ingredient 0 0 0\n", | ||||||
|  |       "Correct:   action 0 ingredient 0 ingredient 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Peel the cucumbers , leaving alternating strips of green peel .\n", | ||||||
|  |       "Predicted: action 0 ingredient 0 0 0 0 0 ingredient action 0\n", | ||||||
|  |       "Correct:   action 0 ingredient 0 ingredient 0 0 0 ingredient action 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Trim the ends , halve lengthwise and slice crosswise , about 1/2 inch thick ; add to the bowl with the tomatoes .\n", | ||||||
|  |       "Predicted: 0 0 ingredient 0 0 0 0 action 0 0 0 0 0 0 0 action 0 0 0 0 0 ingredient 0\n", | ||||||
|  |       "Correct:   0 0 0 0 0 0 0 action ingredient 0 0 0 0 0 0 action 0 0 0 0 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Drain the red onion , add to the bowl and toss .\n", | ||||||
|  |       "Predicted: action 0 ingredient ingredient 0 action 0 0 0 0 0 0\n", | ||||||
|  |       "Correct:   action 0 ingredient ingredient 0 action 0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Drain the feta and slice horizontally into 4 even rectangles .\n", | ||||||
|  |       "Predicted: action 0 0 0 ingredient 0 0 0 0 0 0\n", | ||||||
|  |       "Correct:   action 0 0 0 action 0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Divide the salad among plates .\n", | ||||||
|  |       "Predicted: 0 0 ingredient 0 0 0\n", | ||||||
|  |       "Correct:   0 0 ingredient 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Top with the feta and oregano ; drizzle with olive_oil and season with pepper .\n", | ||||||
|  |       "Predicted: ingredient 0 0 ingredient 0 ingredient 0 0 0 ingredient 0 action 0 ingredient 0\n", | ||||||
|  |       "Correct:   ingredient 0 0 0 0 ingredient 0 0 0 ingredient 0 action 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Photograph by Johnny Miller\n", | ||||||
|  |       "Predicted: 0 0 0 0\n", | ||||||
|  |       "Correct:   0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Predicted: \n", | ||||||
|  |       "Correct:   \n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Put the potatoes in a large saucepan and cover with water .\n", | ||||||
|  |       "Predicted: action 0 ingredient 0 0 0 0 0 action 0 ingredient 0\n", | ||||||
|  |       "Correct:   action 0 ingredient 0 0 0 0 0 action 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Add 1 teaspoon salt to the water .\n", | ||||||
|  |       "Predicted: action 0 0 action 0 0 ingredient 0\n", | ||||||
|  |       "Correct:   action 0 0 action 0 0 ingredient 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Bring to a boil over high heat , reduce to a simmer and cook the potatoes until they are very tender but not overcooked , about 10 minutes .\n", | ||||||
|  |       "Predicted: 0 0 0 action 0 0 action 0 ingredient 0 0 action 0 action 0 ingredient 0 0 0 0 0 0 0 action 0 0 0 0 0\n", | ||||||
|  |       "Correct:   0 0 0 action 0 0 action 0 action 0 0 action 0 action 0 ingredient 0 0 0 0 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Remove the potatoes from the water and let them drain on a dishtowel , gently patting to dry .\n", | ||||||
|  |       "Predicted: action 0 ingredient 0 0 ingredient 0 0 0 action 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "Correct:   action 0 ingredient 0 0 ingredient 0 0 0 action 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Allow to cool .\n", | ||||||
|  |       "Predicted: 0 0 action 0\n", | ||||||
|  |       "Correct:   0 0 action 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n", | ||||||
|  |       "Gently press down on one potato with a spatula to gently flatten it to about 1/2 inch thick .\n", | ||||||
|  |       "Predicted: 0 0 0 0 0 ingredient 0 0 0 0 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "Correct:   0 0 0 0 0 ingredient 0 0 0 0 0 0 0 0 0 0 0 0 0\n", | ||||||
|  |       "\n", | ||||||
|  |       "\n" | ||||||
|  |      ] | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "for i in range(100,130):\n", | ||||||
|  |     "    print(' '.join(t_test[i]))\n", | ||||||
|  |     "    #print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\n", | ||||||
|  |     "    print(\"Predicted:\", ' '.join(tagger.tag(X_test[i])))\n", | ||||||
|  |     "    print(\"Correct:  \", ' '.join(Y_test[i]))\n", | ||||||
|  |     "    \n", | ||||||
|  |     "    print(\"\\n\")" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": null, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [] | ||||||
|  |   } | ||||||
|  |  ], | ||||||
|  |  "metadata": { | ||||||
|  |   "kernelspec": { | ||||||
|  |    "display_name": "Python 3", | ||||||
|  |    "language": "python", | ||||||
|  |    "name": "python3" | ||||||
|  |   }, | ||||||
|  |   "language_info": { | ||||||
|  |    "codemirror_mode": { | ||||||
|  |     "name": "ipython", | ||||||
|  |     "version": 3 | ||||||
|  |    }, | ||||||
|  |    "file_extension": ".py", | ||||||
|  |    "mimetype": "text/x-python", | ||||||
|  |    "name": "python", | ||||||
|  |    "nbconvert_exporter": "python", | ||||||
|  |    "pygments_lexer": "ipython3", | ||||||
|  |    "version": "3.7.3" | ||||||
|  |   } | ||||||
|  |  }, | ||||||
|  |  "nbformat": 4, | ||||||
|  |  "nbformat_minor": 4 | ||||||
|  | } | ||||||
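The notebook above only eyeballs a handful of test sentences. A minimal sketch for aggregate per-label scores, assuming the tagger, X_test and Y_test objects from the notebook are still in scope (token-level metrics via scikit-learn; a sequence-aware scorer such as seqeval would be the stricter choice):

    from sklearn.metrics import classification_report

    # flatten the per-sentence sequences into one token-level list each
    y_true = [label for sent in Y_test for label in sent]
    y_pred = [label for feats in X_test for label in tagger.tag(feats)]

    # per-class precision/recall/F1 for 'action', 'ingredient' and '0'
    print(classification_report(y_true, y_pred))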
Tagging/Conllu_Training.ipynb | 290 (new file)
@@ -0,0 +1,290 @@
|  | { | ||||||
|  |  "cells": [ | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 1, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "\n", | ||||||
|  |     "import sys\n", | ||||||
|  |     "\n", | ||||||
|  |     "from conllu import parse\n", | ||||||
|  |     "\n", | ||||||
|  |     "sys.path.insert(0,'..')\n", | ||||||
|  |     "import settings\n", | ||||||
|  |     "\n", | ||||||
|  |     "from tagging_tools import print_visualized_tags\n", | ||||||
|  |     "\n", | ||||||
|  |     "from train_sample_generator import ConlluReader, ConlluDataProvider\n", | ||||||
|  |     "\n", | ||||||
|  |     "from gensim.test.utils import common_texts, get_tmpfile\n", | ||||||
|  |     "from gensim.models import Word2Vec\n", | ||||||
|  |     "from nltk import PorterStemmer\n", | ||||||
|  |     "import numpy as np\n", | ||||||
|  |     "from sklearn import preprocessing\n", | ||||||
|  |     "porter = PorterStemmer()" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 2, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "conllu_reader = ConlluReader(\"recipes0.conllu\", iter_documents=False)" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 3, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "[TokenList<Set, oven, to, 350, degrees, F, .>]" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 3, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "conllu_reader.__iter__().__next__()" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 4, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "conllu_data_provider = ConlluDataProvider(\"recipes0.conllu\", \n", | ||||||
|  |     "                                          word2vec_model=None,\n", | ||||||
|  |     "                                          batchsize=100,\n", | ||||||
|  |     "                                          window_size=3,\n", | ||||||
|  |     "                                          iter_documents=False,\n", | ||||||
|  |     "                                          food_type=\"ingredient\")" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 5, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "x,y  = conllu_data_provider.getNextDataBatch(y_food_type_label=\"ingredient\")" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 6, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "1148" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 6, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "len(y)" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 11, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "sum_tokens = 0\n", | ||||||
|  |     "i = 0\n", | ||||||
|  |     "for x,y in conllu_data_provider:\n", | ||||||
|  |     "    sum_tokens += len(x)\n", | ||||||
|  |     "    i += 1\n", | ||||||
|  |     "    " | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 12, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "649423" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 12, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "sum_tokens" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 13, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "576" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 13, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "i" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "markdown", | ||||||
|  |    "metadata": {}, | ||||||
|  |    "source": [ | ||||||
|  |     "## decision tree classifier" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 35, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "from sklearn.tree import DecisionTreeClassifier\n", | ||||||
|  |     "from sklearn.ensemble import RandomForestClassifier\n", | ||||||
|  |     "from sklearn.model_selection import train_test_split" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 36, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "conllu_data_provider = ConlluDataProvider(\"recipes0.conllu\", \n", | ||||||
|  |     "                                          word2vec_model=None,\n", | ||||||
|  |     "                                          batchsize=100,\n", | ||||||
|  |     "                                          window_size=3,\n", | ||||||
|  |     "                                          iter_documents=False,\n", | ||||||
|  |     "                                          food_type=\"ingredient\")" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 37, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "clf = RandomForestClassifier(n_estimators=100 ,random_state=0, warm_start=True)" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 28, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "for x,y in conllu_data_provider:\n", | ||||||
|  |     "    break\n", | ||||||
|  |     "    X_train, X_test, y_train, y_test = train_test_split(x,y, random_state=0)\n", | ||||||
|  |     "    clf.fit(X_train, y_train)\n", | ||||||
|  |     "    pred = tree.predict(X_test)\n", | ||||||
|  |     "    print(\"loss: \", np.sum((pred - y_test)**2) / len(x))" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 29, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "array([[0., 0., 0., ..., 0., 0., 0.],\n", | ||||||
|  |        "       [0., 0., 0., ..., 0., 0., 0.],\n", | ||||||
|  |        "       [0., 0., 0., ..., 0., 0., 0.],\n", | ||||||
|  |        "       ...,\n", | ||||||
|  |        "       [0., 0., 1., ..., 0., 0., 0.],\n", | ||||||
|  |        "       [0., 0., 0., ..., 0., 0., 0.],\n", | ||||||
|  |        "       [0., 1., 0., ..., 0., 0., 0.]])" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "execution_count": 29, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "execute_result" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 39, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "name": "stdout", | ||||||
|  |      "output_type": "stream", | ||||||
|  |      "text": [ | ||||||
|  |       "loss:  0.041811846689895474\n" | ||||||
|  |      ] | ||||||
|  |     }, | ||||||
|  |     { | ||||||
|  |      "name": "stderr", | ||||||
|  |      "output_type": "stream", | ||||||
|  |      "text": [ | ||||||
|  |       "/home/jonas/.local/lib/python3.7/site-packages/sklearn/ensemble/forest.py:307: UserWarning: Warm-start fitting without increasing n_estimators does not fit new trees.\n", | ||||||
|  |       "  warn(\"Warm-start fitting without increasing n_estimators does not \"\n" | ||||||
|  |      ] | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "clf.fit(X_train, y_train)\n", | ||||||
|  |     "pred = tree.predict(X_test)\n", | ||||||
|  |     "print(\"loss: \", np.sum((pred - y_test)**2) / len(x))" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": null, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [] | ||||||
|  |   } | ||||||
|  |  ], | ||||||
|  |  "metadata": { | ||||||
|  |   "kernelspec": { | ||||||
|  |    "display_name": "Python 3", | ||||||
|  |    "language": "python", | ||||||
|  |    "name": "python3" | ||||||
|  |   }, | ||||||
|  |   "language_info": { | ||||||
|  |    "codemirror_mode": { | ||||||
|  |     "name": "ipython", | ||||||
|  |     "version": 3 | ||||||
|  |    }, | ||||||
|  |    "file_extension": ".py", | ||||||
|  |    "mimetype": "text/x-python", | ||||||
|  |    "name": "python", | ||||||
|  |    "nbconvert_exporter": "python", | ||||||
|  |    "pygments_lexer": "ipython3", | ||||||
|  |    "version": "3.7.3" | ||||||
|  |   } | ||||||
|  |  }, | ||||||
|  |  "nbformat": 4, | ||||||
|  |  "nbformat_minor": 4 | ||||||
|  | } | ||||||
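The UserWarning in the last run points at a real pitfall: with warm_start=True, calling fit again without raising n_estimators trains no new trees. A hedged sketch of batchwise fitting under that constraint, reusing the imports and conllu_data_provider from the notebook (note that each added tree only ever sees the batch it was fitted on):

    clf = RandomForestClassifier(n_estimators=10, warm_start=True, random_state=0)
    for x, y in conllu_data_provider:
        clf.fit(x, y)            # fits the 10 newest trees on this batch only
        clf.n_estimators += 10   # grow the ensemble before the next batch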
Tagging/README.md | 4 (new file)
@@ -0,0 +1,4 @@
|  | # Tagging tools | ||||||
|  | --- | ||||||
|  |  | ||||||
|  | This folder contains tools to annotate existing recipe instructions and generate CoNLL-U files from them (see the usage sketch below). | ||||||
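A hypothetical usage sketch of the reader added in this commit (module and file names as they appear below):

    import conllu_batch_generator as cbg

    # iterate the annotated recipes sentence by sentence
    reader = cbg.ConlluReader("recipes0.conllu")
    for sentence in reader:
        ...  # each item is conllu.parse() output for one sentence block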
@@ -90,7 +90,7 @@
|   }, |   }, | ||||||
|   { |   { | ||||||
|    "cell_type": "code", |    "cell_type": "code", | ||||||
|    "execution_count": 5, |    "execution_count": 2, | ||||||
|    "metadata": {}, |    "metadata": {}, | ||||||
|    "outputs": [], |    "outputs": [], | ||||||
|    "source": [ |    "source": [ | ||||||
@@ -109,7 +109,7 @@
|   }, |   }, | ||||||
|   { |   { | ||||||
|    "cell_type": "code", |    "cell_type": "code", | ||||||
|    "execution_count": 14, |    "execution_count": 3, | ||||||
|    "metadata": {}, |    "metadata": {}, | ||||||
|    "outputs": [], |    "outputs": [], | ||||||
|    "source": [ |    "source": [ | ||||||
@@ -120,7 +120,7 @@
|   }, |   }, | ||||||
|   { |   { | ||||||
|    "cell_type": "code", |    "cell_type": "code", | ||||||
|    "execution_count": 15, |    "execution_count": 4, | ||||||
|    "metadata": {}, |    "metadata": {}, | ||||||
|    "outputs": [], |    "outputs": [], | ||||||
|    "source": [ |    "source": [ | ||||||
@@ -142,16 +142,25 @@
|   }, |   }, | ||||||
|   { |   { | ||||||
|    "cell_type": "code", |    "cell_type": "code", | ||||||
|    "execution_count": 34, |    "execution_count": 11, | ||||||
|    "metadata": {}, |    "metadata": {}, | ||||||
|    "outputs": [], |    "outputs": [], | ||||||
|    "source": [ |    "source": [ | ||||||
|     "mwe_tokenizer = MWETokenizer([w.split() for w in ingredients.multi_word_ingredients_stemmed])" |     "from stemmed_mwe_tokenizer import StemmedMWETokenizer" | ||||||
|    ] |    ] | ||||||
|   }, |   }, | ||||||
|   { |   { | ||||||
|    "cell_type": "code", |    "cell_type": "code", | ||||||
|    "execution_count": 35, |    "execution_count": 12, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [ | ||||||
|  |     "mwe_tokenizer = StemmedMWETokenizer([w.split() for w in ingredients.multi_word_ingredients_stemmed])" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 14, | ||||||
|    "metadata": {}, |    "metadata": {}, | ||||||
|    "outputs": [ |    "outputs": [ | ||||||
|     { |     { | ||||||
@@ -423,8 +432,7 @@
|        " 'of',\n", |        " 'of',\n", | ||||||
|        " 'pasta',\n", |        " 'pasta',\n", | ||||||
|        " 'to',\n", |        " 'to',\n", | ||||||
|        " 'cheese',\n", |        " 'cheese_sauce',\n", | ||||||
|        " 'sauce',\n", |  | ||||||
|        " 'is',\n", |        " 'is',\n", | ||||||
|        " 'crucial',\n", |        " 'crucial',\n", | ||||||
|        " 'to',\n", |        " 'to',\n", | ||||||
@@ -457,7 +465,7 @@
|        " '.']" |        " '.']" | ||||||
|       ] |       ] | ||||||
|      }, |      }, | ||||||
|      "execution_count": 35, |      "execution_count": 14, | ||||||
|      "metadata": {}, |      "metadata": {}, | ||||||
|      "output_type": "execute_result" |      "output_type": "execute_result" | ||||||
|     } |     } | ||||||
@@ -466,6 +474,61 @@
|     "mwe_tokenizer.tokenize(nltk.tokenize.word_tokenize(instructions[0]))" |     "mwe_tokenizer.tokenize(nltk.tokenize.word_tokenize(instructions[0]))" | ||||||
|    ] |    ] | ||||||
|   }, |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": null, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [], | ||||||
|  |    "source": [] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 8, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "data": { | ||||||
|  |       "text/plain": [ | ||||||
|  |        "\u001b[0;31mSignature:\u001b[0m \u001b[0mmwe_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspan_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||||
|  |        "\u001b[0;31mDocstring:\u001b[0m\n", | ||||||
|  |        "Identify the tokens using integer offsets ``(start_i, end_i)``,\n", | ||||||
|  |        "where ``s[start_i:end_i]`` is the corresponding token.\n", | ||||||
|  |        "\n", | ||||||
|  |        ":rtype: iter(tuple(int, int))\n", | ||||||
|  |        "\u001b[0;31mFile:\u001b[0m      ~/.local/lib/python3.7/site-packages/nltk/tokenize/api.py\n", | ||||||
|  |        "\u001b[0;31mType:\u001b[0m      method\n" | ||||||
|  |       ] | ||||||
|  |      }, | ||||||
|  |      "metadata": {}, | ||||||
|  |      "output_type": "display_data" | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "?mwe_tokenizer." | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|  |   { | ||||||
|  |    "cell_type": "code", | ||||||
|  |    "execution_count": 10, | ||||||
|  |    "metadata": {}, | ||||||
|  |    "outputs": [ | ||||||
|  |     { | ||||||
|  |      "ename": "NotImplementedError", | ||||||
|  |      "evalue": "", | ||||||
|  |      "output_type": "error", | ||||||
|  |      "traceback": [ | ||||||
|  |       "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", | ||||||
|  |       "\u001b[0;31mNotImplementedError\u001b[0m                       Traceback (most recent call last)", | ||||||
|  |       "\u001b[0;32m<ipython-input-10-dfad11b33102>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mmwe_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspan_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnltk\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenize\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mword_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minstructions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", | ||||||
|  |       "\u001b[0;32m~/.local/lib/python3.7/site-packages/nltk/tokenize/api.py\u001b[0m in \u001b[0;36mspan_tokenize\u001b[0;34m(self, s)\u001b[0m\n\u001b[1;32m     42\u001b[0m         \u001b[0;34m:\u001b[0m\u001b[0mrtype\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     43\u001b[0m         \"\"\"\n\u001b[0;32m---> 44\u001b[0;31m         \u001b[0;32mraise\u001b[0m \u001b[0mNotImplementedError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     46\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0mtokenize_sents\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstrings\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | ||||||
|  |       "\u001b[0;31mNotImplementedError\u001b[0m: " | ||||||
|  |      ] | ||||||
|  |     } | ||||||
|  |    ], | ||||||
|  |    "source": [ | ||||||
|  |     "mwe_tokenizer.span_tokenize(nltk.tokenize.word_tokenize(instructions[0]))" | ||||||
|  |    ] | ||||||
|  |   }, | ||||||
|   { |   { | ||||||
|    "cell_type": "code", |    "cell_type": "code", | ||||||
|    "execution_count": null, |    "execution_count": null, | ||||||
Tagging/Recipe_Tagging_Analysis.ipynb | 1467 (new file)
    File diff suppressed because it is too large.

Tagging/conllu_batch_generator.py | 275 (new file)
@@ -0,0 +1,275 @@
|  | #!/usr/bin/env python3 | ||||||
|  |  | ||||||
|  | import sys | ||||||
|  |  | ||||||
|  | from conllu import parse | ||||||
|  | from tagging_tools import print_visualized_tags | ||||||
|  |  | ||||||
|  | from sklearn import preprocessing | ||||||
|  | import numpy as np | ||||||
|  |  | ||||||
|  | sys.path.insert(0, '..') | ||||||
|  | import settings  # noqa | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ConlluSentenceIterator(object): | ||||||
|  |     def __init__(self, conllu_reader): | ||||||
|  |         self.conllu_reader = conllu_reader | ||||||
|  |  | ||||||
|  |     def __next__(self): | ||||||
|  |         next_sent = self.conllu_reader.next_sentence() | ||||||
|  |         if next_sent is None: | ||||||
|  |             raise StopIteration | ||||||
|  |         return next_sent | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ConlluDocumentIterator(object): | ||||||
|  |     def __init__(self, conllu_reader): | ||||||
|  |         self.conllu_reader = conllu_reader | ||||||
|  |  | ||||||
|  |     def __next__(self): | ||||||
|  |         next_sent = self.conllu_reader.next_document() | ||||||
|  |         if next_sent is None: | ||||||
|  |             raise StopIteration | ||||||
|  |         return next_sent | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ConlluReader(object): | ||||||
|  |     def __init__(self, path, iter_documents=False): | ||||||
|  |         self._path = path | ||||||
|  |         self._fileobj = None | ||||||
|  |         self._open() | ||||||
|  |         self.iter_documents = iter_documents | ||||||
|  |  | ||||||
|  |     def _open(self): | ||||||
|  |         self._fileobj = open(self._path, 'r') | ||||||
|  |  | ||||||
|  |     def next_sentence(self): | ||||||
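|  |         # read lines up to the first blank line (or EOF): one sentence block | ||||||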
|  |         data = "" | ||||||
|  |         while True: | ||||||
|  |             line = self._fileobj.readline() | ||||||
|  |             if line == "": | ||||||
|  |                 break | ||||||
|  |             data += line | ||||||
|  |             if line == "\n": | ||||||
|  |                 break | ||||||
|  |  | ||||||
|  |         if data == "": | ||||||
|  |             return None | ||||||
|  |  | ||||||
|  |         if data[-1] != "\n": | ||||||
|  |             data += "\n" | ||||||
|  |  | ||||||
|  |         conllu_obj = parse(data) | ||||||
|  |         return conllu_obj | ||||||
|  |  | ||||||
|  |     def next_document(self): | ||||||
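|  |         # a document ends after two consecutive blank lines | ||||||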
|  |         data = "" | ||||||
|  |         last_line_empty = False | ||||||
|  |         while True: | ||||||
|  |             line = self._fileobj.readline() | ||||||
|  |             if line == "": | ||||||
|  |                 break | ||||||
|  |             data += line | ||||||
|  |             if line == "\n": | ||||||
|  |                 if last_line_empty: | ||||||
|  |                     break | ||||||
|  |                 last_line_empty = True | ||||||
|  |             else: | ||||||
|  |                 last_line_empty = False | ||||||
|  |  | ||||||
|  |         if data == "": | ||||||
|  |             return None | ||||||
|  |  | ||||||
|  |         if data[-1] != "\n": | ||||||
|  |             data += "\n" | ||||||
|  |  | ||||||
|  |         conllu_obj = parse(data) | ||||||
|  |         return conllu_obj | ||||||
|  |  | ||||||
|  |     def __iter__(self): | ||||||
|  |         return ConlluDocumentIterator(self) if self.iter_documents else ConlluSentenceIterator(self) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class SlidingWindowListIterator(object): | ||||||
|  |     def __init__(self, parent): | ||||||
|  |         self.parent = parent | ||||||
|  |         self.i = 0 | ||||||
|  |  | ||||||
|  |     def __next__(self): | ||||||
|  |         if len(self.parent) == self.i: | ||||||
|  |             raise StopIteration | ||||||
|  |  | ||||||
|  |         self.i += 1 | ||||||
|  |         return self.parent[self.i - 1] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class SlidingWindowList(list): | ||||||
|  |     def __init__(self, sliding_window_size, input=None, border_value=None): | ||||||
|  |  | ||||||
|  |         self.sliding_window_size = sliding_window_size | ||||||
|  |         self.border_value = border_value | ||||||
|  |  | ||||||
|  |         if border_value is None and input is not None: | ||||||
|  |             self.border_value = type(input[0])() | ||||||
|  |  | ||||||
|  |         if input is not None: | ||||||
|  |             super(SlidingWindowList, self).__init__(input) | ||||||
|  |  | ||||||
|  |     def __getitem__(self, index): | ||||||
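|  |         # an int index yields the 2*window_size+1 neighbourhood, padded at the borders | ||||||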
|  |  | ||||||
|  |         if type(index) == slice: | ||||||
|  |             start = 0 if index.start is None else index.start | ||||||
|  |             stop = len(self) if index.stop is None else index.stop | ||||||
|  |             step = 1 if index.step is None else index.step | ||||||
|  |             return [self[i] for i in range(start, stop, step)] | ||||||
|  |  | ||||||
|  |         else: | ||||||
|  |             n = self.sliding_window_size * 2 + 1 | ||||||
|  |             res = n * [self.border_value] | ||||||
|  |  | ||||||
|  |             j_start = index - self.sliding_window_size | ||||||
|  |  | ||||||
|  |             for i in range(n): | ||||||
|  |                 ind = j_start + i | ||||||
|  |                 if ind >= 0 and ind < len(self): | ||||||
|  |                     res[i] = super(SlidingWindowList, self).__getitem__(ind) | ||||||
|  |  | ||||||
|  |             return res | ||||||
|  |  | ||||||
|  |     def __iter__(self): | ||||||
|  |         return SlidingWindowListIterator(self) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  | class ConlluDataProviderIterator(object): | ||||||
|  |     def __init__(self, parent): | ||||||
|  |         self.parent = parent | ||||||
|  |         self.conllu_reader = ConlluReader( | ||||||
|  |             parent.filepath, parent.iter_documents) | ||||||
|  |  | ||||||
|  |     def __next__(self): | ||||||
|  |         result = self.parent.getNextDataBatch(conllu_reader=self.conllu_reader) | ||||||
|  |         if result is None: | ||||||
|  |             raise StopIteration | ||||||
|  |         return result | ||||||
|  | ''' | ||||||
|  |  | ||||||
|  | ''' | ||||||
|  | class ConlluDataProvider(object): | ||||||
|  |     def __init__(self, | ||||||
|  |                  filepath, | ||||||
|  |                  word2vec_model, | ||||||
|  |                  batchsize=100, | ||||||
|  |                  window_size=3, | ||||||
|  |                  iter_documents=False, | ||||||
|  |                  food_type=None): | ||||||
|  |         self.batchsize = batchsize | ||||||
|  |         self.word2vec_model = word2vec_model | ||||||
|  |         self.filepath = filepath | ||||||
|  |         self.conllu_reader = ConlluReader(filepath, iter_documents) | ||||||
|  |         self.window_size = window_size | ||||||
|  |         self.food_type = food_type | ||||||
|  |         self.iter_documents = iter_documents | ||||||
|  |  | ||||||
|  |         # create a label binarizer for upos tags: | ||||||
|  |         self.lb = preprocessing.LabelBinarizer() | ||||||
|  |         self.lb.fit(['.', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', | ||||||
|  |                      'NOUN', 'NUM', 'PRON', 'PRT', 'VERB', 'X']) | ||||||
|  |  | ||||||
|  |     def _get_next_conllu_objects(self, n: int, conllu_reader): | ||||||
|  |         i = 0 | ||||||
|  |         conllu_list = [] | ||||||
|  |  | ||||||
|  |         while i < n: | ||||||
|  |             try: | ||||||
|  |                 conllu_list.append(conllu_reader.__iter__().__next__()) | ||||||
|  |                 i += 1 | ||||||
|  |  | ||||||
|  |             except StopIteration: | ||||||
|  |                 break | ||||||
|  |  | ||||||
|  |         return conllu_list | ||||||
|  |  | ||||||
|  |     def _get_upos_X(self, conllu_list): | ||||||
|  |         n_tokens = 0 | ||||||
|  |         l_global = [] | ||||||
|  |         for document in conllu_list: | ||||||
|  |             l = [] | ||||||
|  |             for sentence in document: | ||||||
|  |                 for token in sentence: | ||||||
|  |                     upos = token['upostag'] | ||||||
|  |                     l.append(upos) | ||||||
|  |                     n_tokens += 1 | ||||||
|  |             if len(l) > 0: | ||||||
|  |                 l_global.append(self.lb.transform(l)) | ||||||
|  |  | ||||||
|  |         return l_global, n_tokens | ||||||
|  |  | ||||||
|  |     def _get_y(self, conllu_list, misc_key="food_type", misc_val="ingredient"): | ||||||
|  |         n_tokens = 0 | ||||||
|  |         y_global = [] | ||||||
|  |         for document in conllu_list: | ||||||
|  |             y = [] | ||||||
|  |             for sentence in document: | ||||||
|  |                 for token in sentence: | ||||||
|  |                     m = token['misc'] | ||||||
|  |                     t_y = m is not None and misc_key in m and m[misc_key] == misc_val | ||||||
|  |                     y.append(t_y) | ||||||
|  |                     n_tokens += 1 | ||||||
|  |             if len(y) > 0: | ||||||
|  |                 y_global.append(y) | ||||||
|  |  | ||||||
|  |         return y_global, n_tokens | ||||||
|  |  | ||||||
|  |     def getNextDataBatch(self, y_food_type_label=None, conllu_reader=None): | ||||||
|  |  | ||||||
|  |         if y_food_type_label is None: | ||||||
|  |             y_food_type_label = self.food_type | ||||||
|  |  | ||||||
|  |         if conllu_reader is None: | ||||||
|  |             conllu_reader = self.conllu_reader | ||||||
|  |         conllu_list = self._get_next_conllu_objects( | ||||||
|  |             self.batchsize, conllu_reader) | ||||||
|  |  | ||||||
|  |         if len(conllu_list) == 0: | ||||||
|  |             return None | ||||||
|  |  | ||||||
|  |         # generate features for each document/sentence | ||||||
|  |         n = len(conllu_list) | ||||||
|  |  | ||||||
|  |         d = self.window_size * 2 + 1 | ||||||
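|  |         # d = full window width: left context + token + right context | ||||||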
|  |  | ||||||
|  |         buf_X, x_tokens = self._get_upos_X(conllu_list) | ||||||
|  |         buf_ingr_y, y_tokens = self._get_y(conllu_list) | ||||||
|  |  | ||||||
|  |         assert len(buf_X) == len(buf_ingr_y) and x_tokens == y_tokens | ||||||
|  |  | ||||||
|  |         X_upos = np.zeros(shape=(x_tokens, d * len(self.lb.classes_))) | ||||||
|  |         y = None | ||||||
|  |  | ||||||
|  |         if y_food_type_label is not None: | ||||||
|  |             y = np.zeros(shape=(x_tokens)) | ||||||
|  |  | ||||||
|  |         i = 0 | ||||||
|  |         for xupos in buf_X: | ||||||
|  |             tmp = SlidingWindowList(self.window_size, | ||||||
|  |                                     xupos, | ||||||
|  |                                     border_value=[0] * len(self.lb.classes_)) | ||||||
|  |             for upos_window in tmp: | ||||||
|  |                 X_upos[i, :] = np.array(upos_window).flatten() | ||||||
|  |                 i += 1 | ||||||
|  |  | ||||||
|  |         i = 0 | ||||||
|  |         if y_food_type_label is not None: | ||||||
|  |             for sentence in buf_ingr_y: | ||||||
|  |                 for yl in sentence: | ||||||
|  |                     y[i] = yl | ||||||
|  |                     i += 1 | ||||||
|  |  | ||||||
|  |         return X_upos, y | ||||||
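|  |     # Note (descriptive, added for clarity): each row of X_upos is the | ||||||
|  |     # flattened one-hot UPOS encoding of a (2 * window_size + 1)-token | ||||||
|  |     # window, zero-padded at sentence borders; y flags tokens whose misc | ||||||
|  |     # column carries food_type=ingredient. | ||||||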
|  |      | ||||||
|  |     def __iter__(self): | ||||||
|  |         return ConlluDataProviderIterator(self) | ||||||
|  |  | ||||||
|  | ''' | ||||||
| @ -102,7 +102,7 @@ class ConlluElement(object): | |||||||
|         return replace_tab(result, 16) |         return replace_tab(result, 16) | ||||||
|  |  | ||||||
|  |  | ||||||
| class ConlluDocument(object): | class ConlluSentence(object): | ||||||
|     def __init__(self): |     def __init__(self): | ||||||
|         self.conllu_elements = [] |         self.conllu_elements = [] | ||||||
|  |  | ||||||
| @ -114,7 +114,25 @@ class ConlluDocument(object): | |||||||
|         for elem in self.conllu_elements: |         for elem in self.conllu_elements: | ||||||
|             result += elem.__repr__() + "\n" |             result += elem.__repr__() + "\n" | ||||||
|  |  | ||||||
|         return result + "\n" |         return result | ||||||
|  |  | ||||||
|  |     def __str__(self): | ||||||
|  |         return self.__repr__() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ConlluDocument(object): | ||||||
|  |     def __init__(self): | ||||||
|  |         self.conllu_sentences = [] | ||||||
|  |      | ||||||
|  |     def add(self, conllu_sentence: ConlluSentence): | ||||||
|  |         self.conllu_sentences.append(conllu_sentence) | ||||||
|  |      | ||||||
|  |     def __repr__(self): | ||||||
|  |         result = "# newdoc\n" | ||||||
|  |         for elem in self.conllu_sentences: | ||||||
|  |             result += elem.__repr__() + "\n" | ||||||
|  |  | ||||||
|  |         return result | ||||||
|  |  | ||||||
|     def __str__(self): |     def __str__(self): | ||||||
|         return self.__repr__() |         return self.__repr__() | ||||||
| @ -136,44 +154,54 @@ class ConlluGenerator(object): | |||||||
|         tokenized_documents = [] |         tokenized_documents = [] | ||||||
|  |  | ||||||
|         for doc in self.documents: |         for doc in self.documents: | ||||||
|             simple_tokenized = nltk.tokenize.word_tokenize(doc) |             tokenized_sentences = [] | ||||||
|             tokenized_documents.append( |             sentences = doc.split("\n") | ||||||
|  |             for sent in sentences:  | ||||||
|  |                 if len(sent) > 0: | ||||||
|  |                     simple_tokenized = nltk.tokenize.word_tokenize(sent) | ||||||
|  |                     tokenized_sentences.append( | ||||||
|                         self.mwe_tokenizer.tokenize(simple_tokenized)) |                         self.mwe_tokenizer.tokenize(simple_tokenized)) | ||||||
|  |             tokenized_documents.append(tokenized_sentences) | ||||||
|  |  | ||||||
|         # now create initial CoNLL-U elements |         # now create initial CoNLL-U elements | ||||||
|         for doc in tokenized_documents: |         for doc in tokenized_documents: | ||||||
|  |             conllu_doc = ConlluDocument() | ||||||
|             self.id_counter = 0 |             self.id_counter = 0 | ||||||
|             collnu_doc = ConlluDocument() |             for sent in doc: | ||||||
|             for token in doc: |                 conllu_sent = ConlluSentence() | ||||||
|  |                 for token in sent: | ||||||
|                     stemmed_token = None |                     stemmed_token = None | ||||||
|                     if "_" in token: |                     if "_" in token: | ||||||
|                         stemmed_token = "_".join( |                         stemmed_token = "_".join( | ||||||
|                             [self.stemmer.stem(part) for part in token.split("_")]) |                             [self.stemmer.stem(part) for part in token.split("_")]) | ||||||
|                     else: |                     else: | ||||||
|                         stemmed_token = self.stemmer.stem(token) |                         stemmed_token = self.stemmer.stem(token) | ||||||
|                 collnu_doc.add(ConlluElement( |                     conllu_sent.add(ConlluElement( | ||||||
|                         id=self.id_counter + 1, |                         id=self.id_counter + 1, | ||||||
|                         form=token, |                         form=token, | ||||||
|                         lemma=stemmed_token |                         lemma=stemmed_token | ||||||
|                     )) |                     )) | ||||||
|                     self.id_counter += 1 |                     self.id_counter += 1 | ||||||
|             self.conllu_documents.append(collnu_doc) |                 conllu_doc.add(conllu_sent) | ||||||
|  |             self.conllu_documents.append(conllu_doc) | ||||||
|  |  | ||||||
|     def pos_tagging(self): |     def pos_tagging(self): | ||||||
|         for conllu_document in self.conllu_documents: |         for conllu_document in self.conllu_documents: | ||||||
|             tokens = [x.form for x in conllu_document.conllu_elements] |             for conllu_sent in conllu_document.conllu_sentences: | ||||||
|  |                 tokens = [x.form for x in conllu_sent.conllu_elements] | ||||||
|                 pos_tags = pos_tag(tokens) |                 pos_tags = pos_tag(tokens) | ||||||
|                 simplified_tags = [map_tag('en-ptb', 'universal', tag) |                 simplified_tags = [map_tag('en-ptb', 'universal', tag) | ||||||
|                                 for word, tag in pos_tags] |                                 for word, tag in pos_tags] | ||||||
|  |  | ||||||
|                 for i in range(len(tokens)): |                 for i in range(len(tokens)): | ||||||
|                 conllu_elem = conllu_document.conllu_elements[i] |                     conllu_elem = conllu_sent.conllu_elements[i] | ||||||
|                     conllu_elem.upos = simplified_tags[i] |                     conllu_elem.upos = simplified_tags[i] | ||||||
|                     conllu_elem.xpos = pos_tags[i][1] |                     conllu_elem.xpos = pos_tags[i][1] | ||||||
|  |  | ||||||
|     def add_misc_value_by_list(self, key, value, stemmed_keyword_list): |     def add_misc_value_by_list(self, key, value, stemmed_keyword_list): | ||||||
|         for conllu_document in self.conllu_documents: |         for conllu_document in self.conllu_documents: | ||||||
|             for elem in conllu_document.conllu_elements: |             for conllu_sent in conllu_document.conllu_sentences: | ||||||
|  |                 for elem in conllu_sent.conllu_elements: | ||||||
|                     if elem.lemma in stemmed_keyword_list: |                     if elem.lemma in stemmed_keyword_list: | ||||||
|                         elem.add_misc(key, value) |                         elem.add_misc(key, value) | ||||||
|  |  | ||||||
|  | |||||||
123 Tagging/crf_data_generator.py Normal file
							| @ -0,0 +1,123 @@ | |||||||
|  | #!/usr/bin/env python3 | ||||||
|  |  | ||||||
|  | import conllu_batch_generator as cbg | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def word2features(sent, i): | ||||||
|  |     word = sent[i]['form'] | ||||||
|  |     postag = sent[i]['upostag'] | ||||||
|  |     features = [ | ||||||
|  |         'bias', | ||||||
|  |         #'word.lower=' + word.lower(), | ||||||
|  |         'word[-3:]=' + word[-3:], | ||||||
|  |         'word[-2:]=' + word[-2:], | ||||||
|  |         'word.isupper=%s' % word.isupper(), | ||||||
|  |         'word.istitle=%s' % word.istitle(), | ||||||
|  |         'word.isdigit=%s' % word.isdigit(), | ||||||
|  |         'postag=' + postag, | ||||||
|  |         'postag[:2]=' + postag[:2], | ||||||
|  |     ] | ||||||
|  |     if i > 0: | ||||||
|  |         word1 = sent[i-1]['form'] | ||||||
|  |         postag1 = sent[i-1]['upostag'] | ||||||
|  |         features.extend([ | ||||||
|  |             '-1:word.lower=' + word1.lower(), | ||||||
|  |             '-1:word.istitle=%s' % word1.istitle(), | ||||||
|  |             '-1:word.isupper=%s' % word1.isupper(), | ||||||
|  |             '-1:postag=' + postag1, | ||||||
|  |             '-1:postag[:2]=' + postag1[:2], | ||||||
|  |         ]) | ||||||
|  |         if i > 1: | ||||||
|  |             word1 = sent[i-2]['form'] | ||||||
|  |             postag1 = sent[i-2]['upostag'] | ||||||
|  |             features.extend([ | ||||||
|  |                 '-2:word.lower=' + word1.lower(), | ||||||
|  |                 '-2:word.istitle=%s' % word1.istitle(), | ||||||
|  |                 '-2:word.isupper=%s' % word1.isupper(), | ||||||
|  |                 '-2:postag=' + postag1, | ||||||
|  |                 '-2:postag[:2]=' + postag1[:2], | ||||||
|  |             ]) | ||||||
|  |     else: | ||||||
|  |         features.append('BOS') | ||||||
|  |  | ||||||
|  |     if i < len(sent)-1: | ||||||
|  |         word1 = sent[i+1]['form'] | ||||||
|  |         postag1 = sent[i+1]['upostag'] | ||||||
|  |         features.extend([ | ||||||
|  |             '+1:word.lower=' + word1.lower(), | ||||||
|  |             '+1:word.istitle=%s' % word1.istitle(), | ||||||
|  |             '+1:word.isupper=%s' % word1.isupper(), | ||||||
|  |             '+1:postag=' + postag1, | ||||||
|  |             '+1:postag[:2]=' + postag1[:2], | ||||||
|  |         ]) | ||||||
|  |         if i < len(sent)-2: | ||||||
|  |             word1 = sent[i+2]['form'] | ||||||
|  |             postag1 = sent[i+2]['upostag'] | ||||||
|  |             features.extend([ | ||||||
|  |                 '+2:word.lower=' + word1.lower(), | ||||||
|  |                 '+2:word.istitle=%s' % word1.istitle(), | ||||||
|  |                 '+2:word.isupper=%s' % word1.isupper(), | ||||||
|  |                 '+2:postag=' + postag1, | ||||||
|  |                 '+2:postag[:2]=' + postag1[:2], | ||||||
|  |             ]) | ||||||
|  |     else: | ||||||
|  |         features.append('EOS') | ||||||
|  |  | ||||||
|  |     return features | ||||||
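|  |  | ||||||
|  | # Illustrative feature layout (hypothetical token "Preheat" at i == 0, | ||||||
|  | # upostag "VERB"): word2features yields a list of strings such as | ||||||
|  | # ['bias', 'word[-3:]=eat', 'word[-2:]=at', 'word.isupper=False', | ||||||
|  | #  'word.istitle=True', 'word.isdigit=False', 'postag=VERB', | ||||||
|  | #  'postag[:2]=VE', 'BOS', '+1:word.lower=...', ...], which is the | ||||||
|  | # sequence format that pycrfsuite/sklearn-crfsuite accept directly. | ||||||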
|  |  | ||||||
|  |  | ||||||
|  | def sent2labels(sent): | ||||||
|  |     labels = [] | ||||||
|  |     for token in sent: | ||||||
|  |         if token['misc'] is not None and 'food_type' in token['misc']: | ||||||
|  |             labels.append(token['misc']['food_type']) | ||||||
|  |         else: | ||||||
|  |             labels.append("0") | ||||||
|  |     return labels | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def sent2features(sent): | ||||||
|  |     return [word2features(sent, i) for i in range(len(sent))] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def sent2tokens(sent): | ||||||
|  |     return [token['form'] for token in sent] | ||||||
|  |  | ||||||
|  |  | ||||||
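|  | # Note: index 1 of each feature list is the 'word[-3:]=...' entry, so | ||||||
|  | # this recovers only the 3-character suffix of each token, not its | ||||||
|  | # full surface form. | ||||||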
|  | def feature2tokens(sent): | ||||||
|  |     return [t[1].split("=")[1] for t in sent] | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ConlluCRFReaderIterator(object): | ||||||
|  |     def __init__(self, parent): | ||||||
|  |         self._parent = parent | ||||||
|  |         self._iter = self._parent._conllu_reader.__iter__() | ||||||
|  |  | ||||||
|  |     def __next__(self): | ||||||
|  |         features = None | ||||||
|  |         labels = None | ||||||
|  |         tokens = None | ||||||
|  |  | ||||||
|  |         if not self._parent._iter_documents: | ||||||
|  |             next_sent = self._iter.__next__()[0] | ||||||
|  |             features = sent2features(next_sent) | ||||||
|  |             labels = sent2labels(next_sent) | ||||||
|  |             tokens = sent2tokens(next_sent) | ||||||
|  |         else: | ||||||
|  |             next_doc = self._iter.__next__() | ||||||
|  |             features = [sent2features(sentence) for sentence in next_doc] | ||||||
|  |             labels = [sent2labels(sentence) for sentence in next_doc] | ||||||
|  |             tokens = [sent2tokens(sentence) for sentence in next_doc] | ||||||
|  |  | ||||||
|  |         return features, labels, tokens | ||||||
|  |  | ||||||
|  |  | ||||||
|  | class ConlluCRFReader(object): | ||||||
|  |     def __init__(self, path, iter_documents=False): | ||||||
|  |         self._path = path | ||||||
|  |         self._iter_documents = iter_documents | ||||||
|  |  | ||||||
|  |         self._conllu_reader = cbg.ConlluReader(path, iter_documents) | ||||||
|  |  | ||||||
|  |     def __iter__(self): | ||||||
|  |         return ConlluCRFReaderIterator(self) | ||||||
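|  |  | ||||||
|  |  | ||||||
|  | # Minimal training sketch (an assumption, not part of this module: it | ||||||
|  | # presumes python-crfsuite is installed and reuses file names that | ||||||
|  | # appear elsewhere in this commit): | ||||||
|  | # | ||||||
|  | #     import pycrfsuite | ||||||
|  | #     trainer = pycrfsuite.Trainer(verbose=False) | ||||||
|  | #     for features, labels, tokens in ConlluCRFReader("recipes0.conllu"): | ||||||
|  | #         trainer.append(features, labels) | ||||||
|  | #     trainer.train('test.crfsuite') | ||||||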
| @ -16,6 +16,12 @@ spec = importlib.util.spec_from_file_location( | |||||||
| ingredients = importlib.util.module_from_spec(spec) | ingredients = importlib.util.module_from_spec(spec) | ||||||
| spec.loader.exec_module(ingredients) | spec.loader.exec_module(ingredients) | ||||||
|  |  | ||||||
|  | # loading actions: | ||||||
|  | spec = importlib.util.spec_from_file_location( | ||||||
|  |     "ingredients", "../" + settings.actions_file) | ||||||
|  | actions = importlib.util.module_from_spec(spec) | ||||||
|  | spec.loader.exec_module(actions) | ||||||
|  |  | ||||||
| # load json reader | # load json reader | ||||||
|  |  | ||||||
|  |  | ||||||
| @ -36,13 +42,14 @@ def process_instructions(instructions: list): | |||||||
|     if len(instructions) == 0: |     if len(instructions) == 0: | ||||||
|         return |         return | ||||||
|  |  | ||||||
|     conllu_input_docs = [doc.replace("\n", " ")[:-1] for doc in instructions] |     conllu_input_docs = instructions | ||||||
|  |  | ||||||
|     cg = ConlluGenerator( |     cg = ConlluGenerator( | ||||||
|         conllu_input_docs, ingredients.multi_word_ingredients_stemmed) |         conllu_input_docs, ingredients.multi_word_ingredients_stemmed) | ||||||
|     cg.tokenize_and_stem() |     cg.tokenize_and_stem() | ||||||
|     cg.pos_tagging() |     cg.pos_tagging() | ||||||
|     cg.add_misc_value_by_list("food_type", "ingredient", [w.replace(" ","_") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed) |     cg.add_misc_value_by_list("food_type", "ingredient", [w.replace(" ","_") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed) | ||||||
|  |     cg.add_misc_value_by_list("food_type", "action", actions.stemmed_cooking_verbs) | ||||||
|  |  | ||||||
|     savefile.write(str(cg)) |     savefile.write(str(cg)) | ||||||
|  |  | ||||||
|  | |||||||
28 Tagging/tagging_tools.py Normal file
							| @ -0,0 +1,28 @@ | |||||||
|  | #!/usr/bin/env python3 | ||||||
|  |  | ||||||
|  | from IPython.display import Markdown, display | ||||||
|  | import conllu | ||||||
|  |  | ||||||
|  | def print_visualized_tags( | ||||||
|  |         conllu_sentence, | ||||||
|  |         food_tags_and_colors={'ingredient': 'cyan', 'action': "orange"}, | ||||||
|  |         upos_colors={'VERB': 'yellow'}): | ||||||
|  |     colorstr = "<span style='background-color:{}'>{}</span>" | ||||||
|  |     s = "" | ||||||
|  |     for tag in conllu_sentence: | ||||||
|  |         upos = tag['upostag'] | ||||||
|  |         misc = tag['misc'] | ||||||
|  |         # guard against tokens without a misc column or without a | ||||||
|  |         # food_type entry (avoids a KeyError and silently dropped tokens) | ||||||
|  |         food_type = misc.get('food_type') if misc is not None else None | ||||||
|  |         if food_type in food_tags_and_colors: | ||||||
|  |             s += colorstr.format( | ||||||
|  |                 food_tags_and_colors[food_type], tag['form']) + " " | ||||||
|  |         elif upos in upos_colors: | ||||||
|  |             s += colorstr.format(upos_colors[upos], tag['form']) + " " | ||||||
|  |         else: | ||||||
|  |             s += tag['form'] + " " | ||||||
|  |  | ||||||
|  |     display(Markdown(s)) | ||||||
|  |  | ||||||
|  |  | ||||||
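|  | # Minimal usage sketch (assumes a sentence parsed with the conllu | ||||||
|  | # package; the file name is illustrative): | ||||||
|  | # | ||||||
|  | #     sentences = conllu.parse(open("recipes0.conllu").read()) | ||||||
|  | #     print_visualized_tags(sentences[0]) | ||||||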
BIN Tagging/test.crfsuite Normal file
Binary file not shown.
102 tools.py Normal file
							| @ -0,0 +1,102 @@ | |||||||
|  | #!/usr/bin/env python3 | ||||||
|  |  | ||||||
|  | import numpy as np | ||||||
|  | import json | ||||||
|  |  | ||||||
|  | import nltk | ||||||
|  | from nltk.stem import PorterStemmer | ||||||
|  | from nltk.stem import LancasterStemmer | ||||||
|  | from nltk.corpus import stopwords as nltk_stopwords | ||||||
|  |  | ||||||
|  | from pprint import pprint | ||||||
|  |  | ||||||
|  | from gensim.test.utils import common_texts, get_tmpfile | ||||||
|  | from gensim.models import Word2Vec, KeyedVectors | ||||||
|  |  | ||||||
|  | from sklearn.manifold import TSNE | ||||||
|  |  | ||||||
|  | import matplotlib.pyplot as plt | ||||||
|  |  | ||||||
|  | from json_buffered_reader import JSON_buffered_reader as JSON_br | ||||||
|  |  | ||||||
|  | import pandas as pd | ||||||
|  |  | ||||||
|  | import settings | ||||||
|  |  | ||||||
|  | from ipypb import track | ||||||
|  | from IPython.display import HTML, Markdown | ||||||
|  |  | ||||||
|  |  | ||||||
|  | # loading learned wordvectors | ||||||
|  | wv = KeyedVectors.load("data/wordvectors.kv") | ||||||
|  | porter = PorterStemmer() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def word_similarity(word_a: str, word_b: str, model=wv, stemmer=porter): | ||||||
|  |     return model.similarity(stemmer.stem(word_a), stemmer.stem(word_b)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  | def word_exists(word: str, model=wv, stemmer=porter): | ||||||
|  |     return stemmer.stem(word) in model | ||||||
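|  |  | ||||||
|  | # Illustrative calls (the word pair is hypothetical; values depend on | ||||||
|  | # the trained vectors): | ||||||
|  | #     word_similarity("bake", "roast")  # cosine similarity in [-1, 1] | ||||||
|  | #     word_exists("tomato")             # True if the stem is in the vocab | ||||||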
|  |  | ||||||
|  | from cooking_vocab import cooking_verbs | ||||||
|  | from cooking_ingredients import ingredients | ||||||
|  |  | ||||||
|  | model_actions = [] | ||||||
|  | model_ingredients = [] | ||||||
|  |  | ||||||
|  | for action in cooking_verbs: | ||||||
|  |     if word_exists(action): | ||||||
|  |         model_actions.append(action) | ||||||
|  |  | ||||||
|  | for ingredient in ingredients: | ||||||
|  |     if word_exists(ingredient): | ||||||
|  |         model_ingredients.append(ingredient) | ||||||
|  |  | ||||||
|  | def tsne_plot(tokens, model=wv, dist_token=None): | ||||||
|  |     vecs = [] | ||||||
|  |     labels = [] | ||||||
|  |     for token in tokens: | ||||||
|  |         vecs.append(model[token]) | ||||||
|  |         labels.append(token) | ||||||
|  |  | ||||||
|  |     tsne_model = TSNE(perplexity=40, n_components=2, | ||||||
|  |                       init='pca', n_iter=2500, random_state=23) | ||||||
|  |     plot_values = tsne_model.fit_transform(vecs) | ||||||
|  |  | ||||||
|  |     distances = [] | ||||||
|  |  | ||||||
|  |     min_size = 10 | ||||||
|  |     max_size = 500 | ||||||
|  |  | ||||||
|  |     if dist_token is not None: | ||||||
|  |         distances = np.array([model.similarity(t, dist_token) for t in tokens]) | ||||||
|  |         # scale: | ||||||
|  |         min_s = np.min(distances) | ||||||
|  |         max_s = np.max(distances) | ||||||
|  |         distances = min_size + (distances - min_s) * ((max_size - min_size) / (max_s - min_s)) | ||||||
|  |  | ||||||
|  |  | ||||||
|  |     x = [] | ||||||
|  |     y = [] | ||||||
|  |     for value in plot_values: | ||||||
|  |         x.append(value[0]) | ||||||
|  |         y.append(value[1]) | ||||||
|  |  | ||||||
|  |     plt.figure(figsize=(16, 16)) | ||||||
|  |     for i in range(len(x)): | ||||||
|  |         if dist_token is None: | ||||||
|  |             plt.scatter(x[i], y[i]) | ||||||
|  |         else: | ||||||
|  |             plt.scatter(x[i], y[i], s=distances[i]) | ||||||
|  |         plt.annotate(labels[i], | ||||||
|  |                      xy=(x[i], y[i]), | ||||||
|  |                      xytext=(5, 2), | ||||||
|  |                      textcoords='offset points', | ||||||
|  |                      ha='right', | ||||||
|  |                      va='bottom') | ||||||
|  |     plt.show() | ||||||
|  |  | ||||||
|  |  | ||||||
|  | stemmed_ingredients = [porter.stem(ing) for ing in model_ingredients] | ||||||
|  | stemmed_actions = [porter.stem(act) for act in model_actions] | ||||||
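|  |  | ||||||
|  | # Illustrative call (token choice is hypothetical; needs a matplotlib | ||||||
|  | # backend to display): | ||||||
|  | #     tsne_plot(stemmed_ingredients, dist_token=porter.stem("tomato")) | ||||||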