diff --git a/Tagging/CRF_evaluation.ipynb b/Tagging/CRF_evaluation.ipynb new file mode 100644 index 0000000..2074322 --- /dev/null +++ b/Tagging/CRF_evaluation.ipynb @@ -0,0 +1,1680 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# CRF entity recognition evaluation" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from IPython.core.display import Markdown, HTML, display\n", + "\n", + "import crf_data_generator as cdg\n", + "import pycrfsuite" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "data = cdg.ConlluCRFReader(\"recipes0.conllu\")\n", + "\n", + "data_iterator = iter(data)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def sentence_as_markdown_table( tokens, labels = None, predictions = None):\n", + " n = len(tokens)\n", + " s = \"| |\"\n", + " for t in tokens:\n", + " s += t + \" |\"\n", + " \n", + " s += \"\\n|-----|\" + \"----|\" * n\n", + " \n", + " if labels is not None:\n", + " s += \"\\n|labels:|\" + \"\".join([f\" {l} |\" for l in labels])\n", + " \n", + " if predictions is not None:\n", + " s+= \"\\n|Predicitions:|\" + \"\".join([f\" {p} |\" for p in predictions])\n", + " \n", + " display(Markdown(s + \"\\n\"))\n", + "\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tagger = pycrfsuite.Tagger()\n", + "tagger.open('test.crfsuite')" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "| |Reduce |heat |to |low |and |let |the |water |simmer |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | action | 0 | 0 | 0 | 0 | 0 | ingredient | action | 0 |\n", + "|Predicitions:| action | action | 0 | 0 | 0 | 0 | 0 | ingredient | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Gently |drop |gnudi |, |about |12 |at |a |time |, |into |the |simmering |water |and |cook |until |they |rise |to |the |surface |, |about |4 |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Simmer |for |4 |more |minutes |and |remove |with |a |slotted |spoon |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Keep |boiled |gnudi |warm |while |you |finish |cooking |remaining |batches |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | action | 0 | 0 | 0 | action | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | action | 0 | action | 0 | 0 | 0 | action | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Melt_butter |in |a |large |skillet |over |medium |heat |and |cook |whole |sage_leaves |until |they |wilt |and |are |beginning |to |brown |, |about |2 |minutes |; |remove |leaves |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | action | ingredient | 0 |\n", + "|Predicitions:| ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | action | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Retain |butter |in |skillet |. |\n", + "|-----|----|----|----|----|----|\n", + "|labels:| 0 | ingredient | 0 | 0 | 0 |\n", + "|Predicitions:| action | ingredient | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Stir |chopped |sage |into |the |hot |butter |and |cook |until |the |butter |begins |to |brown |and |give |off |a |nutty |fragrance |, |about |2 |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | action | ingredient | 0 | 0 | ingredient | ingredient | 0 | action | 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | action | ingredient | 0 | 0 | ingredient | ingredient | 0 | action | 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Gently |mix |the |gnudi |into |the |butter |and |sage |until |gnudi |are |coated |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | action | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Garnish |with |whole |sage_leaves |and |sprinkle |with |2 |tablespoons |Parmigiano-Reggiano_cheese |to |serve |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | ingredient | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | ingredient | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Preheat |the |oven |to |350 |degrees |. |\n", + "|-----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Cream |butter |with |lemon_zest |and |sugar |( |minus |1 |tablespoon |) |until |light |and |fluffy |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| ingredient | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| ingredient | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |egg |and |vanilla |and |beat |until |combined |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Meanwhile |, |toss |the |blueberries |with |1/4 |cup |of |flour |, |then |whisk |together |the |remaining |flour |, |baking_powder |and |salt |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 | 0 | action | 0 | 0 | 0 | action | 0 | ingredient | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 | 0 | action | 0 | 0 | 0 | action | 0 | ingredient | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |flour |mixture |to |the |butter |mixture |a |little |at |a |time |, |alternating |with |the |buttermilk |; |fold |in |the |blueberries |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | action | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Grease |a |9-inch |square |aluminum |baking |pan |with |butter |or |coat |with |non-stick |spray |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Spread |batter |into |pan |. |\n", + "|-----|----|----|----|----|----|\n", + "|labels:| action | action | 0 | 0 | 0 |\n", + "|Predicitions:| action | ingredient | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Sprinkle |batter |with |remaining |tablespoon |of |sugar |. |\n", + "|-----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | action | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| action | ingredient | 0 | 0 | action | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Bake |for |35-45 |minutes |. |\n", + "|-----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Check |with |a |toothpick |, |checking |for |doneness |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Let |cool |at |least |15 |minutes |before |serving |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | action | 0 | 0 | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Preheat |a |grill |for |high |heat |. |\n", + "|-----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | action | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | action | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |In |a |medium |bowl |, |lightly |mix |together |the |ground_beef |, |Worcestershire_sauce |, |liquid_smoke |and |garlic_powder |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Form |into |3 |patties |, |handling |the |meat |minimally |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Brush |both |sides |of |each |patty |with |some |oil |, |and |season |with |seasoned_salt |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | action | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | action | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Place |the |patties |on |the |grill |grate |, |and |cook |for |about |5 |minutes |per |side |, |until |well |done |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | action | action | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | action | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Heat |oven |to |400 |degrees |F |. |\n", + "|-----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Saute |sausage |and |zucchini |in |oil |in |a |large |nonstick |skillet |over |medium |heat |5 |to |7 |minutes |, |or |until |sausage |is |browned |and |zucchini |tender |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 |\n", + "|Predicitions:| 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Remove |skillet |from |heat |; |cool |slightly |. |\n", + "|-----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | action | 0 | action | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | action | 0 | action | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Combine |ricotta |, |egg_white |, |mozzarella |, |1/4 |cup |of |the |Parmesan |, |salt |, |and |pepper |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| 0 | ingredient | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Spread |3/4 |cup |of |the |tomato_sauce |on |bottom |of |a |10-inch |ovenproof |skillet |, |preferably |cast-iron |or |nonstick |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Top |with |2 |noodles |, |long |sides |touching |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Spoon |1/3 |each |of |the |sausage |mixture |, |cheese |mixture |, |and |basil_leaves |over |noodles |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Assemble |2 |more |layers |, |beginning |with |2 |noodles |, |then |3/4 |cup |of |the |sauce |, |1/3 |of |the |sausage |mixture |, |1/3 |of |the |cheese |mixture |, |and |1/3 |of |the |basil_leaves |, |ending |top |layer |with |the |last |2 |noodles |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Spread |the |remaining |3/4 |cup |sauce |over |top |and |sprinkle |with |the |remaining |1/4 |cup |Parmesan |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Cover |tightly |with |foil |. |\n", + "|-----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Bake |35 |minutes |. |\n", + "|-----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Let |rest |10 |minutes |before |cutting |into |wedges |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Preheat |the |oven |to |450 |degrees |F. |Bring |a |large |pot |of |salted_water |to |a |boil |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Put |2 |pieces |of |foil |side |by |side |on |a |baking |sheet |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Put |the |almonds |on |one |piece |of |foil |and |the |broccoli |on |the |other |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Transfer |to |the |oven |; |roast |until |the |almonds |are |lightly |toasted |, |about |7 |minutes |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Remove |the |foil |with |the |almonds |. |\n", + "|-----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Continue |roasting |the |broccoli |until |tender |, |about |7 |more |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Season |the |broccoli |with |salt |and |pepper |and |transfer |to |a |large |bowl |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |pasta |to |the |boiling_water |and |cook |as |the |label |directs |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Meanwhile |, |pulse |the |almonds |in |a |food |processor |until |finely |chopped |; |with |the |motor |running |, |add |the |garlic |and |process |until |finely |chopped |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Drizzle |in |the |olive_oil |and |process |until |blended |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | ingredient | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |basil |and |pulse |until |smooth |; |transfer |to |the |bowl |with |the |broccoli |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Put |the |tomatoes |and |half |of |their |juice |( |about |1 |cup |) |in |the |food |processor |and |pulse |until |pureed |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |to |the |bowl |with |the |broccoli |mixture |and |stir |to |combine |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |1 |teaspoon |salt |and |1/2 |teaspoon |pepper |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | action | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | 0 | action | 0 | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Drain |the |pasta |and |toss |with |the |sauce |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Per |serving |: |Calories |744 |; |Fat |39 |g |( |Saturated |5 |g |) |; |Cholesterol |0 |mg |; |Sodium |555 |mg |; |Carbohydrate |82 |g |; |Fiber |12 |g |; |Protein |20 |g |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Photograph |by |Antonis |Achilleos |\n", + "|-----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Preheat |oven |to |350 |degrees |F |( |175 |degrees |C |) |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Lightly |grease |one |13 |x |9 |inch |cake |pan |or |two |8 |inch |round |cake |pans |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | ingredient | ingredient | 0 | 0 |\n", + "|Predicitions:| 0 | ingredient | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | ingredient | ingredient | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Mix |sugar |and |flour |together |. |\n", + "|-----|----|----|----|----|----|----|\n", + "|labels:| action | action | 0 | action | 0 | 0 |\n", + "|Predicitions:| action | action | 0 | action | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |With |two |knives |or |a |pastry |blender |cut |the |shortening |into |the |flour |and |sugar |until |the |mixture |resembles |coarse |crumbs |( |as |if |you |were |making |pie_dough |) |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 | action | 0 | action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | action | 0 | action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Reserve |1/2 |cup |of |this |mixture |as |crumbs |for |top |of |cake |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |To |the |remaining |flour |mixture |stir |in |the |baking_powder |, |eggs |, |milk |, |and |vanilla |and |mix |well |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | action | 0 | action | 0 | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | 0 | ingredient | 0 | action | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | action | 0 | action | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | 0 | ingredient | 0 | action | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Pour |batter |into |prepared |pan |. |\n", + "|-----|----|----|----|----|----|----|\n", + "|labels:| action | action | 0 | action | 0 | 0 |\n", + "|Predicitions:| 0 | action | 0 | action | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Sprinkle |reserved |crumb |mixture |over |top |. |\n", + "|-----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Bake |at |350 |degrees |F |( |175 |degrees |C |) |for |about |45 |minutes |( |35 |minutes |if |using |two |8 |inch |round |cake |pans |) |or |until |center |of |cake |springs |back |when |lightly |touched |or |a |toothpick |inserted |in |the |center |comes |out |clean |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Combine |the |garlic |, |onions |, |salt |and |5 |cups |water |in |a |12-inch |high-sided |saucepan |and |bring |to |a |boil |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |chicken_breasts |and |bay_leaf |, |and |lower |the |heat |to |a |simmer |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | ingredient | 0 | 0 | action | 0 | action | 0 | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | ingredient | 0 | 0 | action | 0 | action | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Cook |the |chicken |, |partially |covered |, |until |cooked |through |, |about |35 |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | action | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | action | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |( |To |check |for |doneness |, |slit |the |chicken |in |half |to |make |sure |the |interior |is |no |longer |pink |. |) |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Remove |the |chicken |( |reserve |the |broth |) |and |allow |to |cool |slightly |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | action | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |When |cool |enough |to |handle |, |shred |by |hand |and |place |in |a |medium |bowl |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | action | 0 | 0 | 0 | 0 | action | 0 | ingredient | 0 | action | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Remove |enough |broth |to |leave |about |2 |cups |in |the |pan |with |the |onions |and |garlic |( |cool |and |refrigerate |or |freeze |the |extra |broth |for |another |use |) |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | action | 0 | action | 0 | action | 0 | ingredient | ingredient | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | action | 0 | 0 | 0 | action | 0 | ingredient | ingredient | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Remove |the |bay_leaf |. |\n", + "|-----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |tomatillos |and |jalapenos |and |cook |until |the |tomatillos |are |olive |green |, |10 |to |12 |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | ingredient | ingredient | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | action | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |With |a |slotted |spoon |, |transfer |the |solid |ingredients |( |onions |, |tomatillos |and |garlic |) |to |a |blender |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |cilantro |and |process |until |smooth |( |using |some |of |the |liquid |in |the |pan |if |needed |) |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Season |with |salt |and |pepper |. |\n", + "|-----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | action | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | action | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Pour |the |salsa |back |into |the |saucepan |and |bring |to |a |boil |; |add |the |chicken |and |cook |until |the |flavors |incorporate |, |about |5 |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | action | 0 | ingredient | 0 | action | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | action | 0 | ingredient | 0 | action | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Adjust |the |seasoning |. |\n", + "|-----|----|----|----|----|\n", + "|labels:| action | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Remove |from |the |heat |and |set |aside |to |cool |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | action | 0 | action | 0 | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | 0 | action | 0 | action | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Preheat |the |oven |to |400 |degrees |F |. |\n", + "|-----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |To |assemble |the |empanadas |: |Dust |a |flat |surface |with |flour |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Roll |out |the |pizza_dough |about |1/8 |to |1/4 |inch |thick |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Using |a |6-inch |round |cookie |cutter |( |or |a |bowl |or |plate |with |the |same |diameter |) |, |cut |the |dough |into |circles |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | ingredient | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | ingredient | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | ingredient | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | ingredient | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Expect |to |get |about |3 |circles |from |1 |pound |of |dough |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Brush |the |egg |wash |over |the |surface |of |one |dough |round |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | action | 0 | 0 | 0 | 0 | 0 | ingredient | ingredient | 0 |\n", + "|Predicitions:| 0 | 0 | ingredient | action | 0 | 0 | action | 0 | 0 | ingredient | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |2 |tablespoons |of |filling |onto |half |of |the |circle |and |sprinkle |about |1 |ounce |of |mozzarella |on |top |of |the |filling |, |if |using |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | ingredient | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Fold |the |plain |half |over |the |filling |. |\n", + "|-----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| 0 | 0 | action | 0 | 0 | 0 | ingredient | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Seal |the |edges |with |fork |tines |or |crimp |decoratively |. 
|\n", + "|-----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Repeat |for |the |remaining |empanadas |. |\n", + "|-----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | 0 | 0 | ingredient | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |( |Save |any |leftover |filling |in |an |airtight |container |in |the |fridge |for |up |to |a |week |for |another |use |. |) |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | ingredient | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Transfer |the |empanadas |to |a |baking |sheet |lined |with |parchment |paper |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | 0 | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Using |a |pastry |brush |, |brush |the |tops |of |the |empanadas |with |the |egg |wash. |Bake |until |crisp |and |golden |brown |, |about |20 |minutes |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | 0 | ingredient | action | 0 | action | 0 | ingredient | 0 | 0 | ingredient | 0 | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 |\n", + "|Predicitions:| 0 | 0 | ingredient | action | 0 | action | 0 | ingredient | 0 | 0 | 0 | 0 | 0 | ingredient | 0 | action | 0 | ingredient | 0 | 0 | action | 0 | 0 | 0 | 0 | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Let |cool |for |about |10 |minutes |before |serving |. |\n", + "|-----|----|----|----|----|----|----|----|----|----|\n", + "|labels:| 0 | action | 0 | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| 0 | action | 0 | 0 | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |\n", + "|-----|\n", + "|labels:|\n", + "|Predicitions:|\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Heat |a |large |pot |over |mediumhigh |heat |. |\n", + "|-----|----|----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | 0 | 0 | 0 | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | 0 | 0 | 0 | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "| |Add |the |bacon |and |cook |. 
|\n", + "|-----|----|----|----|----|----|----|\n", + "|labels:| action | 0 | ingredient | 0 | action | 0 |\n", + "|Predicitions:| action | 0 | ingredient | 0 | action | 0 |\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "for i, (features, labels, tokens) in enumerate(data):\n", + " if i > 100:\n", + " break\n", + " \n", + " prediction = tagger.tag(features)\n", + " \n", + " sentence_as_markdown_table(tokens, labels, prediction)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'---------'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"---\" * 3" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\" ['0', '0', '0', '0', '0', 'action', '0', '0'] |\"" + ] + }, + "execution_count": 28, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\"\".join([f\" {l} |\" for l in labels])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Tagging/CRF_training.ipynb b/Tagging/CRF_training.ipynb new file mode 100644 index 0000000..00a9f19 --- /dev/null +++ b/Tagging/CRF_training.ipynb @@ -0,0 +1,576 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import conllu_batch_generator as cbg" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "cr = cbg.ConlluReader(\"recipes0.conllu\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "t = cr.__iter__().__next__()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "OrderedDict([('id', 2),\n", + " ('form', 'oven'),\n", + " ('lemma', 'oven'),\n", + " ('upostag', 'ADV'),\n", + " ('xpostag', 'RB'),\n", + " ('feats', None),\n", + " ('head', None),\n", + " ('deprel', '_'),\n", + " ('deps', None),\n", + " ('misc', None)])" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "t[0][1]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def word2features(sent, i):\n", + " word = sent[i]['form']\n", + " postag = sent[i]['upostag']\n", + " features = [\n", + " 'bias',\n", + " #'word.lower=' + word.lower(),\n", + " 'word[-3:]=' + word[-3:],\n", + " 'word[-2:]=' + word[-2:],\n", + " 'word.isupper=%s' % word.isupper(),\n", + " 'word.istitle=%s' % word.istitle(),\n", + " 'word.isdigit=%s' % word.isdigit(),\n", + " 'postag=' + postag,\n", + " 'postag[:2]=' + postag[:2],\n", + " ]\n", + " if i > 0:\n", + " word1 = sent[i-1]['form']\n", + " postag1 = sent[i-1]['upostag']\n", + " features.extend([\n", + " '-1:word.lower=' + word1.lower(),\n", + " '-1:word.istitle=%s' % word1.istitle(),\n", + 
" '-1:word.isupper=%s' % word1.isupper(),\n", + " '-1:postag=' + postag1,\n", + " '-1:postag[:2]=' + postag1[:2],\n", + " ])\n", + " if i > 1:\n", + " word1 = sent[i-2]['form']\n", + " postag1 = sent[i-2]['upostag']\n", + " features.extend([\n", + " '-2:word.lower=' + word1.lower(),\n", + " '-2:word.istitle=%s' % word1.istitle(),\n", + " '-2:word.isupper=%s' % word1.isupper(),\n", + " '-2:postag=' + postag1,\n", + " '-2:postag[:2]=' + postag1[:2],\n", + " ])\n", + " else:\n", + " features.append('BOS')\n", + " \n", + " if i < len(sent)-1:\n", + " word1 = sent[i+1]['form']\n", + " postag1 = sent[i+1]['upostag']\n", + " features.extend([\n", + " '+1:word.lower=' + word1.lower(),\n", + " '+1:word.istitle=%s' % word1.istitle(),\n", + " '+1:word.isupper=%s' % word1.isupper(),\n", + " '+1:postag=' + postag1,\n", + " '+1:postag[:2]=' + postag1[:2],\n", + " ])\n", + " if i < len(sent)-2:\n", + " word1 = sent[i+1]['form']\n", + " postag1 = sent[i+1]['upostag']\n", + " features.extend([\n", + " '+2:word.lower=' + word1.lower(),\n", + " '+2:word.istitle=%s' % word1.istitle(),\n", + " '+2:word.isupper=%s' % word1.isupper(),\n", + " '+2:postag=' + postag1,\n", + " '+2:postag[:2]=' + postag1[:2],\n", + " ])\n", + " else:\n", + " features.append('EOS')\n", + " \n", + " return features" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def sent2labels(sent):\n", + " labels = []\n", + " for token in sent:\n", + " if token['misc'] is not None and 'food_type' in token['misc']:\n", + " labels.append(token['misc']['food_type'])\n", + " else:\n", + " labels.append(\"0\")\n", + " return labels" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def sent2features(sent):\n", + " return [word2features(sent, i) for i in range(len(sent))]" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "def sent2tokens(sent):\n", + " return [token['form'] for token in sent]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "def feature2tokens(sent):\n", + " return [t[1].split(\"=\")[1] for t in sent]" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "def conllu2tokens(sent):\n", + " return [t['form'] for t in sent]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* create test dataset:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "# read 50000 samples:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "n_train = 50000\n", + "n_test = 1000\n", + "\n", + "X_train = []\n", + "Y_train = []\n", + "t_train = []\n", + "\n", + "X_test = []\n", + "Y_test = []\n", + "t_test = []\n", + "\n", + "\n", + "\n", + "for i,sample in enumerate(cr):\n", + " \n", + " if i < n_train:\n", + " X_train.append(sent2features(sample[0]))\n", + " Y_train.append(sent2labels(sample[0]))\n", + " t_train.append(conllu2tokens(sample[0]))\n", + " else:\n", + " X_test.append(sent2features(sample[0]))\n", + " Y_test.append(sent2labels(sample[0]))\n", + " t_test.append(conllu2tokens(sample[0]))\n", + " \n", + " if i >= n_train + n_test:\n", + " break\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* train with crfsuite" + ] + }, + { + "cell_type": "code", + 
"execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "import pycrfsuite" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "trainer = pycrfsuite.Trainer(verbose=False)\n", + "\n", + "for xseq, yseq in zip(X_train, Y_train):\n", + " trainer.append(xseq, yseq)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "trainer.set_params({\n", + " 'c1': 1.0, # coefficient for L1 penalty\n", + " 'c2': 1e-3, # coefficient for L2 penalty\n", + " #'max_iterations': 50, # stop earlier\n", + "\n", + " # include transitions that are possible, but not observed\n", + " 'feature.possible_transitions': True\n", + "})" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['feature.minfreq',\n", + " 'feature.possible_states',\n", + " 'feature.possible_transitions',\n", + " 'c1',\n", + " 'c2',\n", + " 'max_iterations',\n", + " 'num_memories',\n", + " 'epsilon',\n", + " 'period',\n", + " 'delta',\n", + " 'linesearch',\n", + " 'max_linesearch']" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "trainer.params()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "trainer.train('test.crfsuite')" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'num': 688,\n", + " 'scores': {},\n", + " 'loss': 72969.779861,\n", + " 'feature_norm': 130.969535,\n", + " 'error_norm': 157.007119,\n", + " 'active_features': 8435,\n", + " 'linesearch_trials': 1,\n", + " 'linesearch_step': 1.0,\n", + " 'time': 0.346}" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "trainer.logparser.last_iteration\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* test:" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tagger = pycrfsuite.Tagger()\n", + "tagger.open('test.crfsuite')" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Predicted: \n", + "Correct: \n", + "\n", + "\n", + "Prepare mudding as directed on package using 3 cups milk .\n", + "Predicted: action ingredient 0 0 0 0 0 0 0 ingredient 0\n", + "Correct: action 0 0 0 0 0 0 0 0 ingredient 0\n", + "\n", + "\n", + "Remove from heat ; stir in orange_peel .\n", + "Predicted: action 0 action 0 action 0 ingredient 0\n", + "Correct: action 0 action 0 action 0 ingredient 0\n", + "\n", + "\n", + "Cover surface of pudding with waxed paper or plastic wrap and cool 15 minutes .\n", + "Predicted: action action 0 0 0 0 0 0 0 0 0 action 0 0 0\n", + "Correct: action 0 0 ingredient 0 0 0 0 0 0 0 action 0 0 0\n", + "\n", + "\n", + "Line bottom of trifle dish or glass bowl with 1/3 of the cake cubes ; .\n", + "Predicted: 0 0 0 0 0 0 0 0 0 0 0 0 ingredient 0 0 0\n", + "Correct: 0 0 0 0 0 0 0 0 0 0 0 0 ingredient 0 0 0\n", + "\n", + "\n", + "Drizzle with 1 tablespoon of the orange_juice_concentrate .\n", + "Predicted: 0 0 0 0 0 0 ingredient 0\n", + "Correct: 0 0 0 0 0 0 ingredient 0\n", + "\n", 
+ "\n", + "Spoon 1/3 of pudding over top .\n", + "Predicted: action 0 0 0 0 ingredient 0\n", + "Correct: action 0 0 ingredient 0 ingredient 0\n", + "\n", + "\n", + "Spoon 1/3 of strawberry filling over pudding .\n", + "Predicted: action 0 0 ingredient 0 0 ingredient 0\n", + "Correct: action 0 0 ingredient ingredient 0 ingredient 0\n", + "\n", + "\n", + "Top with 1/3 of orange_segments .\n", + "Predicted: ingredient 0 0 0 0 0\n", + "Correct: ingredient 0 0 0 ingredient 0\n", + "\n", + "\n", + "Repeat layers 2 more times .\n", + "Predicted: 0 0 0 0 0 0\n", + "Correct: 0 0 0 0 0 0\n", + "\n", + "\n", + "Cover and refrigerate 3 hours or overnight .\n", + "Predicted: action 0 action 0 0 0 0 0\n", + "Correct: action 0 action 0 0 0 0 0\n", + "\n", + "\n", + "Store in refrigerator .\n", + "Predicted: 0 0 action 0\n", + "Correct: 0 0 action 0\n", + "\n", + "\n", + "\n", + "Predicted: \n", + "Correct: \n", + "\n", + "\n", + "Meanwhile , whisk the vinegar , lemon_zest and juice , honey , dried_oregano , 1/2 teaspoon salt and 1/4 teaspoon pepper in a large bowl .\n", + "Predicted: 0 0 action 0 action 0 ingredient 0 ingredient 0 ingredient 0 ingredient 0 0 0 action 0 0 0 ingredient 0 0 0 0 0\n", + "Correct: 0 0 action 0 ingredient 0 ingredient 0 ingredient 0 ingredient 0 ingredient 0 0 0 action 0 0 0 ingredient 0 0 0 0 0\n", + "\n", + "\n", + "Whisk in the olive_oil in a slow , steady stream until emulsified .\n", + "Predicted: action 0 0 ingredient 0 0 0 0 0 0 0 0 0\n", + "Correct: action 0 0 ingredient 0 0 0 0 0 0 0 0 0\n", + "\n", + "\n", + "Add the tomatoes and olives and toss .\n", + "Predicted: action 0 ingredient 0 ingredient 0 0 0\n", + "Correct: action 0 ingredient 0 ingredient 0 0 0\n", + "\n", + "\n", + "Peel the cucumbers , leaving alternating strips of green peel .\n", + "Predicted: action 0 ingredient 0 0 0 0 0 ingredient action 0\n", + "Correct: action 0 ingredient 0 ingredient 0 0 0 ingredient action 0\n", + "\n", + "\n", + "Trim the ends , halve lengthwise and slice crosswise , about 1/2 inch thick ; add to the bowl with the tomatoes .\n", + "Predicted: 0 0 ingredient 0 0 0 0 action 0 0 0 0 0 0 0 action 0 0 0 0 0 ingredient 0\n", + "Correct: 0 0 0 0 0 0 0 action ingredient 0 0 0 0 0 0 action 0 0 0 0 0 ingredient 0\n", + "\n", + "\n", + "Drain the red onion , add to the bowl and toss .\n", + "Predicted: action 0 ingredient ingredient 0 action 0 0 0 0 0 0\n", + "Correct: action 0 ingredient ingredient 0 action 0 0 0 0 0 0\n", + "\n", + "\n", + "Drain the feta and slice horizontally into 4 even rectangles .\n", + "Predicted: action 0 0 0 ingredient 0 0 0 0 0 0\n", + "Correct: action 0 0 0 action 0 0 0 0 0 0\n", + "\n", + "\n", + "Divide the salad among plates .\n", + "Predicted: 0 0 ingredient 0 0 0\n", + "Correct: 0 0 ingredient 0 0 0\n", + "\n", + "\n", + "Top with the feta and oregano ; drizzle with olive_oil and season with pepper .\n", + "Predicted: ingredient 0 0 ingredient 0 ingredient 0 0 0 ingredient 0 action 0 ingredient 0\n", + "Correct: ingredient 0 0 0 0 ingredient 0 0 0 ingredient 0 action 0 ingredient 0\n", + "\n", + "\n", + "Photograph by Johnny Miller\n", + "Predicted: 0 0 0 0\n", + "Correct: 0 0 0 0\n", + "\n", + "\n", + "\n", + "Predicted: \n", + "Correct: \n", + "\n", + "\n", + "Put the potatoes in a large saucepan and cover with water .\n", + "Predicted: action 0 ingredient 0 0 0 0 0 action 0 ingredient 0\n", + "Correct: action 0 ingredient 0 0 0 0 0 action 0 ingredient 0\n", + "\n", + "\n", + "Add 1 teaspoon salt to the water .\n", + "Predicted: action 0 0 action 0 0 
ingredient 0\n", + "Correct: action 0 0 action 0 0 ingredient 0\n", + "\n", + "\n", + "Bring to a boil over high heat , reduce to a simmer and cook the potatoes until they are very tender but not overcooked , about 10 minutes .\n", + "Predicted: 0 0 0 action 0 0 action 0 ingredient 0 0 action 0 action 0 ingredient 0 0 0 0 0 0 0 action 0 0 0 0 0\n", + "Correct: 0 0 0 action 0 0 action 0 action 0 0 action 0 action 0 ingredient 0 0 0 0 0 0 0 0 0 0 0 0 0\n", + "\n", + "\n", + "Remove the potatoes from the water and let them drain on a dishtowel , gently patting to dry .\n", + "Predicted: action 0 ingredient 0 0 ingredient 0 0 0 action 0 0 0 0 0 0 0 0 0\n", + "Correct: action 0 ingredient 0 0 ingredient 0 0 0 action 0 0 0 0 0 0 0 0 0\n", + "\n", + "\n", + "Allow to cool .\n", + "Predicted: 0 0 action 0\n", + "Correct: 0 0 action 0\n", + "\n", + "\n", + "Gently press down on one potato with a spatula to gently flatten it to about 1/2 inch thick .\n", + "Predicted: 0 0 0 0 0 ingredient 0 0 0 0 0 0 0 0 0 0 0 0 0\n", + "Correct: 0 0 0 0 0 ingredient 0 0 0 0 0 0 0 0 0 0 0 0 0\n", + "\n", + "\n" + ] + } + ], + "source": [ + "for i in range(100,130):\n", + " print(' '.join(t_test[i]))\n", + " #print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\n", + " print(\"Predicted:\", ' '.join(tagger.tag(X_test[i])))\n", + " print(\"Correct: \", ' '.join(Y_test[i]))\n", + " \n", + " print(\"\\n\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Tagging/Conllu_Training.ipynb b/Tagging/Conllu_Training.ipynb new file mode 100644 index 0000000..892528e --- /dev/null +++ b/Tagging/Conllu_Training.ipynb @@ -0,0 +1,290 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "\n", + "import sys\n", + "\n", + "from conllu import parse\n", + "\n", + "sys.path.insert(0,'..')\n", + "import settings\n", + "\n", + "from tagging_tools import print_visualized_tags\n", + "\n", + "from train_sample_generator import ConlluReader, ConlluDataProvider\n", + "\n", + "from gensim.test.utils import common_texts, get_tmpfile\n", + "from gensim.models import Word2Vec\n", + "from nltk import PorterStemmer\n", + "import numpy as np\n", + "from sklearn import preprocessing\n", + "porter = PorterStemmer()" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "conllu_reader = ConlluReader(\"recipes0.conllu\", iter_documents=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[TokenList]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "conllu_reader.__iter__().__next__()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "conllu_data_provider = ConlluDataProvider(\"recipes0.conllu\", \n", + " word2vec_model=None,\n", + " batchsize=100,\n", + " window_size=3,\n", + " iter_documents=False,\n", + " 
food_type=\"ingredient\")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "x,y = conllu_data_provider.getNextDataBatch(y_food_type_label=\"ingredient\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1148" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(y)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "sum_tokens = 0\n", + "i = 0\n", + "for x,y in conllu_data_provider:\n", + " sum_tokens += len(x)\n", + " i += 1\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "649423" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "sum_tokens" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "576" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "i" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## decision tree classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 35, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.ensemble import RandomForestClassifier\n", + "from sklearn.model_selection import train_test_split" + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [], + "source": [ + "conllu_data_provider = ConlluDataProvider(\"recipes0.conllu\", \n", + " word2vec_model=None,\n", + " batchsize=100,\n", + " window_size=3,\n", + " iter_documents=False,\n", + " food_type=\"ingredient\")" + ] + }, + { + "cell_type": "code", + "execution_count": 37, + "metadata": {}, + "outputs": [], + "source": [ + "clf = RandomForestClassifier(n_estimators=100 ,random_state=0, warm_start=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 28, + "metadata": {}, + "outputs": [], + "source": [ + "for x,y in conllu_data_provider:\n", + " break\n", + " X_train, X_test, y_train, y_test = train_test_split(x,y, random_state=0)\n", + " clf.fit(X_train, y_train)\n", + " pred = tree.predict(X_test)\n", + " print(\"loss: \", np.sum((pred - y_test)**2) / len(x))" + ] + }, + { + "cell_type": "code", + "execution_count": 29, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " ...,\n", + " [0., 0., 1., ..., 0., 0., 0.],\n", + " [0., 0., 0., ..., 0., 0., 0.],\n", + " [0., 1., 0., ..., 0., 0., 0.]])" + ] + }, + "execution_count": 29, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "loss: 0.041811846689895474\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/jonas/.local/lib/python3.7/site-packages/sklearn/ensemble/forest.py:307: UserWarning: Warm-start fitting without increasing n_estimators does not fit new trees.\n", + " warn(\"Warm-start fitting without increasing n_estimators does not \"\n" + ] + } + ], + "source": [ + "clf.fit(X_train, y_train)\n", + 
"pred = tree.predict(X_test)\n", + "print(\"loss: \", np.sum((pred - y_test)**2) / len(x))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Tagging/README.md b/Tagging/README.md new file mode 100644 index 0000000..8c847d6 --- /dev/null +++ b/Tagging/README.md @@ -0,0 +1,4 @@ +# Tagging tools +--- + +in this folder are tools that are here to annotate existing recipe instructions and generating conllu files from them diff --git a/Tagging/Recioe_Tagging.ipynb b/Tagging/Recioe_Tagging.ipynb index d4df712..fc46398 100644 --- a/Tagging/Recioe_Tagging.ipynb +++ b/Tagging/Recioe_Tagging.ipynb @@ -90,7 +90,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -109,7 +109,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -120,7 +120,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -142,16 +142,25 @@ }, { "cell_type": "code", - "execution_count": 34, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ - "mwe_tokenizer = MWETokenizer([w.split() for w in ingredients.multi_word_ingredients_stemmed])" + "from stemmed_mwe_tokenizer import StemmedMWETokenizer" ] }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "mwe_tokenizer = StemmedMWETokenizer([w.split() for w in ingredients.multi_word_ingredients_stemmed])" + ] + }, + { + "cell_type": "code", + "execution_count": 14, "metadata": {}, "outputs": [ { @@ -423,8 +432,7 @@ " 'of',\n", " 'pasta',\n", " 'to',\n", - " 'cheese',\n", - " 'sauce',\n", + " 'cheese_sauce',\n", " 'is',\n", " 'crucial',\n", " 'to',\n", @@ -457,7 +465,7 @@ " '.']" ] }, - "execution_count": 35, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -466,6 +474,61 @@ "mwe_tokenizer.tokenize(nltk.tokenize.word_tokenize(instructions[0]))" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\u001b[0;31mSignature:\u001b[0m \u001b[0mmwe_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspan_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mDocstring:\u001b[0m\n", + "Identify the tokens using integer offsets ``(start_i, end_i)``,\n", + "where ``s[start_i:end_i]`` is the corresponding token.\n", + "\n", + ":rtype: iter(tuple(int, int))\n", + "\u001b[0;31mFile:\u001b[0m ~/.local/lib/python3.7/site-packages/nltk/tokenize/api.py\n", + "\u001b[0;31mType:\u001b[0m method\n" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "?mwe_tokenizer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "ename": "NotImplementedError", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mNotImplementedError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mmwe_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspan_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnltk\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenize\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mword_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minstructions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/.local/lib/python3.7/site-packages/nltk/tokenize/api.py\u001b[0m in \u001b[0;36mspan_tokenize\u001b[0;34m(self, s)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0mrtype\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \"\"\"\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mNotImplementedError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtokenize_sents\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstrings\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mNotImplementedError\u001b[0m: " + ] + } + ], + "source": [ + "mwe_tokenizer.span_tokenize(nltk.tokenize.word_tokenize(instructions[0]))" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/Tagging/Recipe_Tagging_Analysis.ipynb b/Tagging/Recipe_Tagging_Analysis.ipynb new file mode 100644 index 0000000..36b0093 --- /dev/null +++ b/Tagging/Recipe_Tagging_Analysis.ipynb @@ -0,0 +1,1467 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Recipe Tagging analysis\n", + "\n", + "analyse and visualize preconfigured conllu files" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "from conllu import parse\n", + "\n", + "sys.path.insert(0,'..')\n", + "import settings\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "conllu_path = \"recipes0.conllu\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "with open(conllu_path, 'r') as f:\n", + " data = f.readlines()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "# newdoc\n", + "1 Set set NOUN NNP _ _ _ _ food_type=action \n", + "2 oven oven ADV RB _ _ _ _ _ \n", + "3 to to PRT TO _ _ _ _ _ \n", + "4 350 350 NUM CD _ _ _ _ _ \n", + "5 degrees degre NOUN NNS _ _ _ _ _ \n", + "6 F F NOUN NNP _ _ _ _ _ \n", + "7 . . . . 
_ _ _ _ _ \n", + "\n", + "8 Grease greas VERB VB _ _ _ _ _ \n", + "9 a a DET DT _ _ _ _ _ \n", + "10 13 13 NUM CD _ _ _ _ _ \n", + "11 x x ADJ JJ _ _ _ _ _ \n", + "12 9-inch 9-inch ADJ JJ _ _ _ _ _ \n", + "13 baking bake VERB VBG _ _ _ _ food_type=action \n", + "14 dish dish NOUN NN _ _ _ _ _ \n", + "15 . . . . _ _ _ _ _ \n", + "\n", + "16 Cook cook VERB VB _ _ _ _ food_type=action \n", + "17 the the DET DT _ _ _ _ _ \n", + "\n" + ] + } + ], + "source": [ + "print(\"\".join(data[0:20]))" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "ts = parse(\"\".join(data))" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "sentence = ts[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "list" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "type(ts)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "---\n", + "\n", + "* visualize tags:" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "from tagging_tools import print_visualized_tags" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "Set oven to 350 degrees F . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Grease a 13 x 9-inch baking dish . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Cook the macaroni in boiling_water until al dente ( do not cook until very soft ) drain , then place into a bowl . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Place the milk , flour , onion_salt and garlic_powder in a 1-quart jar ; cover tightly and shake vigorously for 1 minute ; pour the mixture over the macaroni and add in 3-1/2 cups shredded_cheddar_cheese ; mix thoroughly to combine , then spread into the baking dish . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Sprinkle with breadcrumbs then remaining 1 cup of cheddar_cheese . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Drizzle 1/4 cup melted_butter or margarine over the top . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Bake uncovered for 45 minutes or until golden brown . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Stir together the sugar , salt , pepper , cumin and mustard in small bowl . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Place salmon so that the side the skin was removed from is down . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Press sugar mixture evenly onto fish . 
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Heat oil over medium heat in large nonstick or cast-iron skillet . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "When hot , place fish rub-side down in pan . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Cook until brown_sugar rub dissolves and darkens slightly , being careful not to burn , about 4 minutes . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Flip fish and cook to medium doneness , about 1 minute longer . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Mix all ingredients in bowl in order listed . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Make small or medium size balls out of mixture and place on a greased cookie sheet sprayed with Pam . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "For extra happy horses you can sprinkle a little bit of honey on them before cooking . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Bake 350F for 15-20 minutes and make sure they do n't burn ! " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "The day before you want to eat this cut your rolls into 8 medium to thick slices , put on a light setting and just slightly toast them or put them into the oven and slightly pre toast them . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "They will be grilled the next day again so do not brown them to much , alternatively you can buy larger ready cooked crostini , next day cut the cheese to size to fit the slices of bread . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "The next day , slice some cheese the same size as each slice of bread.. " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Layer first a slice of bread , then a slice of cheese then a slice of bread and another slice of cheese into ovenproof soup plates or bowls " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "prepare you soup by making a clear stock of preference , vegetable , chicken , fish or beef . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "You can do this by skimming the grey foam as soon a it comes up with a very small net strainer or a ladle . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "This needs to be done as soon as it comes up as it first starts boiling or the soup will go cloudy . 
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Season to your liking . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Add a little sherry . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Drain the stock , carefully ladle the clear stock over the bread and cheese . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Put the bowls into the oven and melt the cheese . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Hope you enjoy " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "The bread full of soup is the best bit " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Using a 1/2 c ice-cream scoop , put one scoop of sherbet on each cookie . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Freeze until firm , at least 15 minutes . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Heat the oven to 475 degrees F . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Whip the egg_whites in a glass or metal bowl until soft peaks form . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Add the sugar and beat until stiff peaks form . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Place the frozen sherbet topped cookies about 2 '' apart on a wire rack on a baking sheet . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Spread the meringue over each of the sherbets and also the cookie edges . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Bake until lightly browned , about 2 minutes . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Serve immediately . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Spread 1/2 tablespoon butter on one side of each piece of toasted Italian_bread . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Set aside . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Heat olive_oil in a skillet over high heat . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "When oil begins to smoke , pour scallops into pan . 
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Cook for 30 seconds without stirring . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Toss scallops in pan and stir in garlic . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Cook and stir until fragrant , about 30 seconds . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Stir wine and lemon_juice into scallops , bring to a boil , and cook for about 30 seconds . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Stir parsley and cold butter into scallops and remove from heat . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "When butter melts , stir in salt , black_pepper , and cayenne_pepper . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Spoon scallops over buttered toast and serve immediately . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "1 . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Whisk together brownie mix , flour , and sugar . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "2.Using a heavy spoon , stir in the eggs one at a time , followed by butter and 2 tablespoons water . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "If the batter appears like there is too much flour , add 1 more tablespoon of water . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Batter will be very thick . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "3 . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Fold in chocolate_chunks 4 . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "Cover and refrigerate for 2 hours . " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/markdown": [ + "5 . 
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "for i in range(60):\n", + " print_visualized_tags(ts[i])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## train word2vec model based on stemmed words" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "from gensim.test.utils import common_texts, get_tmpfile\n", + "from gensim.models import Word2Vec\n", + "from nltk import PorterStemmer\n", + "import numpy as np\n", + "from sklearn import preprocessing\n", + "porter = PorterStemmer()" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "sentence_symbols = set(('.', ';', '!', '?', ',')) " + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "stemmed_sentences = []\n", + "for sentence in ts:\n", + " stemmed_sentence = []\n", + " for token in sentence:\n", + " stemmed = token['lemma']\n", + " if stemmed not in sentence_symbols:\n", + " stemmed_sentence.append(stemmed)\n", + " stemmed_sentences.append(stemmed_sentence)" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "52563" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(stemmed_sentences)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [], + "source": [ + "model = Word2Vec(stemmed_sentences, size=128, window=1, min_count=1, workers=4)\n", + "def word_similarity(word_a:str, word_b:str, model=model, stemmer=porter):\n", + " return model.wv.similarity(stemmer.stem(word_a), stemmer.stem(word_b))\n", + "\n", + "def word_exists(word:str, model=model, stemmer=porter):\n", + " return stemmer.stem(word) in model.wv\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## train stuff" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* create Data Arrays" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [], + "source": [ + "upos_tags = []\n", + "word_vecs = []\n", + "bool_ingr = []\n", + "bool_actn = []\n", + "\n", + "lb = preprocessing.LabelBinarizer()\n", + "lb.fit(['.', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM', 'PRON', 'PRT', 'VERB', 'X'])\n", + "\n", + "number_tokens = 0\n", + "\n", + "for sentence in ts:\n", + " sent_upos = []\n", + " sent_word = []\n", + " sent_ingr = []\n", + " sent_actn = []\n", + " \n", + " for token in sentence:\n", + " number_tokens += 1\n", + " if token['form'] not in sentence_symbols:\n", + " sent_word.append(model.wv[token['lemma']])\n", + " upos = token['upostag']\n", + " misc = token['misc']\n", + " actn = False\n", + " ingr = False\n", + " sent_upos.append(upos)\n", + " if misc != None and 'food_type' in misc:\n", + " if misc['food_type'] == \"action\":\n", + " actn = True\n", + " elif misc['food_type'] == \"ingredient\":\n", + " ingr = True\n", + " sent_ingr.append(ingr)\n", + " sent_actn.append(actn)\n", + " if len(sent_upos) > 0:\n", + " upos_tags.append(lb.transform(sent_upos))\n", + " word_vecs.append(sent_word)\n", + " bool_ingr.append(sent_ingr)\n", + " bool_actn.append(sent_actn)\n", + " \n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* create one-vs-all binary labels from upos values" + ] + }, + { + "cell_type": "code", 
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([-0.68675774, -0.7883484 ,  0.20251137, -0.05719408,  0.09501456,\n",
+       "        0.05182203, -0.25285384, -0.41048968, -0.71525794, -0.67254657,\n",
+       "       -0.17055118, -0.17210464, -0.82208157,  0.2450692 ,  0.5252676 ,\n",
+       "       -0.19269118,  0.341501  ,  0.34656093,  0.9737164 ,  0.35325623,\n",
+       "       -0.89236367, -0.64312077, -0.7638635 ,  0.39858425, -0.27193114,\n",
+       "        0.49859434, -0.81326216,  0.7487419 ,  0.16307887,  0.25901216,\n",
+       "        0.66147804, -0.58643436, -0.09952813,  0.17525254, -0.00194792,\n",
+       "        0.07784765,  0.90528905, -0.15917295, -0.3427578 ,  1.0070069 ,\n",
+       "       -0.57078034,  0.33585113, -0.27914697, -0.24289384,  0.17581789,\n",
+       "        0.2808861 , -0.35724574,  0.2030598 ,  0.7853533 ,  0.5153942 ,\n",
+       "       -0.02380644, -0.34730977, -0.2862777 , -0.44545448,  0.04394584,\n",
+       "       -0.7926984 , -0.3066928 , -0.4378877 ,  0.06224217, -0.48833787,\n",
+       "       -0.48612142,  0.33839923, -0.02597165,  0.06829736, -1.3514524 ,\n",
+       "       -0.72032964,  0.81755555,  0.73999155,  0.35204384,  0.32564116,\n",
+       "        0.37047276,  0.3869023 ,  0.73725784,  0.49011388, -0.06073807,\n",
+       "        0.40204594,  0.25720817,  0.3023497 , -0.7338208 , -1.2525887 ,\n",
+       "        0.758383  ,  0.1704396 ,  0.26385054, -0.6765845 , -0.80152684,\n",
+       "       -0.47452438, -0.25873485,  0.05395245, -0.5654043 ,  0.5105308 ,\n",
+       "        0.26053032,  0.34596896, -0.62164515, -0.02258987,  0.06654755,\n",
+       "        0.8050189 , -0.02911493, -0.74690014, -0.5086117 , -0.06318699,\n",
+       "        1.2542377 ,  0.5853978 , -0.92408645, -0.6169342 , -0.5941347 ,\n",
+       "        0.2128975 ,  0.76446146,  0.78195745, -0.3471526 ,  0.51036954,\n",
+       "        0.5108987 , -0.09576786, -0.7606738 ,  0.8236566 , -0.28881475,\n",
+       "       -0.7867539 ,  0.31546646,  0.40256152,  0.0999696 ,  0.20387918,\n",
+       "       -0.6289423 ,  0.63682   ,  0.8060169 , -0.98891175,  0.14811945,\n",
+       "        0.29527354,  0.31912726, -0.22632086], dtype=float32)"
+      ]
+     },
+     "execution_count": 16,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "word_vecs[0][0]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* the most annoying part: create sliding windows around words and build huge arrays containing each value and its neighbors"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 34,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "window_size = 3\n",
+    "batch_size = 1000"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 35,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "12"
+      ]
+     },
+     "execution_count": 35,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "len(lb.classes_)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 36,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "\n",
+    "train_upos_tags = np.zeros((batch_size, window_size * 2 + 1, len(lb.classes_)))\n",
+    "train_word_vecs = np.zeros((batch_size, window_size * 2 + 1, model.vector_size))\n",
+    "train_bool_ingr = np.zeros((batch_size, 1))\n",
+    "train_bool_actn = np.zeros((batch_size, 1))\n",
+    "\n",
+    "X = np.zeros((batch_size, window_size * 2 + 1, train_upos_tags.shape[2] + train_word_vecs.shape[2]))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 37,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "sentence_counter = 0\n",
+    "token_counter = 0\n",
+    "i = 0"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 40,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def next_batch():\n",
+    "    global sentence_counter\n",
+    "    global i\n",
+    "    global token_counter\n",
token_counter\n", + " global train_upos_tags\n", + " global train_word_vecs\n", + " global train_bool_ingr\n", + " global train_bool_actn\n", + " \n", + " \n", + " train_upos_tags = np.zeros((batch_size, window_size * 2 + 1, len(lb.classes_)))\n", + " train_word_vecs = np.zeros((batch_size, window_size * 2 + 1, model.vector_size))\n", + " train_bool_ingr = np.zeros((batch_size, 1))\n", + " train_bool_actn = np.zeros((batch_size, 1))\n", + " \n", + " i = 0\n", + " \n", + " current_sent_upos = upos_tags[sentence_counter]\n", + " current_sent_word = word_vecs[sentence_counter]\n", + " current_sent_ingr = bool_ingr[sentence_counter]\n", + " current_sent_actn = bool_actn[sentence_counter]\n", + "\n", + " \n", + " for i in range(batch_size):\n", + " \n", + " for j in range(- window_size, window_size + 1):\n", + " if j + token_counter < 0:\n", + " pass\n", + " elif j + token_counter >= len(current_sent_upos):\n", + " pass\n", + " else:\n", + " train_upos_tags[i,j + window_size,:] = current_sent_upos[token_counter + j]\n", + " train_word_vecs[i,j + window_size,:] = current_sent_word[token_counter + j]\n", + "\n", + " train_bool_ingr[i] = current_sent_ingr[token_counter]\n", + " train_bool_actn[i] = current_sent_actn[token_counter]\n", + "\n", + " token_counter += 1\n", + "\n", + " if token_counter >= len(current_sent_upos):\n", + " current_sent_upos = upos_tags[sentence_counter]\n", + " current_sent_word = word_vecs[sentence_counter]\n", + " current_sent_ingr = bool_ingr[sentence_counter]\n", + " current_sent_actn = bool_actn[sentence_counter]\n", + " \n", + " sentence_counter += 1\n", + " token_counter = 0\n", + " \n", + " # flatten dimensions:\n", + " train_upos_tags = train_upos_tags.reshape((batch_size, train_upos_tags.shape[1] * train_upos_tags.shape[2]))\n", + " train_word_vecs = train_word_vecs.reshape((batch_size, train_word_vecs.shape[1] * train_word_vecs.shape[2]))" + ] + }, + { + "cell_type": "code", + "execution_count": 41, + "metadata": {}, + "outputs": [], + "source": [ + "next_batch()" + ] + }, + { + "cell_type": "code", + "execution_count": 42, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "1000" + ] + }, + "execution_count": 42, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(train_bool_ingr)" + ] + }, + { + "cell_type": "code", + "execution_count": 43, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "9\n", + "10\n", + "11\n", + "12\n", + "13\n", + "14\n", + "15\n" + ] + } + ], + "source": [ + "for j in range(token_counter - window_size, token_counter + window_size + 1):\n", + " print(j + window_size)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "896" + ] + }, + "execution_count": 44, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(train_word_vecs[2])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## create a decision tree classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [], + "source": [ + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.model_selection import train_test_split" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* train ingredients" + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": 
{}, + "outputs": [], + "source": [ + "X = train_upos_tags\n", + "Y = train_bool_ingr" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [], + "source": [ + "X_train, X_test, y_train, y_test = train_test_split(X,Y, random_state=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "250" + ] + }, + "execution_count": 48, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(X_test)" + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "tree = DecisionTreeClassifier(random_state=0)" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "DecisionTreeClassifier(class_weight=None, criterion='gini', max_depth=None,\n", + " max_features=None, max_leaf_nodes=None,\n", + " min_impurity_decrease=0.0, min_impurity_split=None,\n", + " min_samples_leaf=1, min_samples_split=2,\n", + " min_weight_fraction_leaf=0.0, presort=False,\n", + " random_state=0, splitter='best')" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "tree.fit(X_train, y_train)" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [], + "source": [ + "pred = tree.predict(X_test)" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.,\n", + " 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.,\n", + " 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 1., 0., 1., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1.,\n", + " 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.,\n", + " 0., 0., 0., 0., 1., 1., 1., 0., 1., 1., 0., 0., 1., 0., 0., 0., 1.,\n", + " 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])" + ] + }, + "execution_count": 70, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "(pred - y_test[:,0])**2" + ] + }, + { + "cell_type": "code", + "execution_count": 67, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 1., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 1.,\n", + " 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 1., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0.,\n", + " 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 
0., 0.,\n", + " 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.,\n", + " 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])" + ] + }, + "execution_count": 67, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "y_test[:,0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/Tagging/conllu_batch_generator.py b/Tagging/conllu_batch_generator.py new file mode 100644 index 0000000..737639d --- /dev/null +++ b/Tagging/conllu_batch_generator.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python3 + +import sys + +from conllu import parse +from tagging_tools import print_visualized_tags + +from sklearn import preprocessing +import numpy as np + +sys.path.insert(0, '..') +import settings # noqa + + +class ConlluSentenceIterator(object): + def __init__(self, conllu_reader): + self.conllu_reader = conllu_reader + + def __next__(self): + next_sent = self.conllu_reader.next_sentence() + if next_sent is None: + raise StopIteration + return next_sent + + +class ConlluDocumentIterator(object): + def __init__(self, conllu_reader): + self.conllu_reader = conllu_reader + + def __next__(self): + next_sent = self.conllu_reader.next_document() + if next_sent is None: + raise StopIteration + return next_sent + + +class ConlluReader(object): + def __init__(self, path, iter_documents=False): + self._path = path + self._fileobj = None + self._open() + self.iter_documents = iter_documents + + def _open(self): + self._fileobj = open(self._path, 'r') + + def next_sentence(self): + data = "" + while True: + line = self._fileobj.readline() + if line == "": + break + data += line + if line == "\n": + break + + if data == "": + return None + + if data[-1] != "\n": + data += "\n" + + conllu_obj = parse(data) + return conllu_obj + + def next_document(self): + data = "" + last_line_empty = False + while True: + line = self._fileobj.readline() + if line == "": + break + data += line + if line == "\n": + if last_line_empty: + break + last_line_empty = True + else: + last_line_empty = False + + if data == "": + return None + + if data[-1] != "\n": + data += "\n" + + conllu_obj = parse(data) + return conllu_obj + + def __iter__(self): + return ConlluDocumentIterator(self) if self.iter_documents else ConlluSentenceIterator(self) + + +class SlidingWindowListIterator(object): + def __init__(self, parent): + self.parent = parent + self.i = 0 + + def __next__(self): + if len(self.parent) == self.i: + raise StopIteration + + self.i += 1 + return self.parent[self.i - 1] + + +class SlidingWindowList(list): + def __init__(self, sliding_window_size, input=None, border_value=None): + + self.sliding_window_size = sliding_window_size + 
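+        # positions outside the list bounds are padded with border_value,
+        # which defaults to a zero-value of the element type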
self.border_value = border_value + + if border_value is None and input is not None: + self.border_value = type(input[0])() + + if input is not None: + super(SlidingWindowList, self).__init__(input) + + def __getitem__(self, index): + + if type(index) == slice: + start = 0 if index.start is None else index.start + stop = len(self) if index.stop is None else index.stop + step = 1 if index.step is None else index.step + return [self[i] for i in range(start, stop, step)] + + else: + n = self.sliding_window_size * 2 + 1 + res = n * [self.border_value] + + j_start = index - self.sliding_window_size + + for i in range(n): + ind = j_start + i + if ind >= 0 and ind < len(self): + res[i] = super(SlidingWindowList, self).__getitem__(ind) + + return res + + def __iter__(self): + return SlidingWindowListIterator(self) + + +''' +class ConlluDataProviderIterator(object): + def __init__(self, parent): + self.parent = parent + self.conllu_reader = ConlluReader( + parent.filepath, parent.iter_documents) + + def __next__(self): + result = self.parent.getNextDataBatch(conllu_reader=self.conllu_reader) + if result is None: + raise StopIteration + return result +''' + +''' +class ConlluDataProvider(object): + def __init__(self, + filepath, + word2vec_model, + batchsize=100, + window_size=3, + iter_documents=False, + food_type=None): + self.batchsize = batchsize + self.word2vec_model = word2vec_model + self.filepath = filepath + self.conllu_reader = ConlluReader(filepath, iter_documents) + self.window_size = window_size + self.food_type = food_type + self.iter_documents = iter_documents + + # create a label binarizer for upos tags: + self.lb = preprocessing.LabelBinarizer() + self.lb.fit(['.', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', + 'NOUN', 'NUM', 'PRON', 'PRT', 'VERB', 'X']) + + def _get_next_conllu_objects(self, n: int, conllu_reader): + i = 0 + conllu_list = [] + + while i < n: + try: + conllu_list.append(conllu_reader.__iter__().__next__()) + i += 1 + + except StopIteration: + break + + return conllu_list + + def _get_upos_X(self, conllu_list): + n_tokens = 0 + l_global = [] + for document in conllu_list: + l = [] + for sentence in document: + for token in sentence: + upos = token['upostag'] + l.append(upos) + n_tokens += 1 + if len(l) > 0: + l_global.append(self.lb.transform(l)) + + return l_global, n_tokens + + def _get_y(self, conllu_list, misk_key="food_type", misc_val="ingredient"): + n_tokens = 0 + y_global = [] + for document in conllu_list: + y = [] + for sentence in document: + for token in sentence: + m = token['misc'] + t_y = m is not None and misk_key in m and m[misk_key] == misc_val + y.append(t_y) + n_tokens += 1 + if len(y) > 0: + y_global.append(y) + + return y_global, n_tokens + + def getNextDataBatch(self, y_food_type_label=None, conllu_reader=None): + + if y_food_type_label is None: + y_food_type_label = self.food_type + + if conllu_reader is None: + conllu_reader = self.conllu_reader + conllu_list = self._get_next_conllu_objects( + self.batchsize, conllu_reader) + + if len(conllu_list) == 0: + return None + + # generate features for each document/sentence + n = len(conllu_list) + + d = self.window_size * 2 + 1 + + buf_X, x_tokens = self._get_upos_X(conllu_list) + buf_ingr_y, y_tokens = self._get_y(conllu_list) + + assert len(buf_X) == len(buf_ingr_y) and x_tokens == y_tokens + + X_upos = np.zeros(shape=(x_tokens, d * len(self.lb.classes_))) + y = None + + if y_food_type_label is not None: + y = np.zeros(shape=(x_tokens)) + + i = 0 + for xupos in buf_X: + tmp = 
SlidingWindowList(self.window_size, + xupos, + border_value=[0] * len(self.lb.classes_)) + for upos_window in tmp: + X_upos[i, :] = np.array(upos_window).flatten() + i += 1 + + i = 0 + if y_food_type_label is not None: + for sentence in buf_ingr_y: + for yl in sentence: + y[i] = yl + i += 1 + + return X_upos, y + + def __iter__(self): + return ConlluDataProviderIterator(self) + +''' diff --git a/Tagging/conllu_generator.py b/Tagging/conllu_generator.py index d91ac75..daeb7ee 100644 --- a/Tagging/conllu_generator.py +++ b/Tagging/conllu_generator.py @@ -102,7 +102,7 @@ class ConlluElement(object): return replace_tab(result, 16) -class ConlluDocument(object): +class ConlluSentence(object): def __init__(self): self.conllu_elements = [] @@ -114,7 +114,25 @@ class ConlluDocument(object): for elem in self.conllu_elements: result += elem.__repr__() + "\n" - return result + "\n" + return result + + def __str__(self): + return self.__repr__() + + +class ConlluDocument(object): + def __init__(self): + self.conllu_sentences = [] + + def add(self, conllu_sentence: ConlluSentence): + self.conllu_sentences.append(conllu_sentence) + + def __repr__(self): + result = "# newdoc\n" + for elem in self.conllu_sentences: + result += elem.__repr__() + "\n" + + return result def __str__(self): return self.__repr__() @@ -136,46 +154,56 @@ class ConlluGenerator(object): tokenized_documents = [] for doc in self.documents: - simple_tokenized = nltk.tokenize.word_tokenize(doc) - tokenized_documents.append( - self.mwe_tokenizer.tokenize(simple_tokenized)) + tokenized_sentences = [] + sentences = doc.split("\n") + for sent in sentences: + if (len(sent) > 0): + simple_tokenized = nltk.tokenize.word_tokenize(sent) + tokenized_sentences.append( + self.mwe_tokenizer.tokenize(simple_tokenized)) + tokenized_documents.append(tokenized_sentences) # now create initial colln-u elemnts for doc in tokenized_documents: + conllu_doc = ConlluDocument() self.id_counter = 0 - collnu_doc = ConlluDocument() - for token in doc: - stemmed_token = None - if "_" in token: - stemmed_token = "_".join( - [self.stemmer.stem(part) for part in token.split("_")]) - else: - stemmed_token = self.stemmer.stem(token) - collnu_doc.add(ConlluElement( - id=self.id_counter + 1, - form=token, - lemma=stemmed_token - )) - self.id_counter += 1 - self.conllu_documents.append(collnu_doc) + for sent in doc: + conllu_sent = ConlluSentence() + for token in sent: + stemmed_token = None + if "_" in token: + stemmed_token = "_".join( + [self.stemmer.stem(part) for part in token.split("_")]) + else: + stemmed_token = self.stemmer.stem(token) + conllu_sent.add(ConlluElement( + id=self.id_counter + 1, + form=token, + lemma=stemmed_token + )) + self.id_counter += 1 + conllu_doc.add(conllu_sent) + self.conllu_documents.append(conllu_doc) def pos_tagging(self): for conllu_document in self.conllu_documents: - tokens = [x.form for x in conllu_document.conllu_elements] - pos_tags = pos_tag(tokens) - simplified_tags = [map_tag('en-ptb', 'universal', tag) - for word, tag in pos_tags] + for conllu_sent in conllu_document.conllu_sentences: + tokens = [x.form for x in conllu_sent.conllu_elements] + pos_tags = pos_tag(tokens) + simplified_tags = [map_tag('en-ptb', 'universal', tag) + for word, tag in pos_tags] - for i in range(len(tokens)): - conllu_elem = conllu_document.conllu_elements[i] - conllu_elem.upos = simplified_tags[i] - conllu_elem.xpos = pos_tags[i][1] + for i in range(len(tokens)): + conllu_elem = conllu_sent.conllu_elements[i] + conllu_elem.upos = simplified_tags[i] + 
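+                    # xpos keeps the original Penn Treebank tag next to the universal upos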
diff --git a/Tagging/crf_data_generator.py b/Tagging/crf_data_generator.py
new file mode 100644
index 0000000..ce91b39
--- /dev/null
+++ b/Tagging/crf_data_generator.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python3
+
+import conllu_batch_generator as cbg
+
+
+def word2features(sent, i):
+    word = sent[i]['form']
+    postag = sent[i]['upostag']
+    features = [
+        'bias',
+        #'word.lower=' + word.lower(),
+        'word[-3:]=' + word[-3:],
+        'word[-2:]=' + word[-2:],
+        'word.isupper=%s' % word.isupper(),
+        'word.istitle=%s' % word.istitle(),
+        'word.isdigit=%s' % word.isdigit(),
+        'postag=' + postag,
+        'postag[:2]=' + postag[:2],
+    ]
+    if i > 0:
+        word1 = sent[i-1]['form']
+        postag1 = sent[i-1]['upostag']
+        features.extend([
+            '-1:word.lower=' + word1.lower(),
+            '-1:word.istitle=%s' % word1.istitle(),
+            '-1:word.isupper=%s' % word1.isupper(),
+            '-1:postag=' + postag1,
+            '-1:postag[:2]=' + postag1[:2],
+        ])
+        if i > 1:
+            word1 = sent[i-2]['form']
+            postag1 = sent[i-2]['upostag']
+            features.extend([
+                '-2:word.lower=' + word1.lower(),
+                '-2:word.istitle=%s' % word1.istitle(),
+                '-2:word.isupper=%s' % word1.isupper(),
+                '-2:postag=' + postag1,
+                '-2:postag[:2]=' + postag1[:2],
+            ])
+    else:
+        features.append('BOS')
+
+    if i < len(sent)-1:
+        word1 = sent[i+1]['form']
+        postag1 = sent[i+1]['upostag']
+        features.extend([
+            '+1:word.lower=' + word1.lower(),
+            '+1:word.istitle=%s' % word1.istitle(),
+            '+1:word.isupper=%s' % word1.isupper(),
+            '+1:postag=' + postag1,
+            '+1:postag[:2]=' + postag1[:2],
+        ])
+        if i < len(sent)-2:
+            word1 = sent[i+2]['form']
+            postag1 = sent[i+2]['upostag']
+            features.extend([
+                '+2:word.lower=' + word1.lower(),
+                '+2:word.istitle=%s' % word1.istitle(),
+                '+2:word.isupper=%s' % word1.isupper(),
+                '+2:postag=' + postag1,
+                '+2:postag[:2]=' + postag1[:2],
+            ])
+    else:
+        features.append('EOS')
+
+    return features
+
+
+def sent2labels(sent):
+    labels = []
+    for token in sent:
+        if token['misc'] is not None and 'food_type' in token['misc']:
+            labels.append(token['misc']['food_type'])
+        else:
+            labels.append("0")
+    return labels
+
+
+def sent2features(sent):
+    return [word2features(sent, i) for i in range(len(sent))]
+
+
+def sent2tokens(sent):
+    return [token['form'] for token in sent]
+
+
+def feature2tokens(sent):
+    return [t[1].split("=")[1] for t in sent]
+
+
+class ConlluCRFReaderIterator(object):
+    def __init__(self, parent):
+        self._parent = parent
+        self._iter = self._parent._conllu_reader.__iter__()
+
+    def __next__(self):
+        features = None
+        labels = None
+        tokens = None
+
+        if not self._parent._iter_documents:
+            next_sent = self._iter.__next__()[0]
+            features = sent2features(next_sent)
+            labels = sent2labels(next_sent)
+            tokens = sent2tokens(next_sent)
+        else:
+            next_doc = self._iter.__next__()
+            features = [sent2features(sentence) for sentence in next_doc]
+            labels = [sent2labels(sentence) for sentence in next_doc]
+            tokens = [sent2tokens(sentence) for sentence in next_doc]
+
+        return features, labels, tokens
+
+
+class ConlluCRFReader(object):
+    def __init__(self, path, iter_documents=False):
+        self._path = path
+        self._iter_documents = iter_documents
+
+        self._conllu_reader = cbg.ConlluReader(path, iter_documents)
+
+    def __iter__(self):
+        return ConlluCRFReaderIterator(self)
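`ConlluCRFReader` streams `(features, labels, tokens)` triples straight from a `.conllu` file, which is the shape `pycrfsuite.Trainer.append` expects. A minimal training sketch under that assumption (file names follow the repo's conventions; trainer hyperparameters are left at their defaults):

```python
import pycrfsuite

import crf_data_generator as cdg

trainer = pycrfsuite.Trainer(verbose=False)

# one (features, labels, tokens) triple per sentence
for features, labels, tokens in cdg.ConlluCRFReader("recipes0.conllu"):
    trainer.append(features, labels)

# writes the model file that pycrfsuite.Tagger can open later
trainer.train('test.crfsuite')
```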
diff --git a/Tagging/recipe_collnu_generator.py b/Tagging/recipe_collnu_generator.py
index df78455..a9a5760 100755
--- a/Tagging/recipe_collnu_generator.py
+++ b/Tagging/recipe_collnu_generator.py
@@ -16,6 +16,12 @@ spec = importlib.util.spec_from_file_location(
 ingredients = importlib.util.module_from_spec(spec)
 spec.loader.exec_module(ingredients)
 
+# loading actions:
+spec = importlib.util.spec_from_file_location(
+    "actions", "../" + settings.actions_file)
+actions = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(actions)
+
 
 # load json reader
 
@@ -36,13 +42,14 @@ def process_instructions(instructions: list):
     if len(instructions) == 0:
         return
 
-    conllu_input_docs = [doc.replace("\n", " ")[:-1] for doc in instructions]
+    conllu_input_docs = instructions
 
     cg = ConlluGenerator(
         conllu_input_docs, ingredients.multi_word_ingredients_stemmed)
     cg.tokenize_and_stem()
     cg.pos_tagging()
     cg.add_misc_value_by_list("food_type", "ingredient", [w.replace(" ","_") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed)
+    cg.add_misc_value_by_list("food_type", "action", actions.stemmed_cooking_verbs)
 
     savefile.write(str(cg))
diff --git a/Tagging/tagging_tools.py b/Tagging/tagging_tools.py
new file mode 100644
index 0000000..5765836
--- /dev/null
+++ b/Tagging/tagging_tools.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+
+from IPython.display import Markdown, display
+import conllu
+
+
+def print_visualized_tags(
+        conllu_sentence,
+        food_tags_and_colors={'ingredient': 'cyan', 'action': "orange"},
+        upos_colors={'VERB': 'yellow'}):
+    # one HTML span per colored token
+    colorstr = "<span style=\"background-color:{}\">{}</span>"
+    s = ""
+    for tag in conllu_sentence:
+        upos = tag['upostag']
+        food_type = None
+        if tag['misc'] is not None:
+            food_type = tag['misc'].get('food_type')
+        if food_type in food_tags_and_colors:
+            s += colorstr.format(
+                food_tags_and_colors[food_type], tag['form']) + " "
+        elif upos in upos_colors:
+            s += colorstr.format(upos_colors[upos], tag['form']) + " "
+        else:
+            s += tag['form'] + " "
+
+    display(Markdown(s))
+
diff --git a/Tagging/test.crfsuite b/Tagging/test.crfsuite
new file mode 100644
index 0000000..79cc1b5
Binary files /dev/null and b/Tagging/test.crfsuite differ
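A hypothetical notebook usage of `print_visualized_tags`; it assumes the sentence comes from the `conllu` package, whose token dicts carry the `upostag` and `misc` fields the function reads:

```python
import conllu

from tagging_tools import print_visualized_tags

with open("recipes0.conllu") as f:
    sentences = conllu.parse(f.read())

# ingredients render cyan, actions orange, and remaining verbs yellow
print_visualized_tags(sentences[0])
```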
diff --git a/tools.py b/tools.py
new file mode 100644
index 0000000..ee7e3a3
--- /dev/null
+++ b/tools.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python3
+
+import numpy as np
+import json
+
+import nltk
+from nltk.stem import PorterStemmer
+from nltk.stem import LancasterStemmer
+from nltk.corpus import stopwords as nltk_stopwords
+
+from pprint import pprint
+
+from gensim.test.utils import common_texts, get_tmpfile
+from gensim.models import Word2Vec, KeyedVectors
+
+from sklearn.manifold import TSNE
+
+import matplotlib.pyplot as plt
+
+from json_buffered_reader import JSON_buffered_reader as JSON_br
+
+import pandas as pd
+
+import settings
+
+from ipypb import track
+from IPython.display import HTML, Markdown
+
+
+# loading learned wordvectors
+wv = KeyedVectors.load("data/wordvectors.kv")
+porter = PorterStemmer()
+
+
+def word_similarity(word_a: str, word_b: str, model=wv, stemmer=porter):
+    return model.similarity(stemmer.stem(word_a), stemmer.stem(word_b))
+
+
+def word_exists(word: str, model=wv, stemmer=porter):
+    return stemmer.stem(word) in model
+
+
+from cooking_vocab import cooking_verbs
+from cooking_ingredients import ingredients
+
+model_actions = []
+model_ingredients = []
+
+# keep only vocabulary entries whose stem is present in the word2vec model
+for action in cooking_verbs:
+    if word_exists(action):
+        model_actions.append(action)
+
+for ingredient in ingredients:
+    if word_exists(ingredient):
+        model_ingredients.append(ingredient)
+
+
+def tsne_plot(tokens, model=wv, dist_token=None):
+    vecs = []
+    labels = []
+    for token in tokens:
+        vecs.append(model[token])
+        labels.append(token)
+
+    tsne_model = TSNE(perplexity=40, n_components=2,
+                      init='pca', n_iter=2500, random_state=23)
+    plot_values = tsne_model.fit_transform(vecs)
+
+    distances = []
+
+    min_size = 10
+    max_size = 500
+
+    if dist_token is not None:
+        distances = np.array([model.similarity(t, dist_token) for t in tokens])
+        # scale similarities to marker sizes in [min_size, max_size]
+        min_s = np.min(distances)
+        max_s = np.max(distances)
+        distances = min_size + (distances - min_s) * ((max_size - min_size) / (max_s - min_s))
+
+    x = []
+    y = []
+    for value in plot_values:
+        x.append(value[0])
+        y.append(value[1])
+
+    plt.figure(figsize=(16, 16))
+    for i in range(len(x)):
+        if dist_token is None:
+            plt.scatter(x[i], y[i])
+        else:
+            plt.scatter(x[i], y[i], s=distances[i])
+        plt.annotate(labels[i],
+                     xy=(x[i], y[i]),
+                     xytext=(5, 2),
+                     textcoords='offset points',
+                     ha='right',
+                     va='bottom')
+    plt.show()
+
+
+stemmed_ingredients = [porter.stem(ing) for ing in model_ingredients]
+stemmed_actions = [porter.stem(act) for act in model_actions]
\ No newline at end of file
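The helpers in `tools.py` lend themselves to quick sanity checks in a notebook. A sketch (the printed similarity is illustrative, not a measured value; note that `tsne_plot` indexes the model directly, so it should be fed stemmed tokens such as `stemmed_ingredients`):

```python
from tools import word_similarity, tsne_plot, stemmed_ingredients, porter

# similarities are computed on Porter stems, so inflected forms work too
print(word_similarity("chopping", "dicing"))

# 2D t-SNE scatter of the ingredient vocabulary; marker size encodes
# each token's similarity to the stem of "butter"
tsne_plot(stemmed_ingredients, dist_token=porter.stem("butter"))
```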