{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Recipe Tagging"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"'Plotly version 3.10.0'\n"
]
}
],
"source": [
"import sys\n",
"sys.path.insert(0,'..')\n",
"\n",
"import numpy as np\n",
"import json\n",
"\n",
"import nltk\n",
"from nltk.stem import PorterStemmer\n",
"from nltk.stem import LancasterStemmer\n",
"from nltk.corpus import stopwords as nltk_stopwords\n",
"from nltk.tokenize import MWETokenizer\n",
"\n",
"import matplotlib.pyplot as plt\n",
"\n",
"from pprint import pprint\n",
"\n",
"from gensim.test.utils import common_texts, get_tmpfile\n",
"from gensim.models import Word2Vec\n",
"\n",
"from json_buffered_reader import JSON_buffered_reader as JSON_br\n",
"\n",
"import pandas as pd\n",
"import settings\n",
"\n",
"from ipypb import track\n",
"\n",
"from IPython.display import HTML, Markdown\n",
"\n",
"import plotly\n",
"pprint (f\"Plotly version {plotly.__version__}\")\n",
"\n",
"import plotly.graph_objs as go\n",
"from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\n",
"init_notebook_mode(connected=True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* loading ingredients file"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import importlib.util\n",
"spec = importlib.util.spec_from_file_location(\"ingredients\", \"../\" + settings.ingredients_file)\n",
"ingredients = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(ingredients)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* loading first n recipes"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"n = 1000\n",
"\n",
"buffered_reader_1M = JSON_br(\"../\" + settings.one_million_recipes_file)"
]
},
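{
"cell_type": "markdown",
"metadata": {},
"source": [
"`JSON_buffered_reader` is a project-local helper for streaming the large recipe file instead of loading it into memory at once. The cell below is only a hypothetical sketch of that idea (the name `iter_recipes` and the one-object-per-line layout are assumptions; the real file may be a single JSON array)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch of a streaming JSON reader (illustration only; the\n",
"# notebook uses the project's JSON_buffered_reader). Assumes one JSON object\n",
"# per line -- the real dataset may instead be one large array.\n",
"def iter_recipes(path):\n",
"    with open(path, encoding='utf-8') as f:\n",
"        for line in f:\n",
"            line = line.strip().rstrip(',')\n",
"            if line and line not in ('[', ']'):\n",
"                yield json.loads(line)"
]
},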
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"i = 0\n",
"\n",
"instructions = []\n",
"\n",
"for recipe in buffered_reader_1M:\n",
" \n",
" instruction = \"\"\n",
" for item in recipe['instructions']:\n",
" instruction += item['text'] + '\\n' \n",
" \n",
" instructions.append(instruction)\n",
" i += 1\n",
" if i >= 1000:\n",
" break"
]
},
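{
"cell_type": "markdown",
"metadata": {},
"source": [
"The same collection step can be written more compactly with `itertools.islice`. This is only an equivalent sketch: it needs a freshly created reader (the one above has already been consumed) and joins the steps with newlines instead of appending a trailing one."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Equivalent sketch using itertools.islice; fresh_reader is created here\n",
"# because buffered_reader_1M has already been (partially) consumed above.\n",
"from itertools import islice\n",
"\n",
"fresh_reader = JSON_br(\"../\" + settings.one_million_recipes_file)\n",
"instructions_alt = [\n",
"    '\\n'.join(step['text'] for step in recipe['instructions'])\n",
"    for recipe in islice(fresh_reader, n)\n",
"]"
]
},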
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"from stemmed_mwe_tokenizer import StemmedMWETokenizer"
]
},
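{
"cell_type": "markdown",
"metadata": {},
"source": [
"`StemmedMWETokenizer` is a project-local class. Judging from the output further below (surface forms are preserved, but multi-word ingredients such as `chili powder` come back merged as `chili_powder`), it presumably matches multi-word expressions on stemmed tokens while emitting the original words. The cell below is a minimal hypothetical re-implementation of that idea, not the project's actual code."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Hypothetical sketch of the assumed behaviour of StemmedMWETokenizer:\n",
"# match multi-word expressions on stemmed tokens, emit original surface forms.\n",
"class SketchStemmedMWETokenizer:\n",
"    def __init__(self, mwes, separator='_'):\n",
"        self.stemmer = PorterStemmer()\n",
"        self.separator = separator\n",
"        # Store each MWE as a tuple of stems, e.g. ('chili', 'powder').\n",
"        self.mwes = {tuple(self.stemmer.stem(w) for w in m) for m in mwes}\n",
"        self.max_len = max((len(m) for m in self.mwes), default=1)\n",
"\n",
"    def tokenize(self, tokens):\n",
"        stems = [self.stemmer.stem(t) for t in tokens]\n",
"        out, i = [], 0\n",
"        while i < len(tokens):\n",
"            # Greedily try the longest stemmed match starting at position i.\n",
"            for j in range(min(self.max_len, len(tokens) - i), 1, -1):\n",
"                if tuple(stems[i:i + j]) in self.mwes:\n",
"                    out.append(self.separator.join(tokens[i:i + j]))\n",
"                    i += j\n",
"                    break\n",
"            else:\n",
"                out.append(tokens[i])\n",
"                i += 1\n",
"        return out"
]
},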
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"mwe_tokenizer = StemmedMWETokenizer([w.split() for w in ingredients.multi_word_ingredients_stemmed])"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"['Preheat',\n",
" 'the',\n",
" 'oven',\n",
" 'to',\n",
" '350',\n",
" 'F.',\n",
" 'Butter',\n",
" 'or',\n",
" 'oil',\n",
" 'an',\n",
" '8-inch',\n",
" 'baking',\n",
" 'dish',\n",
" '.',\n",
" 'Cook',\n",
" 'the',\n",
" 'penne',\n",
" '2',\n",
" 'minutes',\n",
" 'less',\n",
" 'than',\n",
" 'package',\n",
" 'directions',\n",
" '.',\n",
" '(',\n",
" 'It',\n",
" 'will',\n",
" 'finish',\n",
" 'cooking',\n",
" 'in',\n",
" 'the',\n",
" 'oven',\n",
" '.',\n",
" ')',\n",
" 'Rinse',\n",
" 'the',\n",
" 'pasta',\n",
" 'in',\n",
" 'cold_water',\n",
" 'and',\n",
" 'set',\n",
" 'aside',\n",
" '.',\n",
" 'Combine',\n",
" 'the',\n",
" 'cooked',\n",
" 'pasta',\n",
" 'and',\n",
" 'the',\n",
" 'sauce',\n",
" 'in',\n",
" 'a',\n",
" 'medium',\n",
" 'bowl',\n",
" 'and',\n",
" 'mix',\n",
" 'carefully',\n",
" 'but',\n",
" 'thoroughly',\n",
" '.',\n",
" 'Scrape',\n",
" 'the',\n",
" 'pasta',\n",
" 'into',\n",
" 'the',\n",
" 'prepared',\n",
" 'baking',\n",
" 'dish',\n",
" '.',\n",
" 'Sprinkle',\n",
" 'the',\n",
" 'top',\n",
" 'with',\n",
" 'the',\n",
" 'cheeses',\n",
" 'and',\n",
" 'then',\n",
" 'the',\n",
" 'chili_powder',\n",
" '.',\n",
" 'Bake',\n",
" ',',\n",
" 'uncovered',\n",
" ',',\n",
" 'for',\n",
" '20',\n",
" 'minutes',\n",
" '.',\n",
" 'Let',\n",
" 'the',\n",
" 'mac',\n",
" 'and',\n",
" 'cheese',\n",
" 'sit',\n",
" 'for',\n",
" '5',\n",
" 'minutes',\n",
" 'before',\n",
" 'serving',\n",
" '.',\n",
" 'Melt',\n",
" 'the',\n",
" 'butter',\n",
" 'in',\n",
" 'a',\n",
" 'heavy-bottomed',\n",
" 'saucepan',\n",
" 'over',\n",
" 'medium',\n",
" 'heat',\n",
" 'and',\n",
" 'whisk',\n",
" 'in',\n",
" 'the',\n",
" 'flour',\n",
" '.',\n",
" 'Continue',\n",
" 'whisking',\n",
" 'and',\n",
" 'cooking',\n",
" 'for',\n",
" '2',\n",
" 'minutes',\n",
" '.',\n",
" 'Slowly',\n",
" 'add',\n",
" 'the',\n",
" 'milk',\n",
" ',',\n",
" 'whisking',\n",
" 'constantly',\n",
" '.',\n",
" 'Cook',\n",
" 'until',\n",
" 'the',\n",
" 'sauce',\n",
" 'thickens',\n",
" ',',\n",
" 'about',\n",
" '10',\n",
" 'minutes',\n",
" ',',\n",
" 'stirring',\n",
" 'frequently',\n",
" '.',\n",
" 'Remove',\n",
" 'from',\n",
" 'the',\n",
" 'heat',\n",
" '.',\n",
" 'Add',\n",
" 'the',\n",
" 'cheeses',\n",
" ',',\n",
" 'salt',\n",
" ',',\n",
" 'chili_powder',\n",
" ',',\n",
" 'and',\n",
" 'garlic_powder',\n",
" '.',\n",
" 'Stir',\n",
" 'until',\n",
" 'the',\n",
" 'cheese',\n",
" 'is',\n",
" 'melted',\n",
" 'and',\n",
" 'all',\n",
" 'ingredients',\n",
" 'are',\n",
" 'incorporated',\n",
" ',',\n",
" 'about',\n",
" '3',\n",
" 'minutes',\n",
" '.',\n",
" 'Use',\n",
" 'immediately',\n",
" ',',\n",
" 'or',\n",
" 'refrigerate',\n",
" 'for',\n",
" 'up',\n",
" 'to',\n",
" '3',\n",
" 'days',\n",
" '.',\n",
" 'This',\n",
" 'sauce',\n",
" 'reheats',\n",
" 'nicely',\n",
" 'on',\n",
" 'the',\n",
" 'stove',\n",
" 'in',\n",
" 'a',\n",
" 'saucepan',\n",
" 'over',\n",
" 'low',\n",
" 'heat',\n",
" '.',\n",
" 'Stir',\n",
" 'frequently',\n",
" 'so',\n",
" 'the',\n",
" 'sauce',\n",
" 'doesnt',\n",
" 'scorch',\n",
" '.',\n",
" 'This',\n",
" 'recipe',\n",
" 'can',\n",
" 'be',\n",
" 'assembled',\n",
" 'before',\n",
" 'baking',\n",
" 'and',\n",
" 'frozen',\n",
" 'for',\n",
" 'up',\n",
" 'to',\n",
" '3',\n",
" 'monthsjust',\n",
" 'be',\n",
" 'sure',\n",
" 'to',\n",
" 'use',\n",
" 'a',\n",
" 'freezer-to-oven',\n",
" 'pan',\n",
" 'and',\n",
" 'increase',\n",
" 'the',\n",
" 'baking',\n",
" 'time',\n",
" 'to',\n",
" '50',\n",
" 'minutes',\n",
" '.',\n",
" 'One-half',\n",
" 'teaspoon',\n",
" 'of',\n",
" 'chipotle',\n",
" 'chili_powder',\n",
" 'makes',\n",
" 'a',\n",
" 'spicy',\n",
" 'mac',\n",
" ',',\n",
" 'so',\n",
" 'make',\n",
" 'sure',\n",
" 'your',\n",
" 'family',\n",
" 'and',\n",
" 'friends',\n",
" 'can',\n",
" 'handle',\n",
" 'it',\n",
" '!',\n",
" 'The',\n",
" 'proportion',\n",
" 'of',\n",
" 'pasta',\n",
" 'to',\n",
" 'cheese_sauce',\n",
" 'is',\n",
" 'crucial',\n",
" 'to',\n",
" 'the',\n",
" 'success',\n",
" 'of',\n",
" 'the',\n",
" 'dish',\n",
" '.',\n",
" 'It',\n",
" 'will',\n",
" 'look',\n",
" 'like',\n",
" 'a',\n",
" 'lot',\n",
" 'of',\n",
" 'sauce',\n",
" 'for',\n",
" 'the',\n",
" 'pasta',\n",
" ',',\n",
" 'but',\n",
" 'some',\n",
" 'of',\n",
" 'the',\n",
" 'liquid',\n",
" 'will',\n",
" 'be',\n",
" 'absorbed',\n",
" '.']"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"mwe_tokenizer.tokenize(nltk.tokenize.word_tokenize(instructions[0]))"
]
},
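{
"cell_type": "markdown",
"metadata": {},
"source": [
"In the output above, multi-word ingredients are merged into single underscore-joined tokens (`cold_water`, `chili_powder`, `garlic_powder`, `cheese_sauce`). A quick illustrative check pulls these out:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative: list the merged multi-word tokens in the first recipe.\n",
"tokens = mwe_tokenizer.tokenize(nltk.tokenize.word_tokenize(instructions[0]))\n",
"sorted(set(t for t in tokens if '_' in t))"
]
},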
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\u001b[0;31mSignature:\u001b[0m \u001b[0mmwe_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspan_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mDocstring:\u001b[0m\n",
"Identify the tokens using integer offsets ``(start_i, end_i)``,\n",
"where ``s[start_i:end_i]`` is the corresponding token.\n",
"\n",
":rtype: iter(tuple(int, int))\n",
"\u001b[0;31mFile:\u001b[0m ~/.local/lib/python3.7/site-packages/nltk/tokenize/api.py\n",
"\u001b[0;31mType:\u001b[0m method\n"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"?mwe_tokenizer."
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"ename": "NotImplementedError",
"evalue": "",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mNotImplementedError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-dfad11b33102>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mmwe_tokenizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mspan_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnltk\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtokenize\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mword_tokenize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0minstructions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/.local/lib/python3.7/site-packages/nltk/tokenize/api.py\u001b[0m in \u001b[0;36mspan_tokenize\u001b[0;34m(self, s)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[0;34m:\u001b[0m\u001b[0mrtype\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0miter\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mint\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mint\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 43\u001b[0m \"\"\"\n\u001b[0;32m---> 44\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mNotImplementedError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 45\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 46\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mtokenize_sents\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstrings\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mNotImplementedError\u001b[0m: "
]
}
],
"source": [
"mwe_tokenizer.span_tokenize(nltk.tokenize.word_tokenize(instructions[0]))"
]
},
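{
"cell_type": "markdown",
"metadata": {},
"source": [
"`span_tokenize` is declared on nltk's abstract `TokenizerI` base class and is not overridden by `MWETokenizer`, hence the `NotImplementedError`. If character offsets are needed, they have to be recovered separately; the sketch below is a rough, assumption-laden workaround that searches the original text for each (possibly merged) token, and it is brittle wherever `word_tokenize` rewrites characters (e.g. quotation marks)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Rough workaround sketch: approximate character spans for (possibly merged)\n",
"# tokens by searching the original text left to right. Brittle where\n",
"# word_tokenize rewrites characters (e.g. quotation marks).\n",
"def approx_spans(text, tokens, separator='_'):\n",
"    spans, pos = [], 0\n",
"    for tok in tokens:\n",
"        parts = tok.split(separator)\n",
"        start = text.find(parts[0], pos)\n",
"        end = start + len(parts[0])\n",
"        for part in parts[1:]:\n",
"            end = text.find(part, end) + len(part)\n",
"        spans.append((start, end))\n",
"        pos = end\n",
"    return spans\n",
"\n",
"toks = mwe_tokenizer.tokenize(nltk.tokenize.word_tokenize(instructions[0]))\n",
"approx_spans(instructions[0], toks)[:10]"
]
}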
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}