moved 1 million recipes into the database, starting coarse tree generation for recipe analysis
RecipeAnalysis/Recipe Analysis.ipynb (new file, 223 lines)
@@ -0,0 +1,223 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Recipe Analysis"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append(\"../\")\n",
"from Recipe import Recipe"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import settings\n",
"import db.db_settings as db_settings\n",
"from db.database_connection import DatabaseConnection"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import random"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<db.database_connection.DatabaseConnection at 0x7f58b3f41b70>"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"DatabaseConnection(db_settings.db_host,\n",
"                   db_settings.db_port,\n",
"                   db_settings.db_user,\n",
"                   db_settings.db_pw,\n",
"                   db_settings.db_db,\n",
"                   db_settings.db_charset)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get all recipe ids"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"ids = DatabaseConnection.global_single_query(\"select id from recipes\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* analyse a random recipe"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"test_rec = Recipe(random.choice(ids)['id'])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"## Pat LaFriedas Filet Mignon Steak Sandwich\n",
"(eaed08c862)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Ingredients"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
" * '4 tablespoons canola or other neutral-flavored oil , plus more as needed'\n",
" * '2 large sweet yellow onions or Spanish onions , thinly sliced \\( about 3 cups \\)'\n",
" * '6 ounces thinly sliced Monterey Jack cheese'\n",
" * '1 cup beef stock'\n",
" * '1 1/2 teaspoons balsamic glaze'\n",
" * '12 \\( 1 1/2-inch thick \\) filet medallions \\( about 1 1/2 pounds \\)'\n",
" * '1 tablespoon kosher salt'\n",
" * '1/2 teaspoon turbinado sugar or light brown sugar'\n",
" * '4 demi-baguettes \\( or 6-inch \\) segments of a long baguette'"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Instructions"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
" * In a large skillet , heat 2 tablespoons of the oil over medium heat until it slides easily in the pan , 2 to 3 minutes .\n",
" * Add the onions and cook , stirring occasionally so they do n't stick to the pan , until they are soft and caramelized , about 20 minutes .\n",
" * Spread the onions out over the surface of the pan .\n",
" * Remove from the heat and lay the cheese on top of the onions , letting it melt .\n",
" * To make a jus , in a small saucepan , bring the stock to a simmer over medium heat .\n",
" * Remove from the heat and stir in the balsamic glaze .\n",
" * Cover the pan to keep the jus warm .\n",
" * Season the meat on both sides with the salt and sugar .\n",
" * In another large skillet , heat the remaining 2 tablespoons oil over high heat .\n",
" * Add half the medallions , or as many as will fit in a single layer , and sear them until they are caramelized , 1 to 1 1/2 minutes per side .\n",
" * Cook the remaining medallions in the same way , adding more oil and letting it get hot before adding the meat to the pan .\n",
" * Meanwhile , without opening them , toast the baguettes so that the outsides , top and bottom , are hot and crispy .\n",
" * Halve the baguettes horizontally , leaving them hinged on one side .\n",
" * To assemble the sandwiches , lay 3 medallions on the bottom of each baguette .\n",
" * Top with the onions and cheese , dividing them equally among the sandwiches .\n",
" * Drizzle 1/4 cup of the jus on the inside top half of each baguette .\n",
" * Close up the sandwiches and you 're good to go ."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 1.31 ms, sys: 7.65 ms, total: 8.96 ms\n",
"Wall time: 7.88 ms\n"
]
}
],
"source": [
"%time test_rec.display_recipe()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
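
A note on the connection handling above: the notebook evaluates `DatabaseConnection(...)` once as a bare expression, and every later query goes through the class-level `DatabaseConnection.global_single_query(...)`, so the constructor presumably registers a shared module-wide connection as a side effect. Below is a minimal sketch of that pattern, assuming a pymysql backend; the attribute names and cursor setup are illustrative guesses, not the actual `db.database_connection` implementation.

```python
import pymysql


class DatabaseConnection(object):
    _global_connection = None  # shared handle, set by the constructor (assumed)

    def __init__(self, host, port, user, password, db, charset):
        # DictCursor returns rows as dicts, matching accesses like row['id']
        self._connection = pymysql.connect(
            host=host, port=port, user=user, password=password,
            database=db, charset=charset,
            cursorclass=pymysql.cursors.DictCursor)
        # registering the instance is the side effect that would make
        # class-level queries work after a single bare construction
        DatabaseConnection._global_connection = self._connection

    @classmethod
    def global_single_query(cls, query, args=None):
        # run one statement on the shared connection and fetch all rows
        with cls._global_connection.cursor() as cursor:
            cursor.execute(query, args)
            return cursor.fetchall()
```

Under this reading, cell 4's output is just the unused instance echoing back, and `ids` in cell 5 is a list of `{'id': ...}` dicts, which is why cell 6 indexes `random.choice(ids)['id']`.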
RecipeAnalysis/Recipe.ipynb (new file, 298 lines)
@@ -0,0 +1,298 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Recipe class"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append(\"../\")\n",
"\n",
"import settings\n",
"\n",
"import pycrfsuite\n",
"\n",
"import json\n",
"\n",
"import db.db_settings as db_settings\n",
"from db.database_connection import DatabaseConnection\n",
"\n",
"from Tagging.conllu_generator import ConlluGenerator\n",
"from Tagging.crf_data_generator import *\n",
"\n",
"from IPython.display import Markdown, HTML, display"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get vocabulary"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import importlib.util\n",
"# loading ingredients:\n",
"spec = importlib.util.spec_from_file_location(\n",
"    \"ingredients\", \"../\" + settings.ingredients_file)\n",
"ingredients = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(ingredients)\n",
"\n",
"# loading actions:\n",
"spec = importlib.util.spec_from_file_location(\n",
"    \"actions\", \"../\" + settings.actions_file)\n",
"actions = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(actions)\n",
"\n",
"# loading containers\n",
"spec = importlib.util.spec_from_file_location(\n",
"    \"containers\", \"../\" + settings.container_file)\n",
"containers = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(containers)\n",
"\n",
"# loading placeholders\n",
"spec = importlib.util.spec_from_file_location(\n",
"    \"placeholders\", \"../\" + settings.placeholder_file)\n",
"placeholders = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(placeholders)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<contextlib.closing at 0x7f6743611278>"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tagger = pycrfsuite.Tagger()\n",
"tagger.open('../Tagging/test.crfsuite')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"id_query = \"select * from recipes where id like %s\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def escape_md_chars(s):\n",
"    s = s.replace(\"*\", \"\\*\")\n",
"    s = s.replace(\"(\", \"\\(\")\n",
"    s = s.replace(\")\", \"\\)\")\n",
"    s = s.replace(\"[\", \"\\[\")\n",
"    s = s.replace(\"]\", \"\\]\")\n",
"    s = s.replace(\"_\", \"\\_\")\n",
"\n",
"    return s"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"class Recipe(object):\n",
"    def __init__(self, recipe_db_id=None):\n",
"\n",
"        self._sentences = None\n",
"        self._title = None\n",
"        self._part = None\n",
"        self._ingredients = None\n",
"        self._recipe_id = recipe_db_id\n",
"        self._get_from_db()\n",
"\n",
"        self._extracted_ingredients = None # TODO\n",
"\n",
"        self.annotate_ingredients()\n",
"        self.annotate_sentences()\n",
"\n",
"    def _get_from_db(self):\n",
"        result = DatabaseConnection.global_single_query(id_query, (self._recipe_id))\n",
"        assert len(result) > 0\n",
"        result = result[0]\n",
"        self._title = result['title']\n",
"        self._part = result['part']\n",
"\n",
"        raw_sentences = json.loads(result['instructions'])\n",
"        raw_ingredients = json.loads(result['ingredients'])\n",
"\n",
"        # run the raw data through our conllu generator to annotate it properly\n",
"        cg_sents = ConlluGenerator([\"\\n\".join(raw_sentences)])\n",
"        cg_ings = ConlluGenerator([\"\\n\".join(raw_ingredients)])\n",
"\n",
"        cg_sents.tokenize()\n",
"        cg_sents.pos_tagging_and_lemmatization()\n",
"\n",
"        cg_ings.tokenize()\n",
"        cg_ings.pos_tagging_and_lemmatization()\n",
"\n",
"        # TODO\n",
"        self._sentences = cg_sents.get_conllu_elements()[0]\n",
"        self._ingredients = cg_ings.get_conllu_elements()[0]\n",
"        #self._sentences = json.loads(result['instructions'])\n",
"        #self._ingredients = json.loads(result['ingredients'])\n",
"\n",
"    def avg_sentence_length(self):\n",
"        return sum([len(s) for s in self._sentences])/len(self._sentences)\n",
"\n",
"    def n_instructions(self):\n",
"        return len(self._sentences)\n",
"\n",
"    def max_sentence_length(self):\n",
"        return max([len(s) for s in self._sentences])\n",
"\n",
"    def keyword_ratio(self):\n",
"        sentence_ratios = []\n",
"        for sent in self._sentences:\n",
"            # FIXME: only works if there are no other misc annotations!\n",
"            sentence_ratios.append(sum([token['misc'] is not None for token in sent]))\n",
"        return sum(sentence_ratios) / len(sentence_ratios)\n",
"\n",
"    def predict_labels(self):\n",
"        features = [sent2features(sent) for sent in self._sentences]\n",
"        labels = [tagger.tag(feat) for feat in features]\n",
"        return labels\n",
"\n",
"    def predict_ingredient_labels(self):\n",
"        features = [sent2features(sent) for sent in self._ingredients]\n",
"        labels = [tagger.tag(feat) for feat in features]\n",
"        return labels\n",
"\n",
"    def _annotate_sentences(self, sent_token_list, predictions):\n",
"        # test whether we predicted a label or found it in our label list\n",
"        for i, ing in enumerate(sent_token_list):\n",
"            for j, token in enumerate(ing):\n",
"                lemma = token['lemma']\n",
"\n",
"                # check for ingredient\n",
"                if lemma in ingredients.ingredients_stemmed:\n",
"                    token.add_misc(\"food_type\", \"ingredient\")\n",
"                elif predictions[i][j] == 'ingredient':\n",
"                    token.add_misc(\"food_type\", \"ingredient\")\n",
"\n",
"                # check for action\n",
"                if lemma in actions.stemmed_cooking_verbs:\n",
"                    token.add_misc(\"food_type\", \"action\")\n",
"                elif predictions[i][j] == 'action':\n",
"                    token.add_misc(\"food_type\", \"action\")\n",
"\n",
"                # check for container\n",
"                if lemma in containers.stemmed_containers:\n",
"                    token.add_misc(\"food_type\", \"container\")\n",
"                elif predictions[i][j] == 'container':\n",
"                    token.add_misc(\"food_type\", \"container\")\n",
"\n",
"                # check for placeholder\n",
"                if lemma in placeholders.stemmed_placeholders:\n",
"                    token.add_misc(\"food_type\", \"placeholder\")\n",
"                elif predictions[i][j] == 'placeholder':\n",
"                    token.add_misc(\"food_type\", \"placeholder\")\n",
"\n",
"    def annotate_ingredients(self):\n",
"        self._annotate_sentences(self._ingredients, self.predict_ingredient_labels())\n",
"\n",
"    def annotate_sentences(self):\n",
"        self._annotate_sentences(self._sentences, self.predict_labels())\n",
"\n",
"    def recipe_id(self):\n",
"        return self._recipe_id\n",
"\n",
"    def serialize(self):\n",
"        result = \"# newdoc\\n\"\n",
"        if self._recipe_id is not None:\n",
"            result += f\"# id: {self._recipe_id}\\n\"\n",
"\n",
"        for sent in self._sentences:\n",
"            result += f\"{sent.serialize()}\"\n",
"        return result + \"\\n\"\n",
"\n",
"    def display_recipe(self):\n",
"        display(Markdown(f\"## {self._title}\\n({self._recipe_id})\"))\n",
"        display(Markdown(f\"### Ingredients\"))\n",
"        display(Markdown(\"\\n\".join([f\" * '{escape_md_chars(self.tokenlist2str(ing))}'\" for ing in self._ingredients])))\n",
"        display(Markdown(f\"### Instructions\"))\n",
"        display(Markdown(\"\\n\".join([f\" * {escape_md_chars(self.tokenlist2str(ins))}\" for ins in self._sentences])))\n",
"\n",
"    def tokenlist2str(self, tokenlist):\n",
"        return \" \".join([token['form'] for token in tokenlist])\n",
"\n",
"    def tokenarray2str(self, tokenarray):\n",
"        return \"\\n\".join([self.tokenlist2str(tokenlist) for tokenlist in tokenarray])\n",
"\n",
"\n",
"    def __repr__(self):\n",
"        s = \"recipe: \" + (self._recipe_id if self._recipe_id else \"\") + \"\\n\"\n",
"        s += \"instructions: \\n\"\n",
"        for sent in self._sentences:\n",
"            s += \" \".join([token['form'] for token in sent]) + \"\\n\"\n",
"\n",
"        s += \"\\nscores:\\n\"\n",
"        s += f\"avg_sent_length: {self.avg_sentence_length()}\\n\"\n",
"        s += f\"n_instructions: {self.n_instructions()}\\n\"\n",
"        s += f\"keyword_ratio: {self.keyword_ratio()}\\n\\n\\n\"\n",
"\n",
"        return s"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
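
`predict_labels()` and `predict_ingredient_labels()` lean on `sent2features` from `Tagging.crf_data_generator`, which is star-imported and not part of this commit. As a self-contained illustration of the tagging flow, here is a stand-in feature function feeding `pycrfsuite.Tagger`; the concrete features are assumptions, only the pycrfsuite calls mirror the notebook. (Incidentally, `Tagger.open()` returns a `contextlib.closing` wrapper, which explains the stray `<contextlib.closing at 0x...>` output in cell 3.)

```python
import pycrfsuite


def sent2features(sent):
    # stand-in feature extractor: one feature list per token;
    # pycrfsuite accepts lists of "key=value" strings per item
    features = []
    for i, token in enumerate(sent):
        feats = [
            "word=" + token["form"].lower(),
            "lemma=" + token["lemma"],
        ]
        # context feature from the previous token, if any
        feats.append("-1:lemma=" + sent[i - 1]["lemma"] if i > 0 else "BOS")
        features.append(feats)
    return features


tagger = pycrfsuite.Tagger()
tagger.open("../Tagging/test.crfsuite")  # model trained elsewhere in the repo

# tokens shaped like the conllu elements used above (dicts with 'form'/'lemma')
sent = [
    {"form": "Add", "lemma": "add"},
    {"form": "the", "lemma": "the"},
    {"form": "onions", "lemma": "onion"},
]
labels = tagger.tag(sent2features(sent))  # one predicted label per token
```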
RecipeAnalysis/Recipe.py (new file, 211 lines)
@@ -0,0 +1,211 @@
#!/usr/bin/env python3
# coding: utf-8

# # Recipe class

import sys
sys.path.append("../")

import settings

import pycrfsuite

import json

import db.db_settings as db_settings
from db.database_connection import DatabaseConnection

from Tagging.conllu_generator import ConlluGenerator
from Tagging.crf_data_generator import *

from IPython.display import Markdown, HTML, display


# * get vocabulary

import importlib.util
# loading ingredients:
spec = importlib.util.spec_from_file_location(
    "ingredients", "../" + settings.ingredients_file)
ingredients = importlib.util.module_from_spec(spec)
spec.loader.exec_module(ingredients)

# loading actions:
spec = importlib.util.spec_from_file_location(
    "actions", "../" + settings.actions_file)
actions = importlib.util.module_from_spec(spec)
spec.loader.exec_module(actions)

# loading containers
spec = importlib.util.spec_from_file_location(
    "containers", "../" + settings.container_file)
containers = importlib.util.module_from_spec(spec)
spec.loader.exec_module(containers)

# loading placeholders
spec = importlib.util.spec_from_file_location(
    "placeholders", "../" + settings.placeholder_file)
placeholders = importlib.util.module_from_spec(spec)
spec.loader.exec_module(placeholders)


tagger = pycrfsuite.Tagger()
tagger.open('../Tagging/test.crfsuite')


id_query = "select * from recipes where id like %s"


def escape_md_chars(s):
    s = s.replace("*", "\*")
    s = s.replace("(", "\(")
    s = s.replace(")", "\)")
    s = s.replace("[", "\[")
    s = s.replace("]", "\]")
    s = s.replace("_", "\_")

    return s


class Recipe(object):
    def __init__(self, recipe_db_id=None):

        self._sentences = None
        self._title = None
        self._part = None
        self._ingredients = None
        self._recipe_id = recipe_db_id
        self._get_from_db()
        self.annotate_ingredients()
        self.annotate_sentences()

    def _get_from_db(self):
        result = DatabaseConnection.global_single_query(id_query, (self._recipe_id))
        assert len(result) > 0
        result = result[0]
        self._title = result['title']
        self._part = result['part']

        raw_sentences = json.loads(result['instructions'])
        raw_ingredients = json.loads(result['ingredients'])

        # run the raw data through our conllu generator to annotate it properly
        cg_sents = ConlluGenerator(["\n".join(raw_sentences)])
        cg_ings = ConlluGenerator(["\n".join(raw_ingredients)])

        cg_sents.tokenize()
        cg_sents.pos_tagging_and_lemmatization()

        cg_ings.tokenize()
        cg_ings.pos_tagging_and_lemmatization()

        # TODO
        self._sentences = cg_sents.get_conllu_elements()[0]
        self._ingredients = cg_ings.get_conllu_elements()[0]
        #self._sentences = json.loads(result['instructions'])
        #self._ingredients = json.loads(result['ingredients'])

    def avg_sentence_length(self):
        return sum([len(s) for s in self._sentences])/len(self._sentences)

    def n_instructions(self):
        return len(self._sentences)

    def max_sentence_length(self):
        return max([len(s) for s in self._sentences])

    def keyword_ratio(self):
        sentence_ratios = []
        for sent in self._sentences:
            # FIXME: only works if there are no other misc annotations!
            sentence_ratios.append(sum([token['misc'] is not None for token in sent]))
        return sum(sentence_ratios) / len(sentence_ratios)

    def predict_labels(self):
        features = [sent2features(sent) for sent in self._sentences]
        labels = [tagger.tag(feat) for feat in features]
        return labels

    def predict_ingredient_labels(self):
        features = [sent2features(sent) for sent in self._ingredients]
        labels = [tagger.tag(feat) for feat in features]
        return labels

    def _annotate_sentences(self, sent_token_list, predictions):
        # test whether we predicted a label or found it in our label list
        for i, ing in enumerate(sent_token_list):
            for j, token in enumerate(ing):
                lemma = token['lemma']

                # check for ingredient
                if lemma in ingredients.ingredients_stemmed:
                    token.add_misc("food_type", "ingredient")
                elif predictions[i][j] == 'ingredient':
                    token.add_misc("food_type", "ingredient")

                # check for action
                if lemma in actions.stemmed_cooking_verbs:
                    token.add_misc("food_type", "action")
                elif predictions[i][j] == 'action':
                    token.add_misc("food_type", "action")

                # check for container
                if lemma in containers.stemmed_containers:
                    token.add_misc("food_type", "container")
                elif predictions[i][j] == 'container':
                    token.add_misc("food_type", "container")

                # check for placeholder
                if lemma in placeholders.stemmed_placeholders:
                    token.add_misc("food_type", "placeholder")
                elif predictions[i][j] == 'placeholder':
                    token.add_misc("food_type", "placeholder")

    def annotate_ingredients(self):
        self._annotate_sentences(self._ingredients, self.predict_ingredient_labels())

    def annotate_sentences(self):
        self._annotate_sentences(self._sentences, self.predict_labels())

    def recipe_id(self):
        return self._recipe_id

    def serialize(self):
        result = "# newdoc\n"
        if self._recipe_id is not None:
            result += f"# id: {self._recipe_id}\n"

        for sent in self._sentences:
            result += f"{sent.serialize()}"
        return result + "\n"

    def display_recipe(self):
        display(Markdown(f"## {self._title}\n({self._recipe_id})"))
        display(Markdown(f"### Ingredients"))
        display(Markdown("\n".join([f" * '{escape_md_chars(self.tokenlist2str(ing))}'" for ing in self._ingredients])))
        display(Markdown(f"### Instructions"))
        display(Markdown("\n".join([f" * {escape_md_chars(self.tokenlist2str(ins))}" for ins in self._sentences])))

    def tokenlist2str(self, tokenlist):
        return " ".join([token['form'] for token in tokenlist])

    def tokenarray2str(self, tokenarray):
        return "\n".join([self.tokenlist2str(tokenlist) for tokenlist in tokenarray])


    def __repr__(self):
        s = "recipe: " + (self._recipe_id if self._recipe_id else "") + "\n"
        s += "instructions: \n"
        for sent in self._sentences:
            s += " ".join([token['form'] for token in sent]) + "\n"

        s += "\nscores:\n"
        s += f"avg_sent_length: {self.avg_sentence_length()}\n"
        s += f"n_instructions: {self.n_instructions()}\n"
        s += f"keyword_ratio: {self.keyword_ratio()}\n\n\n"

        return s
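
The four vocabulary loads at the top of Recipe.py repeat the same `importlib.util` steps verbatim. They reduce to the standard load-a-module-from-a-path pattern; the helper below is an illustrative refactoring of that pattern, not code from this commit:

```python
import importlib.util


def load_module_from_path(name, path):
    # build a spec from a file path, materialize a module from it,
    # then execute the module body to populate its namespace
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module


# hypothetical usage mirroring the four loads above:
# ingredients = load_module_from_path("ingredients", "../" + settings.ingredients_file)
# actions = load_module_from_path("actions", "../" + settings.actions_file)
```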