moved 1 million recipes into the database, starting coarse tree generation for recipe analysis

Jonas Weinz 2019-08-10 23:10:30 +02:00
parent cbccf169bb
commit 09eb58e703
24 changed files with 7571 additions and 3663 deletions


@@ -0,0 +1,223 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Recipe Analysis"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append(\"../\")\n",
"from Recipe import Recipe"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import settings\n",
"import db.db_settings as db_settings\n",
"from db.database_connection import DatabaseConnection"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"import random"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<db.database_connection.DatabaseConnection at 0x7f58b3f41b70>"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"DatabaseConnection(db_settings.db_host,\n",
" db_settings.db_port,\n",
" db_settings.db_user,\n",
" db_settings.db_pw,\n",
" db_settings.db_db,\n",
" db_settings.db_charset)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get all recipe id's"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"ids = DatabaseConnection.global_single_query(\"select id from recipes\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* analyse a random recipe"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"test_rec = Recipe(random.choice(ids)['id'])"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"## Pat LaFriedas Filet Mignon Steak Sandwich\n",
"(eaed08c862)"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Ingredients"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
" * '4 tablespoons canola or other neutral-flavored oil , plus more as needed'\n",
" * '2 large sweet yellow onions or Spanish onions , thinly sliced \\( about 3 cups \\)'\n",
" * '6 ounces thinly sliced Monterey Jack cheese'\n",
" * '1 cup beef stock'\n",
" * '1 1/2 teaspoons balsamic glaze'\n",
" * '12 \\( 1 1/2-inch thick \\) filet medallions \\( about 1 1/2 pounds \\)'\n",
" * '1 tablespoon kosher salt'\n",
" * '1/2 teaspoon turbinado sugar or light brown sugar'\n",
" * '4 demi-baguettes \\( or 6-inch \\) segments of a long baguette'"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"### Instructions"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
" * In a large skillet , heat 2 tablespoons of the oil over medium heat until it slides easily in the pan , 2 to 3 minutes .\n",
" * Add the onions and cook , stirring occasionally so they do n't stick to the pan , until they are soft and caramelized , about 20 minutes .\n",
" * Spread the onions out over the surface of the pan .\n",
" * Remove from the heat and lay the cheese on top of the onions , letting it melt .\n",
" * To make a jus , in a small saucepan , bring the stock to a simmer over medium heat .\n",
" * Remove from the heat and stir in the balsamic glaze .\n",
" * Cover the pan to keep the jus warm .\n",
" * Season the meat on both sides with the salt and sugar .\n",
" * In another large skillet , heat the remaining 2 tablespoons oil over high heat .\n",
" * Add half the medallions , or as many as will fit in a single layer , and sear them until they are caramelized , 1 to 1 1/2 minutes per side .\n",
" * Cook the remaining medallions in the same way , adding more oil and letting it get hot before adding the meat to the pan .\n",
" * Meanwhile , without opening them , toast the baguettes so that the outsides , top and bottom , are hot and crispy .\n",
" * Halve the baguettes horizontally , leaving them hinged on one side .\n",
" * To assemble the sandwiches , lay 3 medallions on the bottom of each baguette .\n",
" * Top with the onions and cheese , dividing them equally among the sandwiches .\n",
" * Drizzle 1/4 cup of the jus on the inside top half of each baguette .\n",
" * Close up the sandwiches and you 're good to go ."
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"CPU times: user 1.31 ms, sys: 7.65 ms, total: 8.96 ms\n",
"Wall time: 7.88 ms\n"
]
}
],
"source": [
"%time test_rec.display_recipe()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
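A minimal follow-up sketch (illustrative only, assuming it is run from inside the RecipeAnalysis/ directory like the notebook above, with the database from db/ reachable): instead of a single random recipe, score a small sample with the metrics defined in RecipeAnalysis/Recipe.py.

import sys
import random

sys.path.append("../")

import db.db_settings as db_settings
from db.database_connection import DatabaseConnection
from Recipe import Recipe

# open the global singleton connection once
DatabaseConnection(db_settings.db_host,
                   db_settings.db_port,
                   db_settings.db_user,
                   db_settings.db_pw,
                   db_settings.db_db,
                   db_settings.db_charset)

# fetch all ids, then score a handful of random recipes
ids = DatabaseConnection.global_single_query("select id from recipes")
for row in random.sample(ids, 5):
    rec = Recipe(row['id'])
    print(rec.recipe_id(),
          rec.n_instructions(),
          round(rec.avg_sentence_length(), 2),
          round(rec.keyword_ratio(), 2))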

298
RecipeAnalysis/Recipe.ipynb Normal file

@@ -0,0 +1,298 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Recipe class"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append(\"../\")\n",
"\n",
"import settings\n",
"\n",
"import pycrfsuite\n",
"\n",
"import json\n",
"\n",
"import db.db_settings as db_settings\n",
"from db.database_connection import DatabaseConnection\n",
"\n",
"from Tagging.conllu_generator import ConlluGenerator\n",
"from Tagging.crf_data_generator import *\n",
"\n",
"from IPython.display import Markdown, HTML, display"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get vocabulary"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import importlib.util\n",
"# loading ingredients:\n",
"spec = importlib.util.spec_from_file_location(\n",
" \"ingredients\", \"../\" + settings.ingredients_file)\n",
"ingredients = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(ingredients)\n",
"\n",
"# loading actions:\n",
"spec = importlib.util.spec_from_file_location(\n",
" \"actions\", \"../\" + settings.actions_file)\n",
"actions = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(actions)\n",
"\n",
"# loading containers\n",
"spec = importlib.util.spec_from_file_location(\n",
" \"containers\", \"../\" + settings.container_file)\n",
"containers = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(containers)\n",
"\n",
"# loading placeholders\n",
"spec = importlib.util.spec_from_file_location(\n",
" \"placeholders\", \"../\" + settings.placeholder_file)\n",
"placeholders = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(placeholders)\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<contextlib.closing at 0x7f6743611278>"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tagger = pycrfsuite.Tagger()\n",
"tagger.open('../Tagging/test.crfsuite')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"id_query = \"select * from recipes where id like %s\""
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def escape_md_chars(s):\n",
" s = s.replace(\"*\", \"\\*\")\n",
" s = s.replace(\"(\", \"\\(\")\n",
" s = s.replace(\")\", \"\\)\")\n",
" s = s.replace(\"[\", \"\\[\")\n",
" s = s.replace(\"]\", \"\\]\")\n",
" s = s.replace(\"_\", \"\\_\")\n",
" \n",
" return s"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"class Recipe(object):\n",
" def __init__(self, recipe_db_id = None):\n",
" \n",
" self._sentences = None\n",
" self._title = None\n",
" self._part = None\n",
" self._ingredients = None\n",
" self._recipe_id = recipe_db_id\n",
" self._get_from_db()\n",
" \n",
" self._extracted_ingredients = None # TODO\n",
" \n",
" self.annotate_ingredients()\n",
" self.annotate_sentences()\n",
" \n",
" def _get_from_db(self):\n",
" result = DatabaseConnection.global_single_query(id_query, (self._recipe_id))\n",
" assert len(result) > 0\n",
" result = result[0]\n",
" self._title = result['title']\n",
" self._part = result['part']\n",
" \n",
" raw_sentences = json.loads(result['instructions'])\n",
" raw_ingredients = json.loads(result['ingredients'])\n",
" \n",
" # throwing the raw data through our connlu generator to annotate them right\n",
" cg_sents = ConlluGenerator([\"\\n\".join(raw_sentences)])\n",
" cg_ings = ConlluGenerator([\"\\n\".join(raw_ingredients)])\n",
" \n",
" cg_sents.tokenize()\n",
" cg_sents.pos_tagging_and_lemmatization()\n",
" \n",
" cg_ings.tokenize()\n",
" cg_ings.pos_tagging_and_lemmatization()\n",
" \n",
" # TODO\n",
" self._sentences = cg_sents.get_conllu_elements()[0]\n",
" self._ingredients = cg_ings.get_conllu_elements()[0]\n",
" #self._sentences = json.loads(result['instructions'])\n",
" #self._ingredients = json.loads(result['ingredients'])\n",
" \n",
" def avg_sentence_length(self):\n",
" return sum([len(s) for s in self._sentences])/len(self._sentences)\n",
" \n",
" def n_instructions(self):\n",
" return len(self._sentences)\n",
" \n",
" def max_sentence_length(self):\n",
" return max([len(s) for s in self._sentences])\n",
" \n",
" def keyword_ratio(self):\n",
" sentence_ratios = []\n",
" for sent in self._sentences:\n",
" # FIXME: only works if there are no other misc annotations!\n",
" sentence_ratios.append(sum([token['misc'] is not None for token in sent]))\n",
" return sum(sentence_ratios) / len(sentence_ratios)\n",
" \n",
" def predict_labels(self):\n",
" features = [sent2features(sent) for sent in self._sentences]\n",
" labels = [tagger.tag(feat) for feat in features]\n",
" return labels\n",
" \n",
" def predict_ingredient_labels(self):\n",
" features = [sent2features(sent) for sent in self._ingredients]\n",
" labels = [tagger.tag(feat) for feat in features]\n",
" return labels\n",
" \n",
" def _annotate_sentences(self, sent_token_list, predictions):\n",
" # test whether we predicted an label or found it in our label list\n",
" for i, ing in enumerate(sent_token_list):\n",
" for j, token in enumerate(ing):\n",
" lemma = token['lemma']\n",
" \n",
" # check for ingredient\n",
" if lemma in ingredients.ingredients_stemmed:\n",
" token.add_misc(\"food_type\", \"ingredient\")\n",
" elif predictions[i][j] == 'ingredient':\n",
" token.add_misc(\"food_type\", \"ingredient\")\n",
" \n",
" # check for action\n",
" if lemma in actions.stemmed_cooking_verbs:\n",
" token.add_misc(\"food_type\", \"action\")\n",
" elif predictions[i][j] == 'action':\n",
" token.add_misc(\"food_type\", \"action\")\n",
" \n",
" # check for container\n",
" if lemma in containers.stemmed_containers:\n",
" token.add_misc(\"food_type\", \"container\")\n",
" elif predictions[i][j] == 'container':\n",
" token.add_misc(\"food_type\", \"container\")\n",
" \n",
" # check for placeholder\n",
" if lemma in placeholders.stemmed_placeholders:\n",
" token.add_misc(\"food_type\", \"placeholder\")\n",
" elif predictions[i][j] == 'placeholder':\n",
" token.add_misc(\"food_type\", \"placeholder\")\n",
" \n",
" def annotate_ingredients(self):\n",
" self._annotate_sentences(self._ingredients, self.predict_ingredient_labels())\n",
" \n",
" def annotate_sentences(self):\n",
" self._annotate_sentences(self._sentences, self.predict_labels())\n",
" \n",
" def recipe_id(self):\n",
" return self._recipe_id\n",
" \n",
" def serialize(self):\n",
" result = \"# newdoc\\n\"\n",
" if self._recipe_id is not None:\n",
" result += f\"# id: {self._recipe_id}\\n\"\n",
" \n",
" for sent in self._sentences:\n",
" result += f\"{sent.serialize()}\"\n",
" return result + \"\\n\"\n",
" \n",
" def display_recipe(self):\n",
" display(Markdown(f\"## {self._title}\\n({self._recipe_id})\"))\n",
" display(Markdown(f\"### Ingredients\"))\n",
" display(Markdown(\"\\n\".join([f\" * '{escape_md_chars(self.tokenlist2str(ing))}'\" for ing in self._ingredients])))\n",
" display(Markdown(f\"### Instructions\"))\n",
" display(Markdown(\"\\n\".join([f\" * {escape_md_chars(self.tokenlist2str(ins))}\" for ins in self._sentences])))\n",
" \n",
" def tokenlist2str(self, tokenlist):\n",
" return \" \".join([token['form'] for token in tokenlist])\n",
" \n",
" def tokenarray2str(self, tokenarray):\n",
" return \"\\n\".join([self.tokenlist2str(tokenlist) for tokenlist in tokenarray])\n",
" \n",
" \n",
" def __repr__(self):\n",
" s = \"recipe: \" + (self._recipe_id if self._recipe_id else \"\") + \"\\n\"\n",
" s += \"instructions: \\n\"\n",
" for sent in self._sentences:\n",
" s += \" \".join([token['form'] for token in sent]) + \"\\n\"\n",
" \n",
" s += \"\\nscores:\\n\"\n",
" s += f\"avg_sent_length: {self.avg_sentence_length()}\\n\"\n",
" s += f\"n_instructions: {self.n_instructions()}\\n\"\n",
" s += f\"keyword_ratio: {self.keyword_ratio()}\\n\\n\\n\"\n",
" \n",
" return s"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

211
RecipeAnalysis/Recipe.py Normal file

@@ -0,0 +1,211 @@
#!/usr/bin/env python3
# coding: utf-8
# # Recipe class
import sys
sys.path.append("../")
import settings
import pycrfsuite
import json
import db.db_settings as db_settings
from db.database_connection import DatabaseConnection
from Tagging.conllu_generator import ConlluGenerator
from Tagging.crf_data_generator import *
from IPython.display import Markdown, HTML, display
# * get vocabulary
import importlib.util
# loading ingredients:
spec = importlib.util.spec_from_file_location(
"ingredients", "../" + settings.ingredients_file)
ingredients = importlib.util.module_from_spec(spec)
spec.loader.exec_module(ingredients)
# loading actions:
spec = importlib.util.spec_from_file_location(
"actions", "../" + settings.actions_file)
actions = importlib.util.module_from_spec(spec)
spec.loader.exec_module(actions)
# loading containers
spec = importlib.util.spec_from_file_location(
"containers", "../" + settings.container_file)
containers = importlib.util.module_from_spec(spec)
spec.loader.exec_module(containers)
# loading placeholders
spec = importlib.util.spec_from_file_location(
"placeholders", "../" + settings.placeholder_file)
placeholders = importlib.util.module_from_spec(spec)
spec.loader.exec_module(placeholders)
tagger = pycrfsuite.Tagger()
tagger.open('../Tagging/test.crfsuite')
id_query = "select * from recipes where id like %s"
def escape_md_chars(s):
s = s.replace("*", "\*")
s = s.replace("(", "\(")
s = s.replace(")", "\)")
s = s.replace("[", "\[")
s = s.replace("]", "\]")
s = s.replace("_", "\_")
return s
class Recipe(object):
def __init__(self, recipe_db_id = None):
self._sentences = None
self._title = None
self._part = None
self._ingredients = None
self._recipe_id = recipe_db_id
self._get_from_db()
self.annotate_ingredients()
self.annotate_sentences()
def _get_from_db(self):
result = DatabaseConnection.global_single_query(id_query, (self._recipe_id))
assert len(result) > 0
result = result[0]
self._title = result['title']
self._part = result['part']
raw_sentences = json.loads(result['instructions'])
raw_ingredients = json.loads(result['ingredients'])
# throwing the raw data through our conllu generator to annotate them right
cg_sents = ConlluGenerator(["\n".join(raw_sentences)])
cg_ings = ConlluGenerator(["\n".join(raw_ingredients)])
cg_sents.tokenize()
cg_sents.pos_tagging_and_lemmatization()
cg_ings.tokenize()
cg_ings.pos_tagging_and_lemmatization()
# TODO
self._sentences = cg_sents.get_conllu_elements()[0]
self._ingredients = cg_ings.get_conllu_elements()[0]
#self._sentences = json.loads(result['instructions'])
#self._ingredients = json.loads(result['ingredients'])
def avg_sentence_length(self):
return sum([len(s) for s in self._sentences])/len(self._sentences)
def n_instructions(self):
return len(self._sentences)
def max_sentence_length(self):
return max([len(s) for s in self._sentences])
def keyword_ratio(self):
sentence_ratios = []
for sent in self._sentences:
# FIXME: only works if there are no other misc annotations!
sentence_ratios.append(sum([token['misc'] is not None for token in sent]))
return sum(sentence_ratios) / len(sentence_ratios)
def predict_labels(self):
features = [sent2features(sent) for sent in self._sentences]
labels = [tagger.tag(feat) for feat in features]
return labels
def predict_ingredient_labels(self):
features = [sent2features(sent) for sent in self._ingredients]
labels = [tagger.tag(feat) for feat in features]
return labels
def _annotate_sentences(self, sent_token_list, predictions):
# test whether we predicted a label or found it in our label list
for i, ing in enumerate(sent_token_list):
for j, token in enumerate(ing):
lemma = token['lemma']
# check for ingredient
if lemma in ingredients.ingredients_stemmed:
token.add_misc("food_type", "ingredient")
elif predictions[i][j] == 'ingredient':
token.add_misc("food_type", "ingredient")
# check for action
if lemma in actions.stemmed_cooking_verbs:
token.add_misc("food_type", "action")
elif predictions[i][j] == 'action':
token.add_misc("food_type", "action")
# check for container
if lemma in containers.stemmed_containers:
token.add_misc("food_type", "container")
elif predictions[i][j] == 'container':
token.add_misc("food_type", "container")
# check for placeholder
if lemma in placeholders.stemmed_placeholders:
token.add_misc("food_type", "placeholder")
elif predictions[i][j] == 'placeholder':
token.add_misc("food_type", "placeholder")
def annotate_ingredients(self):
self._annotate_sentences(self._ingredients, self.predict_ingredient_labels())
def annotate_sentences(self):
self._annotate_sentences(self._sentences, self.predict_labels())
def recipe_id(self):
return self._recipe_id
def serialize(self):
result = "# newdoc\n"
if self._recipe_id is not None:
result += f"# id: {self._recipe_id}\n"
for sent in self._sentences:
result += f"{sent.serialize()}"
return result + "\n"
def display_recipe(self):
display(Markdown(f"## {self._title}\n({self._recipe_id})"))
display(Markdown(f"### Ingredients"))
display(Markdown("\n".join([f" * '{escape_md_chars(self.tokenlist2str(ing))}'" for ing in self._ingredients])))
display(Markdown(f"### Instructions"))
display(Markdown("\n".join([f" * {escape_md_chars(self.tokenlist2str(ins))}" for ins in self._sentences])))
def tokenlist2str(self, tokenlist):
return " ".join([token['form'] for token in tokenlist])
def tokenarray2str(self, tokenarray):
return "\n".join([self.tokenlist2str(tokenlist) for tokenlist in tokenarray])
def __repr__(self):
s = "recipe: " + (self._recipe_id if self._recipe_id else "") + "\n"
s += "instructions: \n"
for sent in self._sentences:
s += " ".join([token['form'] for token in sent]) + "\n"
s += "\nscores:\n"
s += f"avg_sent_length: {self.avg_sentence_length()}\n"
s += f"n_instructions: {self.n_instructions()}\n"
s += f"keyword_ratio: {self.keyword_ratio()}\n\n\n"
return s
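As a quick smoke test, the class can also be exercised directly; an illustrative sketch (assuming the DatabaseConnection singleton is already open, as in the Recipe Analysis notebook above, and the script runs inside RecipeAnalysis/):

from Recipe import Recipe

rec = Recipe("eaed08c862")   # the "Filet Mignon Steak Sandwich" displayed above
print(rec)                   # instructions plus avg_sent_length, n_instructions, keyword_ratio

# tokens recognized as ingredient/action/container/placeholder carry a "food_type"
# entry in their misc column (inspecting the private field for illustration only)
for sent in rec._sentences:
    for token in sent:
        if token['misc'] is not None:
            print(token['form'], token['misc'])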

File diff suppressed because it is too large


@@ -20,7 +20,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": 3,
 "metadata": {},
 "outputs": [
 {
@@ -29,7 +29,7 @@
 "TokenList<Dissolve, Jello, in, boiling, water, .>"
 ]
 },
-"execution_count": 4,
+"execution_count": 3,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -40,7 +40,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": 4,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -109,7 +109,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": 5,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -125,7 +125,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": 6,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -135,7 +135,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -145,7 +145,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 8,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -155,7 +155,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": 9,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -172,7 +172,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 10,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -181,7 +181,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": 11,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -218,16 +218,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": 12,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"45442"
+"47538"
 ]
 },
-"execution_count": 15,
+"execution_count": 12,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -245,7 +245,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
+"execution_count": 13,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -254,7 +254,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": 14,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -266,7 +266,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 18,
+"execution_count": 15,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -282,7 +282,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 19,
+"execution_count": 16,
 "metadata": {},
 "outputs": [
 {
@@ -302,7 +302,7 @@
 " 'max_linesearch']"
 ]
 },
-"execution_count": 19,
+"execution_count": 16,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -313,7 +313,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 20,
+"execution_count": 17,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -322,24 +322,24 @@
 },
 {
 "cell_type": "code",
-"execution_count": 20,
+"execution_count": 18,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"{'num': 455,\n",
+"{'num': 830,\n",
 " 'scores': {},\n",
-" 'loss': 110.581675,\n",
+" 'loss': 41171.669638,\n",
-" 'feature_norm': 8.594619,\n",
+" 'feature_norm': 126.341894,\n",
-" 'error_norm': 0.214403,\n",
+" 'error_norm': 85.690855,\n",
-" 'active_features': 87,\n",
+" 'active_features': 6055,\n",
 " 'linesearch_trials': 2,\n",
 " 'linesearch_step': 0.5,\n",
-" 'time': 0.001}"
+" 'time': 0.724}"
 ]
 },
-"execution_count": 20,
+"execution_count": 18,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -357,16 +357,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 21,
+"execution_count": 19,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"<contextlib.closing at 0x7f056bc6d828>"
+"<contextlib.closing at 0x7f26d79813c8>"
 ]
 },
-"execution_count": 21,
+"execution_count": 19,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -378,7 +378,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 22,
+"execution_count": 20,
 "metadata": {},
 "outputs": [
 {
@@ -388,7 +388,7 @@
 "traceback": [
 "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
 "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
-"\u001b[0;32m<ipython-input-22-a88100b49642>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m130\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;31m#print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Predicted:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtagger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Correct: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mY_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+"\u001b[0;32m<ipython-input-20-a88100b49642>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m130\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;31m#print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Predicted:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtagger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Correct: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mY_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
 "\u001b[0;31mIndexError\u001b[0m: list index out of range"
 ]
 }

File diff suppressed because one or more lines are too long


@@ -16,14 +16,15 @@
 "outputs": [],
 "source": [
 "import sys\n",
+"sys.path.append('../')\n",
 "\n",
 "from conllu import parse\n",
-"from tagging_tools import print_visualized_tags\n",
+"from Tagging.tagging_tools import print_visualized_tags\n",
 "\n",
 "from sklearn import preprocessing\n",
 "import numpy as np\n",
 "\n",
-"sys.path.insert(0, '..')\n",
+"\n",
 "import settings # noqa\n",
 "\n",
 "import gzip"


@@ -6,14 +6,15 @@
 # read conllu documents in batches
 import sys
+sys.path.append('../')
 from conllu import parse
-from tagging_tools import print_visualized_tags
+from Tagging.tagging_tools import print_visualized_tags
 from sklearn import preprocessing
 import numpy as np
-sys.path.insert(0, '..')
 import settings # noqa
 import gzip


@@ -25,11 +25,14 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"import sys\n",
+"sys.path.append(\"../\")\n",
+"\n",
 "import nltk\n",
 "from nltk.tag import pos_tag, map_tag\n",
 "from nltk.stem import PorterStemmer\n",
 "from nltk.corpus import stopwords as nltk_stopwords\n",
-"from stemmed_mwe_tokenizer import StemmedMWETokenizer\n",
+"from Tagging.stemmed_mwe_tokenizer import StemmedMWETokenizer\n",
 "from nltk.stem import WordNetLemmatizer"
 ]
 },
@@ -188,7 +191,22 @@
 " result = \"\"\n",
 " for attr in CONLLU_ATTRIBUTES:\n",
 " result += str(self.__getattribute__(attr)) + \" \\t\"\n",
-" return replace_tab(result, 16)"
+" return replace_tab(result, 16)\n",
+" \n",
+" def __getitem__(self, key):\n",
+" \n",
+" # conllu module compability:\n",
+" if key == \"upostag\":\n",
+" key = \"upos\"\n",
+" if key == \"xpostag\":\n",
+" key = \"xpos\"\n",
+" \n",
+" if key not in CONLLU_ATTRIBUTES:\n",
+" return None\n",
+" attr = self.__getattribute__(key)\n",
+" if str(attr) == \"_\":\n",
+" return None\n",
+" return attr"
 ]
 },
 {
@@ -210,6 +228,9 @@
 "\n",
 " def add(self, conllu_element: ConlluElement):\n",
 " self.conllu_elements.append(conllu_element)\n",
+" \n",
+" def get_conllu_elements(self):\n",
+" return self.conllu_elements\n",
 "\n",
 " def __repr__(self):\n",
 " result = \"\"\n",
@@ -243,6 +264,9 @@
 " def add(self, conllu_sentence: ConlluSentence):\n",
 " self.conllu_sentences.append(conllu_sentence)\n",
 " \n",
+" def get_conllu_elements(self):\n",
+" return [c_sent.get_conllu_elements() for c_sent in self.conllu_sentences]\n",
+" \n",
 " def __repr__(self):\n",
 " result = \"# newdoc\\n\"\n",
 " if self.id is not None:\n",
@@ -270,15 +294,18 @@
 "outputs": [],
 "source": [
 "class ConlluGenerator(object):\n",
-" def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer(), ids=None):\n",
+" def __init__(self, documents: list, stemmed_multi_word_tokens=None, stemmer=PorterStemmer(), ids=None):\n",
 " self.documents = documents\n",
 " self.stemmed_multi_word_tokens = stemmed_multi_word_tokens\n",
-" self.mwe_tokenizer = StemmedMWETokenizer(\n",
-" [w.split() for w in stemmed_multi_word_tokens])\n",
+" \n",
+" if self.stemmed_multi_word_tokens is not None:\n",
+" self.mwe_tokenizer = StemmedMWETokenizer(\n",
+" [w.split() for w in stemmed_multi_word_tokens])\n",
+" else:\n",
+" self.mwe_tokenizer = None\n",
+" \n",
 " self.stemmer = stemmer\n",
 "\n",
-" self.id_counter = 0\n",
-"\n",
 " self.conllu_documents = []\n",
 "\n",
 " self.ids = ids\n",
@@ -293,8 +320,11 @@
 " for sent in sentences: \n",
 " if (len(sent) > 0):\n",
 " simple_tokenized = nltk.tokenize.word_tokenize(sent)\n",
-" tokenized_sentences.append(\n",
-" self.mwe_tokenizer.tokenize(simple_tokenized))\n",
+" if self.mwe_tokenizer is None:\n",
+" tokenized_sentences.append(simple_tokenized)\n",
+" else:\n",
+" tokenized_sentences.append(\n",
+" self.mwe_tokenizer.tokenize(simple_tokenized))\n",
 " tokenized_documents.append(tokenized_sentences)\n",
 " \n",
 " # now create initial colln-u elemnts\n",
@@ -304,13 +334,14 @@
 " else:\n",
 " conllu_doc = ConlluDocument()\n",
 " for sent in doc:\n",
+" token_id = 0\n",
 " conllu_sent = ConlluSentence()\n",
 " for token in sent:\n",
+" token_id += 1\n",
 " conllu_sent.add(ConlluElement(\n",
-" id=self.id_counter + 1,\n",
+" id=token_id,\n",
 " form=token,\n",
 " ))\n",
-" self.id_counter += 1\n",
 " conllu_doc.add(conllu_sent)\n",
 " self.conllu_documents.append(conllu_doc)\n",
 " i += 1\n",
@@ -340,6 +371,9 @@
 " for elem in conllu_sent.conllu_elements:\n",
 " if elem.lemma in stemmed_keyword_list:\n",
 " elem.add_misc(key, value)\n",
+" \n",
+" def get_conllu_elements(self):\n",
+" return [doc.get_conllu_elements() for doc in self.conllu_documents]\n",
 "\n",
 " def __repr__(self):\n",
 " result = \"\"\n",


@@ -10,11 +10,14 @@
 # ## imports and settings
+import sys
+sys.path.append("../")
 import nltk
 from nltk.tag import pos_tag, map_tag
 from nltk.stem import PorterStemmer
 from nltk.corpus import stopwords as nltk_stopwords
-from stemmed_mwe_tokenizer import StemmedMWETokenizer
+from Tagging.stemmed_mwe_tokenizer import StemmedMWETokenizer
 from nltk.stem import WordNetLemmatizer
@@ -129,6 +132,21 @@ class ConlluElement(object):
 result += str(self.__getattribute__(attr)) + " \t"
 return replace_tab(result, 16)
+ def __getitem__(self, key):
+ # conllu module compability:
+ if key == "upostag":
+ key = "upos"
+ if key == "xpostag":
+ key = "xpos"
+ if key not in CONLLU_ATTRIBUTES:
+ return None
+ attr = self.__getattribute__(key)
+ if str(attr) == "_":
+ return None
+ return attr
 # ## Conllu Sentence Class
@@ -139,6 +157,9 @@ class ConlluSentence(object):
 def add(self, conllu_element: ConlluElement):
 self.conllu_elements.append(conllu_element)
+ def get_conllu_elements(self):
+ return self.conllu_elements
 def __repr__(self):
 result = ""
 for elem in self.conllu_elements:
@@ -160,6 +181,9 @@ class ConlluDocument(object):
 def add(self, conllu_sentence: ConlluSentence):
 self.conllu_sentences.append(conllu_sentence)
+ def get_conllu_elements(self):
+ return [c_sent.get_conllu_elements() for c_sent in self.conllu_sentences]
 def __repr__(self):
 result = "# newdoc\n"
 if self.id is not None:
@@ -176,14 +200,17 @@ class ConlluDocument(object):
 # ## Conllu Generator Class
 class ConlluGenerator(object):
- def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer(), ids=None):
+ def __init__(self, documents: list, stemmed_multi_word_tokens=None, stemmer=PorterStemmer(), ids=None):
 self.documents = documents
 self.stemmed_multi_word_tokens = stemmed_multi_word_tokens
- self.mwe_tokenizer = StemmedMWETokenizer(
- [w.split() for w in stemmed_multi_word_tokens])
- self.stemmer = stemmer
- self.id_counter = 0
+ if self.stemmed_multi_word_tokens is not None:
+ self.mwe_tokenizer = StemmedMWETokenizer(
+ [w.split() for w in stemmed_multi_word_tokens])
+ else:
+ self.mwe_tokenizer = None
+ self.stemmer = stemmer
 self.conllu_documents = []
@@ -199,8 +226,11 @@ class ConlluGenerator(object):
 for sent in sentences:
 if (len(sent) > 0):
 simple_tokenized = nltk.tokenize.word_tokenize(sent)
- tokenized_sentences.append(
- self.mwe_tokenizer.tokenize(simple_tokenized))
+ if self.mwe_tokenizer is None:
+ tokenized_sentences.append(simple_tokenized)
+ else:
+ tokenized_sentences.append(
+ self.mwe_tokenizer.tokenize(simple_tokenized))
 tokenized_documents.append(tokenized_sentences)
 # now create initial colln-u elemnts
@@ -210,13 +240,14 @@ class ConlluGenerator(object):
 else:
 conllu_doc = ConlluDocument()
 for sent in doc:
+ token_id = 0
 conllu_sent = ConlluSentence()
 for token in sent:
+ token_id += 1
 conllu_sent.add(ConlluElement(
- id=self.id_counter + 1,
+ id=token_id,
 form=token,
 ))
- self.id_counter += 1
 conllu_doc.add(conllu_sent)
 self.conllu_documents.append(conllu_doc)
 i += 1
@@ -247,6 +278,9 @@ class ConlluGenerator(object):
 if elem.lemma in stemmed_keyword_list:
 elem.add_misc(key, value)
+ def get_conllu_elements(self):
+ return [doc.get_conllu_elements() for doc in self.conllu_documents]
 def __repr__(self):
 result = ""
 for document in self.conllu_documents:
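The net effect of this change is that ConlluGenerator can now be used without a multi-word-token list and numbers tokens per sentence, which is exactly how RecipeAnalysis/Recipe.py calls it. An illustrative sketch (assuming the working directory is a subdirectory of the project root, as for Recipe.py, so that "../" resolves correctly):

import sys
sys.path.append("../")

from Tagging.conllu_generator import ConlluGenerator

instructions = ["Dissolve Jello in boiling water.", "Stir well."]

# no stemmed_multi_word_tokens -> plain NLTK word tokenization is used
cg = ConlluGenerator(["\n".join(instructions)])
cg.tokenize()
cg.pos_tagging_and_lemmatization()

# one input document -> a list of sentences, each a list of ConlluElement objects
sentences = cg.get_conllu_elements()[0]
print(len(sentences), [tok['form'] for tok in sentences[0]])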


@@ -13,7 +13,17 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"import conllu_batch_generator as cbg"
+"import sys\n",
+"sys.path.append(\"../\")"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 2,
+"metadata": {},
+"outputs": [],
+"source": [
+"import Tagging.conllu_batch_generator as cbg"
 ]
 },
 {


@@ -3,7 +3,11 @@
 # # crf data Generator
-import conllu_batch_generator as cbg
+import sys
+sys.path.append("../")
+import Tagging.conllu_batch_generator as cbg
 def word2features(sent, i):


@@ -46,6 +46,12 @@
 "containers = importlib.util.module_from_spec(spec)\n",
 "spec.loader.exec_module(containers)\n",
 "\n",
+"# loading placeholders\n",
+"spec = importlib.util.spec_from_file_location(\n",
+" \"placeholders\", \"../\" + settings.placeholder_file)\n",
+"placeholders = importlib.util.module_from_spec(spec)\n",
+"spec.loader.exec_module(placeholders)\n",
+"\n",
 "# skipping recipes:\n",
 "n_skipped_recipes = int(sys.argv[1]) if len(sys.argv) > 1 else 0\n",
 "print(\"start reading at recipe \" + str(n_skipped_recipes))\n",
@@ -80,7 +86,8 @@
 " \n",
 " cg.add_misc_value_by_list(\"food_type\", \"ingredient\", [w.replace(\" \",\"_\") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed)\n",
 " cg.add_misc_value_by_list(\"food_type\", \"action\", actions.stemmed_cooking_verbs)\n",
-" cg.add_misc_value_by_list(\"food_type\", \"containers\", containers.containers)\n",
+" cg.add_misc_value_by_list(\"food_type\", \"containers\", containers.stemmed_containers)\n",
+" cg.add_misc_value_by_list(\"food_type\", \"placeholders\", placeholders.stemmed_placeholders)\n",
 "\n",
 " savefile.write(str(cg))"
 ]


@@ -30,6 +30,12 @@ spec = importlib.util.spec_from_file_location(
 containers = importlib.util.module_from_spec(spec)
 spec.loader.exec_module(containers)
+# loading placeholders
+spec = importlib.util.spec_from_file_location(
+ "placeholders", "../" + settings.placeholder_file)
+placeholders = importlib.util.module_from_spec(spec)
+spec.loader.exec_module(placeholders)
 # skipping recipes:
 n_skipped_recipes = int(sys.argv[1]) if len(sys.argv) > 1 else 0
 print("start reading at recipe " + str(n_skipped_recipes))
@@ -58,7 +64,8 @@ def process_instructions(instructions: list, document_ids=None):
 cg.add_misc_value_by_list("food_type", "ingredient", [w.replace(" ","_") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed)
 cg.add_misc_value_by_list("food_type", "action", actions.stemmed_cooking_verbs)
-cg.add_misc_value_by_list("food_type", "containers", containers.containers)
+cg.add_misc_value_by_list("food_type", "containers", containers.stemmed_containers)
+cg.add_misc_value_by_list("food_type", "placeholders", placeholders.stemmed_placeholders)
 savefile.write(str(cg))

Binary file not shown.

File diff suppressed because one or more lines are too long

1298
db/create_database.ipynb Normal file

File diff suppressed because it is too large


@@ -0,0 +1,304 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Create Database with Docker"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* download mariadb image"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Using default tag: latest\n",
"latest: Pulling from mariadb/server\n",
"Digest: sha256:b5762c478d38ae54c464e3ab63e10e0c3f951633ed7619d52fa3c22bcf36218a\n",
"Status: Image is up to date for mariadb/server:latest\n"
]
}
],
"source": [
"%%bash \n",
"docker pull mariadb/server"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* create and run container"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"09342f7c540b2b6224bb96b2e3a542a5ff144e6bbc1cf243ae33b0dab9262c47\n"
]
}
],
"source": [
"%%bash\n",
"docker run --name recipe_db -e MYSQL_ROOT_PASSWORD=\"g00d_r3c1p3s\" -d mariadb/server"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* checking docker logs"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Initializing database\n",
"\n",
"\n",
"PLEASE REMEMBER TO SET A PASSWORD FOR THE MariaDB root USER !\n",
"To do so, start the server, then issue the following commands:\n",
"\n",
"'/usr/bin/mysqladmin' -u root password 'new-password'\n",
"'/usr/bin/mysqladmin' -u root -h password 'new-password'\n",
"\n",
"Alternatively you can run:\n",
"'/usr/bin/mysql_secure_installation'\n",
"\n",
"which will also give you the option of removing the test\n",
"databases and anonymous user created by default. This is\n",
"strongly recommended for production servers.\n",
"\n",
"See the MariaDB Knowledgebase at http://mariadb.com/kb or the\n",
"MySQL manual for more instructions.\n",
"\n",
"Please report any problems at http://mariadb.org/jira\n",
"\n",
"The latest information about MariaDB is available at http://mariadb.org/.\n",
"You can find additional information about the MySQL part at:\n",
"http://dev.mysql.com\n",
"Consider joining MariaDB's strong and vibrant community:\n",
"https://mariadb.org/get-involved/\n",
"\n",
"Database initialized\n",
"MySQL init process in progress...\n",
"\n",
"\n",
"MySQL init process done. Ready for start up.\n",
"\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"2019-08-08 14:46:43 0 [Note] mysqld (mysqld 10.3.13-MariaDB-1:10.3.13+maria~bionic) starting as process 104 ...\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Using Linux native AIO\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Mutexes and rw_locks use GCC atomic builtins\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Uses event mutexes\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Compressed tables use zlib 1.2.11\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Number of pools: 1\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Using SSE2 crc32 instructions\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Initializing buffer pool, total size = 256M, instances = 1, chunk size = 128M\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Completed initialization of buffer pool\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority().\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: 128 out of 128 rollback segments are active.\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Creating shared tablespace for temporary tables\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ...\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: File './ibtmp1' size is now 12 MB.\n",
"2019-08-08 14:46:43 0 [Note] InnoDB: Waiting for purge to start\n",
"2019-08-08 14:46:44 0 [Note] InnoDB: 10.3.13 started; log sequence number 1630815; transaction id 21\n",
"2019-08-08 14:46:44 0 [Note] InnoDB: Loading buffer pool(s) from /var/lib/mysql/ib_buffer_pool\n",
"2019-08-08 14:46:44 0 [Note] Plugin 'FEEDBACK' is disabled.\n",
"2019-08-08 14:46:44 0 [Note] InnoDB: Buffer pool(s) load completed at 190808 14:46:44\n",
"2019-08-08 14:46:44 0 [Warning] 'user' entry 'root@09342f7c540b' ignored in --skip-name-resolve mode.\n",
"2019-08-08 14:46:44 0 [Warning] 'user' entry '@09342f7c540b' ignored in --skip-name-resolve mode.\n",
"2019-08-08 14:46:44 0 [Warning] 'proxies_priv' entry '@% root@09342f7c540b' ignored in --skip-name-resolve mode.\n",
"2019-08-08 14:46:44 0 [Note] Reading of all Master_info entries succeded\n",
"2019-08-08 14:46:44 0 [Note] Added new Master_info '' to hash table\n",
"2019-08-08 14:46:44 0 [Note] mysqld: ready for connections.\n",
"Version: '10.3.13-MariaDB-1:10.3.13+maria~bionic' socket: '/var/run/mysqld/mysqld.sock' port: 0 mariadb.org binary distribution\n",
"Warning: Unable to load '/usr/share/zoneinfo/leap-seconds.list' as time zone. Skipping it.\n",
"2019-08-08 14:46:46 10 [Warning] 'proxies_priv' entry '@% root@09342f7c540b' ignored in --skip-name-resolve mode.\n",
"2019-08-08 14:46:46 0 [Note] mysqld (initiated by: unknown): Normal shutdown\n",
"2019-08-08 14:46:46 0 [Note] Event Scheduler: Purging the queue. 0 events\n",
"2019-08-08 14:46:46 0 [Note] InnoDB: FTS optimize thread exiting.\n",
"2019-08-08 14:46:46 0 [Note] InnoDB: Starting shutdown...\n",
"2019-08-08 14:46:46 0 [Note] InnoDB: Dumping buffer pool(s) to /var/lib/mysql/ib_buffer_pool\n",
"2019-08-08 14:46:46 0 [Note] InnoDB: Buffer pool(s) dump completed at 190808 14:46:46\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Shutdown completed; log sequence number 1630824; transaction id 24\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Removed temporary tablespace data file: \"ibtmp1\"\n",
"2019-08-08 14:46:47 0 [Note] mysqld: Shutdown complete\n",
"\n",
"2019-08-08 14:46:47 0 [Note] mysqld (mysqld 10.3.13-MariaDB-1:10.3.13+maria~bionic) starting as process 1 ...\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Using Linux native AIO\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Mutexes and rw_locks use GCC atomic builtins\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Uses event mutexes\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Compressed tables use zlib 1.2.11\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Number of pools: 1\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Using SSE2 crc32 instructions\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Initializing buffer pool, total size = 256M, instances = 1, chunk size = 128M\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Completed initialization of buffer pool\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority().\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: 128 out of 128 rollback segments are active.\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Creating shared tablespace for temporary tables\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ...\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: File './ibtmp1' size is now 12 MB.\n",
"2019-08-08 14:46:47 0 [Note] InnoDB: Waiting for purge to start\n",
"2019-08-08 14:46:48 0 [Note] InnoDB: 10.3.13 started; log sequence number 1630824; transaction id 21\n",
"2019-08-08 14:46:48 0 [Note] InnoDB: Loading buffer pool(s) from /var/lib/mysql/ib_buffer_pool\n",
"2019-08-08 14:46:48 0 [Note] Plugin 'FEEDBACK' is disabled.\n",
"2019-08-08 14:46:48 0 [Note] Server socket created on IP: '::'.\n",
"2019-08-08 14:46:48 0 [Note] InnoDB: Buffer pool(s) load completed at 190808 14:46:48\n",
"2019-08-08 14:46:48 0 [Warning] 'proxies_priv' entry '@% root@09342f7c540b' ignored in --skip-name-resolve mode.\n",
"2019-08-08 14:46:48 0 [Note] Reading of all Master_info entries succeded\n",
"2019-08-08 14:46:48 0 [Note] Added new Master_info '' to hash table\n",
"2019-08-08 14:46:48 0 [Note] mysqld: ready for connections.\n",
"Version: '10.3.13-MariaDB-1:10.3.13+maria~bionic' socket: '/var/run/mysqld/mysqld.sock' port: 3306 mariadb.org binary distribution\n"
]
}
],
"source": [
"%%bash\n",
"docker logs recipe_db"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* get local db address. **NOTE**: dont forget to set this address in settings.py!"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"172.17.0.2\n"
]
}
],
"source": [
"%%bash\n",
"docker inspect --format '{{ .NetworkSettings.IPAddress }}' recipe_db"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* and create databse"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"mysql: [Warning] Using a password on the command line interface can be insecure.\n"
]
}
],
"source": [
"%%bash\n",
"mysql -h 172.17.0.2 -u root --password=\"g00d_r3c1p3s\" -e \"CREATE DATABASE recipe_db\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* check whether database is created:"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Database\n",
"information_schema\n",
"mysql\n",
"performance_schema\n",
"recipe_db\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"mysql: [Warning] Using a password on the command line interface can be insecure.\n"
]
}
],
"source": [
"%%bash\n",
"mysql -h 172.17.0.2 -u root --password=\"g00d_r3c1p3s\" -e \"show databases\""
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}


@@ -0,0 +1,190 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import pymysql.cursors\n",
"import sys\n",
"\n",
"\n",
"def get_sql_time(datetime_object):\n",
" return datetime_object.strftime('%Y-%m-%d %H:%M:%S')\n",
"\n",
"\n",
"class SQLInjectionError(Exception):\n",
" def __init__(self):\n",
"\n",
" # Call the base class constructor with the parameters it needs\n",
" super().__init__(\"Detected possible SQL injection attack!\")\n",
"\n",
"\n",
"class DatabaseConnection(object):\n",
" \"\"\"\n",
" a singleton class for a global database connection\n",
" \"\"\"\n",
"\n",
" instance = None\n",
"\n",
" @staticmethod\n",
" def global_cursor():\n",
" assert DatabaseConnection.instance is not None\n",
" return DatabaseConnection.instance.get_cursor()\n",
"\n",
" @staticmethod\n",
" def global_close():\n",
" assert DatabaseConnection.instance is not None\n",
" DatabaseConnection.instance.close()\n",
"\n",
" @staticmethod\n",
" def global_commit():\n",
" assert DatabaseConnection.instance is not None\n",
" DatabaseConnection.instance.commit()\n",
"\n",
" @staticmethod\n",
" def global_ping():\n",
" assert DatabaseConnection.instance is not None\n",
" DatabaseConnection.instance.connection.ping()\n",
"\n",
" @staticmethod\n",
" def global_single_query(query, params=None):\n",
" DatabaseConnection.global_ping()\n",
" if ';' in query:\n",
" # Possible injection!\n",
" raise SQLInjectionError()\n",
"\n",
" with DatabaseConnection.global_cursor() as c:\n",
" if params is None:\n",
" c.execute(query)\n",
" else:\n",
" c.execute(query, params)\n",
"\n",
" return c.fetchall()\n",
"\n",
" @staticmethod\n",
" def global_single_execution(sql_statement, params=None):\n",
" DatabaseConnection.global_ping()\n",
" if ';' in sql_statement:\n",
" # Possible injection detected!\n",
" raise SQLInjectionError()\n",
"\n",
" with DatabaseConnection.global_cursor() as c:\n",
" if params is None:\n",
" c.execute(sql_statement)\n",
" else:\n",
" c.execute(sql_statement, params)\n",
" DatabaseConnection.global_commit()\n",
"\n",
" def __init__(self,\n",
" host: str,\n",
" port: int,\n",
" user: str,\n",
" password: str,\n",
" db: str,\n",
" charset: str):\n",
"\n",
" assert DatabaseConnection.instance is None\n",
" try:\n",
" self.connection = pymysql.connect(\n",
" host=host,\n",
" port=port,\n",
" user=user,\n",
" password=password,\n",
" db=db,\n",
" charset=charset,\n",
" cursorclass=pymysql.cursors.DictCursor)\n",
" DatabaseConnection.instance = self\n",
" except Exception as e:\n",
" sys.stderr.write(\"could not connect to database '\" +\n",
" str(db) +\n",
" \"' at \" +\n",
" user +\n",
" \"@\" +\n",
" host +\n",
" \":\" +\n",
" str(port) +\n",
" \"\\nCheck the configuration in settings.py!\\n\")\n",
" raise Exception('could not connect to database')\n",
"\n",
" def get_cursor(self):\n",
" return self.connection.cursor()\n",
"\n",
" def close(self):\n",
" self.connection.close()\n",
" DatabaseConnection.instance = None\n",
"\n",
" def commit(self):\n",
" self.connection.commit()\n",
"\n",
"\n",
"def test_connection():\n",
" import db_settings as settings\n",
" DatabaseConnection(settings.db_host,\n",
" settings.db_port,\n",
" settings.db_user,\n",
" settings.db_pw,\n",
" settings.db_db,\n",
" settings.db_charset)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"test_connection()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"()"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"DatabaseConnection.global_single_query(\"show tables\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

125
db/database_connection.py Normal file

@@ -0,0 +1,125 @@
#!/usr/bin/env python3
# coding: utf-8
import pymysql.cursors
import sys
def get_sql_time(datetime_object):
return datetime_object.strftime('%Y-%m-%d %H:%M:%S')
class SQLInjectionError(Exception):
def __init__(self):
# Call the base class constructor with the parameters it needs
super().__init__("Detected possible SQL injection attack!")
class DatabaseConnection(object):
"""
a singleton class for a global database connection
"""
instance = None
@staticmethod
def global_cursor():
assert DatabaseConnection.instance is not None
return DatabaseConnection.instance.get_cursor()
@staticmethod
def global_close():
assert DatabaseConnection.instance is not None
DatabaseConnection.instance.close()
@staticmethod
def global_commit():
assert DatabaseConnection.instance is not None
DatabaseConnection.instance.commit()
@staticmethod
def global_ping():
assert DatabaseConnection.instance is not None
DatabaseConnection.instance.connection.ping()
@staticmethod
def global_single_query(query, params=None):
DatabaseConnection.global_ping()
if ';' in query:
# Possible injection!
raise SQLInjectionError()
with DatabaseConnection.global_cursor() as c:
if params is None:
c.execute(query)
else:
c.execute(query, params)
return c.fetchall()
@staticmethod
def global_single_execution(sql_statement, params=None):
DatabaseConnection.global_ping()
if ';' in sql_statement:
# Possible injection detected!
raise SQLInjectionError()
with DatabaseConnection.global_cursor() as c:
if params is None:
c.execute(sql_statement)
else:
c.execute(sql_statement, params)
DatabaseConnection.global_commit()
def __init__(self,
host: str,
port: int,
user: str,
password: str,
db: str,
charset: str):
assert DatabaseConnection.instance is None
try:
self.connection = pymysql.connect(
host=host,
port=port,
user=user,
password=password,
db=db,
charset=charset,
cursorclass=pymysql.cursors.DictCursor)
DatabaseConnection.instance = self
except Exception as e:
sys.stderr.write("could not connect to database '" +
str(db) +
"' at " +
user +
"@" +
host +
":" +
str(port) +
"\nCheck the configuration in settings.py!\n")
raise Exception('could not connect to database')
def get_cursor(self):
return self.connection.cursor()
def close(self):
self.connection.close()
DatabaseConnection.instance = None
def commit(self):
self.connection.commit()
def test_connection():
import settings
DatabaseConnection(settings.db_host,
settings.db_port,
settings.db_user,
settings.db_pw,
settings.db_db,
settings.db_charset)
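A usage sketch for the singleton (illustrative only; credentials come from db/db_settings.py, shown below): open the connection once, then issue parametrized queries from anywhere through the static helpers.

import db.db_settings as db_settings
from db.database_connection import DatabaseConnection

# construct once; afterwards the class-level instance serves all callers
DatabaseConnection(db_settings.db_host,
                   db_settings.db_port,
                   db_settings.db_user,
                   db_settings.db_pw,
                   db_settings.db_db,
                   db_settings.db_charset)

# parametrized query; note that any statement containing ';' is rejected
rows = DatabaseConnection.global_single_query(
    "select id, title from recipes where id like %s", ("eaed08c862",))
print(rows)

DatabaseConnection.global_close()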

48
db/db_settings.ipynb Normal file

@@ -0,0 +1,48 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"db_host = \"172.17.0.2\"\n",
"db_port = 3306\n",
"\n",
"db_user = \"root\"\n",
"db_pw = \"g00d_r3c1p3s\"\n",
"db_db = \"recipe_db\"\n",
"\n",
"db_charset = 'utf8mb4'"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}

13
db/db_settings.py Normal file

@@ -0,0 +1,13 @@
#!/usr/bin/env python3
# coding: utf-8
db_host = "172.17.0.2"
db_port = 3306
db_user = "root"
db_pw = "g00d_r3c1p3s"
db_db = "recipe_db"
db_charset = 'utf8mb4'

2247
reports/demos.ipynb Normal file

File diff suppressed because one or more lines are too long


@@ -9,5 +9,6 @@ fooddb_folder = data_root + "foodb_2017_06_29_csv/"
 ingredients_file = data_root + "ingredients.py"
 actions_file = data_root + "actions.py"
 container_file = data_root + "containers.py"
+placeholder_file = data_root + "placeholders.py"
 gzipped_conllu_data_root = data_root + "1M_recipes_conllu/"