moved 1 million recipes into the database, started coarse tree generation for recipe analysis

Jonas Weinz
2019-08-10 23:10:30 +02:00
parent cbccf169bb
commit 09eb58e703
24 changed files with 7571 additions and 3663 deletions

File diff suppressed because it is too large

View File

@@ -20,7 +20,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -29,7 +29,7 @@
"TokenList<Dissolve, Jello, in, boiling, water, .>"
]
},
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@@ -40,7 +40,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
@@ -109,7 +109,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -125,7 +125,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
@@ -135,7 +135,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
@@ -145,7 +145,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
@@ -155,7 +155,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
@@ -172,7 +172,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
@@ -181,7 +181,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
@@ -218,16 +218,16 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"45442"
"47538"
]
},
"execution_count": 15,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -245,7 +245,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
@@ -254,7 +254,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 14,
"metadata": {},
"outputs": [],
"source": [
@@ -266,7 +266,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
@@ -282,7 +282,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 16,
"metadata": {},
"outputs": [
{
@@ -302,7 +302,7 @@
" 'max_linesearch']"
]
},
"execution_count": 19,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@@ -313,7 +313,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 17,
"metadata": {},
"outputs": [],
"source": [
@@ -322,24 +322,24 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 18,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'num': 455,\n",
"{'num': 830,\n",
" 'scores': {},\n",
" 'loss': 110.581675,\n",
" 'feature_norm': 8.594619,\n",
" 'error_norm': 0.214403,\n",
" 'active_features': 87,\n",
" 'loss': 41171.669638,\n",
" 'feature_norm': 126.341894,\n",
" 'error_norm': 85.690855,\n",
" 'active_features': 6055,\n",
" 'linesearch_trials': 2,\n",
" 'linesearch_step': 0.5,\n",
" 'time': 0.001}"
" 'time': 0.724}"
]
},
"execution_count": 20,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
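(Note: the dictionary above is iteration-log output; its keys — num, loss, feature_norm, error_norm, active_features, linesearch_trials, linesearch_step, time — match python-crfsuite's trainer.logparser.last_iteration, and the contextlib.closing result a few cells further down matches pycrfsuite.Tagger.open. A minimal, self-contained training sketch under that assumption; the toy feature dicts stand in for the real word2features output from crf_data_generator:

import pycrfsuite

# toy stand-ins for the real X_train/Y_train sequences
X_train = [[{'word.lower()': 'dissolve'}, {'word.lower()': 'jello'}],
           [{'word.lower()': 'boil'}, {'word.lower()': 'water'}]]
Y_train = [['action', 'ingredient'], ['action', 'ingredient']]

trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(X_train, Y_train):
    trainer.append(xseq, yseq)
trainer.train('test.crfsuite')

# per-iteration statistics, shaped like the dict shown above
print(trainer.logparser.last_iteration)

# tagging; Tagger.open() returns a contextlib.closing wrapper,
# which explains the execute_result shown below
tagger = pycrfsuite.Tagger()
tagger.open('test.crfsuite')
print(tagger.tag(X_train[0])))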
@@ -357,16 +357,16 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 19,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<contextlib.closing at 0x7f056bc6d828>"
"<contextlib.closing at 0x7f26d79813c8>"
]
},
"execution_count": 21,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -378,7 +378,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 20,
"metadata": {},
"outputs": [
{
@@ -388,7 +388,7 @@
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-22-a88100b49642>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m130\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;31m#print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Predicted:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtagger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Correct: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mY_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-20-a88100b49642>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m100\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m130\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;31m#print(' '.join(feature2tokens(X_test[i])), end='\\n\\n')\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Predicted:\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtagger\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtag\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mX_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Correct: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m' '\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mjoin\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mY_test\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mIndexError\u001b[0m: list index out of range"
]
}

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -16,14 +16,15 @@
"outputs": [],
"source": [
"import sys\n",
"sys.path.append('../')\n",
"\n",
"from conllu import parse\n",
"from tagging_tools import print_visualized_tags\n",
"from Tagging.tagging_tools import print_visualized_tags\n",
"\n",
"from sklearn import preprocessing\n",
"import numpy as np\n",
"\n",
"sys.path.insert(0, '..')\n",
"\n",
"import settings # noqa\n",
"\n",
"import gzip"

View File

@@ -6,14 +6,15 @@
# read conllu documents in batches
import sys
sys.path.append('../')
from conllu import parse
from tagging_tools import print_visualized_tags
from Tagging.tagging_tools import print_visualized_tags
from sklearn import preprocessing
import numpy as np
sys.path.insert(0, '..')
import settings # noqa
import gzip

View File

@@ -25,11 +25,14 @@
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append(\"../\")\n",
"\n",
"import nltk\n",
"from nltk.tag import pos_tag, map_tag\n",
"from nltk.stem import PorterStemmer\n",
"from nltk.corpus import stopwords as nltk_stopwords\n",
"from stemmed_mwe_tokenizer import StemmedMWETokenizer\n",
"from Tagging.stemmed_mwe_tokenizer import StemmedMWETokenizer\n",
"from nltk.stem import WordNetLemmatizer"
]
},
@@ -188,7 +191,22 @@
" result = \"\"\n",
" for attr in CONLLU_ATTRIBUTES:\n",
" result += str(self.__getattribute__(attr)) + \" \\t\"\n",
" return replace_tab(result, 16)"
" return replace_tab(result, 16)\n",
" \n",
" def __getitem__(self, key):\n",
" \n",
" # conllu module compability:\n",
" if key == \"upostag\":\n",
" key = \"upos\"\n",
" if key == \"xpostag\":\n",
" key = \"xpos\"\n",
" \n",
" if key not in CONLLU_ATTRIBUTES:\n",
" return None\n",
" attr = self.__getattribute__(key)\n",
" if str(attr) == \"_\":\n",
" return None\n",
" return attr"
]
},
{
@@ -210,6 +228,9 @@
"\n",
" def add(self, conllu_element: ConlluElement):\n",
" self.conllu_elements.append(conllu_element)\n",
" \n",
" def get_conllu_elements(self):\n",
" return self.conllu_elements\n",
"\n",
" def __repr__(self):\n",
" result = \"\"\n",
@@ -243,6 +264,9 @@
" def add(self, conllu_sentence: ConlluSentence):\n",
" self.conllu_sentences.append(conllu_sentence)\n",
" \n",
" def get_conllu_elements(self):\n",
" return [c_sent.get_conllu_elements() for c_sent in self.conllu_sentences]\n",
" \n",
" def __repr__(self):\n",
" result = \"# newdoc\\n\"\n",
" if self.id is not None:\n",
@@ -270,15 +294,18 @@
"outputs": [],
"source": [
"class ConlluGenerator(object):\n",
" def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer(), ids=None):\n",
" def __init__(self, documents: list, stemmed_multi_word_tokens=None, stemmer=PorterStemmer(), ids=None):\n",
" self.documents = documents\n",
" self.stemmed_multi_word_tokens = stemmed_multi_word_tokens\n",
" self.mwe_tokenizer = StemmedMWETokenizer(\n",
" [w.split() for w in stemmed_multi_word_tokens])\n",
" \n",
" if self.stemmed_multi_word_tokens is not None:\n",
" self.mwe_tokenizer = StemmedMWETokenizer(\n",
" [w.split() for w in stemmed_multi_word_tokens])\n",
" else:\n",
" self.mwe_tokenizer = None\n",
" \n",
" self.stemmer = stemmer\n",
"\n",
" self.id_counter = 0\n",
"\n",
" self.conllu_documents = []\n",
"\n",
" self.ids = ids\n",
@@ -293,8 +320,11 @@
" for sent in sentences: \n",
" if (len(sent) > 0):\n",
" simple_tokenized = nltk.tokenize.word_tokenize(sent)\n",
" tokenized_sentences.append(\n",
" self.mwe_tokenizer.tokenize(simple_tokenized))\n",
" if self.mwe_tokenizer is None:\n",
" tokenized_sentences.append(simple_tokenized)\n",
" else:\n",
" tokenized_sentences.append(\n",
" self.mwe_tokenizer.tokenize(simple_tokenized))\n",
" tokenized_documents.append(tokenized_sentences)\n",
" \n",
" # now create initial colln-u elemnts\n",
@@ -304,13 +334,14 @@
" else:\n",
" conllu_doc = ConlluDocument()\n",
" for sent in doc:\n",
" token_id = 0\n",
" conllu_sent = ConlluSentence()\n",
" for token in sent:\n",
" token_id += 1\n",
" conllu_sent.add(ConlluElement(\n",
" id=self.id_counter + 1,\n",
" id=token_id,\n",
" form=token,\n",
" ))\n",
" self.id_counter += 1\n",
" conllu_doc.add(conllu_sent)\n",
" self.conllu_documents.append(conllu_doc)\n",
" i += 1\n",
@@ -340,6 +371,9 @@
" for elem in conllu_sent.conllu_elements:\n",
" if elem.lemma in stemmed_keyword_list:\n",
" elem.add_misc(key, value)\n",
" \n",
" def get_conllu_elements(self):\n",
" return [doc.get_conllu_elements() for doc in self.conllu_documents]\n",
"\n",
" def __repr__(self):\n",
" result = \"\"\n",

View File

@@ -10,11 +10,14 @@
# ## imports and settings
import sys
sys.path.append("../")
import nltk
from nltk.tag import pos_tag, map_tag
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords as nltk_stopwords
from stemmed_mwe_tokenizer import StemmedMWETokenizer
from Tagging.stemmed_mwe_tokenizer import StemmedMWETokenizer
from nltk.stem import WordNetLemmatizer
@@ -128,6 +131,21 @@ class ConlluElement(object):
for attr in CONLLU_ATTRIBUTES:
result += str(self.__getattribute__(attr)) + " \t"
return replace_tab(result, 16)
def __getitem__(self, key):
# conllu module compatibility:
if key == "upostag":
key = "upos"
if key == "xpostag":
key = "xpos"
if key not in CONLLU_ATTRIBUTES:
return None
attr = self.__getattribute__(key)
if str(attr) == "_":
return None
return attr
# ## Conllu Sentence Class
@@ -138,6 +156,9 @@ class ConlluSentence(object):
def add(self, conllu_element: ConlluElement):
self.conllu_elements.append(conllu_element)
def get_conllu_elements(self):
return self.conllu_elements
def __repr__(self):
result = ""
@@ -160,6 +181,9 @@ class ConlluDocument(object):
def add(self, conllu_sentence: ConlluSentence):
self.conllu_sentences.append(conllu_sentence)
def get_conllu_elements(self):
return [c_sent.get_conllu_elements() for c_sent in self.conllu_sentences]
def __repr__(self):
result = "# newdoc\n"
if self.id is not None:
@@ -176,15 +200,18 @@ class ConlluDocument(object):
# ## Conllu Generator Class
class ConlluGenerator(object):
def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer(), ids=None):
def __init__(self, documents: list, stemmed_multi_word_tokens=None, stemmer=PorterStemmer(), ids=None):
self.documents = documents
self.stemmed_multi_word_tokens = stemmed_multi_word_tokens
self.mwe_tokenizer = StemmedMWETokenizer(
[w.split() for w in stemmed_multi_word_tokens])
if self.stemmed_multi_word_tokens is not None:
self.mwe_tokenizer = StemmedMWETokenizer(
[w.split() for w in stemmed_multi_word_tokens])
else:
self.mwe_tokenizer = None
self.stemmer = stemmer
self.id_counter = 0
self.conllu_documents = []
self.ids = ids
@@ -199,8 +226,11 @@ class ConlluGenerator(object):
for sent in sentences:
if (len(sent) > 0):
simple_tokenized = nltk.tokenize.word_tokenize(sent)
tokenized_sentences.append(
self.mwe_tokenizer.tokenize(simple_tokenized))
if self.mwe_tokenizer is None:
tokenized_sentences.append(simple_tokenized)
else:
tokenized_sentences.append(
self.mwe_tokenizer.tokenize(simple_tokenized))
tokenized_documents.append(tokenized_sentences)
# now create initial conllu elements
@@ -210,13 +240,14 @@ class ConlluGenerator(object):
else:
conllu_doc = ConlluDocument()
for sent in doc:
token_id = 0
conllu_sent = ConlluSentence()
for token in sent:
token_id += 1
conllu_sent.add(ConlluElement(
id=self.id_counter + 1,
id=token_id,
form=token,
))
self.id_counter += 1
conllu_doc.add(conllu_sent)
self.conllu_documents.append(conllu_doc)
i += 1
@@ -246,6 +277,9 @@ class ConlluGenerator(object):
for elem in conllu_sent.conllu_elements:
if elem.lemma in stemmed_keyword_list:
elem.add_misc(key, value)
def get_conllu_elements(self):
return [doc.get_conllu_elements() for doc in self.conllu_documents]
def __repr__(self):
result = ""

View File

@@ -13,7 +13,17 @@
"metadata": {},
"outputs": [],
"source": [
"import conllu_batch_generator as cbg"
"import sys\n",
"sys.path.append(\"../\")"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import Tagging.conllu_batch_generator as cbg"
]
},
{

View File

@@ -3,7 +3,11 @@
# # crf data Generator
import conllu_batch_generator as cbg
import sys
sys.path.append("../")
import Tagging.conllu_batch_generator as cbg
def word2features(sent, i):

View File

@@ -46,6 +46,12 @@
"containers = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(containers)\n",
"\n",
"# loading placeholders\n",
"spec = importlib.util.spec_from_file_location(\n",
" \"placeholders\", \"../\" + settings.placeholder_file)\n",
"placeholders = importlib.util.module_from_spec(spec)\n",
"spec.loader.exec_module(placeholders)\n",
"\n",
"# skipping recipes:\n",
"n_skipped_recipes = int(sys.argv[1]) if len(sys.argv) > 1 else 0\n",
"print(\"start reading at recipe \" + str(n_skipped_recipes))\n",
@@ -80,7 +86,8 @@
" \n",
" cg.add_misc_value_by_list(\"food_type\", \"ingredient\", [w.replace(\" \",\"_\") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed)\n",
" cg.add_misc_value_by_list(\"food_type\", \"action\", actions.stemmed_cooking_verbs)\n",
" cg.add_misc_value_by_list(\"food_type\", \"containers\", containers.containers)\n",
" cg.add_misc_value_by_list(\"food_type\", \"containers\", containers.stemmed_containers)\n",
" cg.add_misc_value_by_list(\"food_type\", \"placeholders\", placeholders.stemmed_placeholders)\n",
"\n",
" savefile.write(str(cg))"
]

View File

@@ -30,6 +30,12 @@ spec = importlib.util.spec_from_file_location(
containers = importlib.util.module_from_spec(spec)
spec.loader.exec_module(containers)
# loading placeholders
spec = importlib.util.spec_from_file_location(
"placeholders", "../" + settings.placeholder_file)
placeholders = importlib.util.module_from_spec(spec)
spec.loader.exec_module(placeholders)
# skipping recipes:
n_skipped_recipes = int(sys.argv[1]) if len(sys.argv) > 1 else 0
print("start reading at recipe " + str(n_skipped_recipes))
@@ -58,7 +64,8 @@ def process_instructions(instructions: list, document_ids=None):
cg.add_misc_value_by_list("food_type", "ingredient", [w.replace(" ","_") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed)
cg.add_misc_value_by_list("food_type", "action", actions.stemmed_cooking_verbs)
cg.add_misc_value_by_list("food_type", "containers", containers.containers)
cg.add_misc_value_by_list("food_type", "containers", containers.stemmed_containers)
cg.add_misc_value_by_list("food_type", "placeholders", placeholders.stemmed_placeholders)
savefile.write(str(cg))
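(The placeholder list is loaded with the same importlib-based dynamic import this script already uses for ingredients, actions, and containers. For reference, the general shape of that stdlib pattern; the path below is illustrative — the script takes it from settings.placeholder_file:

import importlib.util

# load a plain Python module from an explicit file path
spec = importlib.util.spec_from_file_location(
    "placeholders", "../data/placeholders.py")  # hypothetical path
placeholders = importlib.util.module_from_spec(spec)
spec.loader.exec_module(placeholders)

# its top-level names are then regular attributes, e.g. the stemmed
# word list passed to add_misc_value_by_list above
print(placeholders.stemmed_placeholders[:5]))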

Binary file not shown.