added tagging tools

Jonas Weinz 2019-06-26 20:35:32 +02:00
parent bd7af6724b
commit 3fbf5fad68
4 changed files with 1347 additions and 0 deletions

File diff suppressed because it is too large

Tagging/conllu_generator.py

@@ -0,0 +1,186 @@
#!/usr/bin/env python3
import nltk
from nltk.tag import pos_tag, map_tag
from nltk.stem import PorterStemmer
from nltk.corpus import stopwords as nltk_stopwords
from stemmed_mwe_tokenizer import StemmedMWETokenizer
CONLLU_ATTRIBUTES = [
    "id",
    "form",
    "lemma",
    "upos",
    "xpos",
    "feats",
    "head",
    "deprel",
    "deps",
    "misc"
]
# taken from: https://stackoverflow.com/a/16053211
def replace_tab(s, tabstop=4):
    result = str()
    for c in s:
        if c == '\t':
            while (len(result) % tabstop != 0):
                result += ' '
        else:
            result += c
    return result

class ConlluDict(dict):

    def from_str(self, s: str):
        entries = s.split("|")
        for entry in entries:
            key, val = entry.split("=")
            self[key.strip()] = val.strip()

    def __repr__(self):
        if len(self) == 0:
            return "_"

        result = ""
        for key, value in self.items():
            result += key + "=" + value + "|"
        return result[:-1]

    def __str__(self):
        return self.__repr__()

class ConlluElement(object):
    # class uses the format described here: https://universaldependencies.org/format.html
    def __init__(
            self,
            id: int,
            form: str,
            lemma: str,
            upos: str = "_",
            xpos: str = "_",
            feats: str = "_",
            head: str = "_",
            deprel: str = "_",
            deps: str = "_",
            misc: str = "_"):
        self.id = id
        self.form = form
        self.lemma = lemma
        self.upos = upos
        self.xpos = xpos

        self.feats = ConlluDict()
        if feats != "_":
            self.feats.from_str(feats)

        self.head = head
        self.deprel = deprel
        self.deps = deps

        self.misc = ConlluDict()
        if misc != "_":
            self.misc.from_str(misc)

    def add_feature(self, key: str, value: str):
        self.feats[key] = value

    def add_misc(self, key: str, value: str):
        self.misc[key] = value

    def __repr__(self):
        result = ""
        for attr in CONLLU_ATTRIBUTES:
            result += str(self.__getattribute__(attr)) + " \t"
        return replace_tab(result, 16)

class ConlluDocument(object):
    def __init__(self):
        self.conllu_elements = []

    def add(self, conllu_element: ConlluElement):
        self.conllu_elements.append(conllu_element)

    def __repr__(self):
        result = ""
        for elem in self.conllu_elements:
            result += elem.__repr__() + "\n"
        return result + "\n"

    def __str__(self):
        return self.__repr__()

class ConlluGenerator(object):
    def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer()):
        self.documents = documents
        self.stemmed_multi_word_tokens = stemmed_multi_word_tokens
        self.mwe_tokenizer = StemmedMWETokenizer(
            [w.split() for w in stemmed_multi_word_tokens])
        self.stemmer = stemmer

        self.id_counter = 0
        self.conllu_documents = []

    def tokenize_and_stem(self):
        tokenized_documents = []

        for doc in self.documents:
            simple_tokenized = nltk.tokenize.word_tokenize(doc)
            tokenized_documents.append(
                self.mwe_tokenizer.tokenize(simple_tokenized))

        # now create the initial CoNLL-U elements
        for doc in tokenized_documents:
            self.id_counter = 0
            conllu_doc = ConlluDocument()
            for token in doc:
                stemmed_token = None
                # multi-word expressions are joined with "_", so stem each part separately
                if "_" in token:
                    stemmed_token = "_".join(
                        [self.stemmer.stem(part) for part in token.split("_")])
                else:
                    stemmed_token = self.stemmer.stem(token)
                conllu_doc.add(ConlluElement(
                    id=self.id_counter,
                    form=token,
                    lemma=stemmed_token
                ))
                self.id_counter += 1
            self.conllu_documents.append(conllu_doc)

    def pos_tagging(self):
        for conllu_document in self.conllu_documents:
            tokens = [x.form for x in conllu_document.conllu_elements]

            pos_tags = pos_tag(tokens)
            simplified_tags = [map_tag('en-ptb', 'universal', tag)
                               for word, tag in pos_tags]

            for i in range(len(tokens)):
                conllu_elem = conllu_document.conllu_elements[i]
                conllu_elem.upos = simplified_tags[i]
                conllu_elem.xpos = pos_tags[i][1]

    def add_misc_value_by_list(self, key, value, stemmed_keyword_list):
        for conllu_document in self.conllu_documents:
            for elem in conllu_document.conllu_elements:
                if elem.lemma in stemmed_keyword_list:
                    elem.add_misc(key, value)

    def __repr__(self):
        result = ""
        for document in self.conllu_documents:
            result += document.__repr__() + "\n"
        return result

    def __str__(self):
        return self.__repr__()
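
For orientation, a minimal usage sketch of the ConlluGenerator defined above (not part of the commit): the two ingredient lists are hypothetical stand-ins for the stemmed ingredient data that the tagging script below loads via its settings module, and nltk's punkt, averaged_perceptron_tagger and universal_tagset resources are assumed to be available.

from conllu_generator import ConlluGenerator

# hypothetical stand-in data: already-stemmed ingredients, multi-word entries as space-separated strings
multi_word_ingredients_stemmed = ["oliv oil"]
ingredients_stemmed = ["onion", "garlic"]

documents = ["Chop the onion and fry it in olive oil."]

cg = ConlluGenerator(documents, multi_word_ingredients_stemmed)
cg.tokenize_and_stem()
cg.pos_tagging()
cg.add_misc_value_by_list(
    "food_type", "ingredient",
    [w.replace(" ", "_") for w in multi_word_ingredients_stemmed] + ingredients_stemmed)

print(cg)  # one space-padded CoNLL-U-style row per token, documents separated by blank lines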


@@ -0,0 +1,76 @@
#!/usr/bin/env python3
import sys
sys.path.insert(0, '..')
from conllu_generator import ConlluDict, ConlluElement, ConlluDocument, ConlluGenerator
import settings
import importlib.util
from json_buffered_reader import JSON_buffered_reader as JSON_br
# loading ingredients:
spec = importlib.util.spec_from_file_location(
    "ingredients", "../" + settings.ingredients_file)
ingredients = importlib.util.module_from_spec(spec)
spec.loader.exec_module(ingredients)
# load json reader
# settings:
recipe_buffer_size = 1000
recipe_buffers_per_file = 5
# create reader
buffered_reader_1M = JSON_br("../" + settings.one_million_recipes_file)
# open savefile:
def process_instructions(instructions: list):
    if len(instructions) == 0:
        return

    conllu_input_docs = [doc.replace("\n", " ")[:-1] for doc in instructions]

    cg = ConlluGenerator(
        conllu_input_docs, ingredients.multi_word_ingredients_stemmed)
    cg.tokenize_and_stem()
    cg.pos_tagging()
    cg.add_misc_value_by_list(
        "food_type", "ingredient",
        [w.replace(" ", "_") for w in ingredients.multi_word_ingredients_stemmed]
        + ingredients.ingredients_stemmed)

    savefile.write(str(cg))

i = 0
buffer_count = 0
file_count = 0

savefile = open(f"recipes{file_count}.conllu", 'w')
instructions = []
for raw_recipe in buffered_reader_1M:
    instruction = ""
    for item in raw_recipe['instructions']:
        instruction += item['text'] + '\n'
    instructions.append(instruction)
    i += 1

    if i % recipe_buffer_size == 0:
        process_instructions(instructions)
        print(f"processed {i} recipes")
        instructions = []
        buffer_count += 1

        if buffer_count % recipe_buffers_per_file == 0:
            savefile.close()
            # increment the counter before opening, so the previous file is not overwritten
            file_count += 1
            savefile = open(f"recipes{file_count}.conllu", 'w')

process_instructions(instructions)
print(f"processed {i} recipes")
savefile.close()
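
Because replace_tab pads the columns with spaces instead of separating them with single tabs, the generated recipes<N>.conllu files are not strict CoNLL-U. A small ad-hoc reader is enough to get the annotations back; a sketch, assuming no field ever contains a space (tokens from word_tokenize do not, and multi-word ingredients are joined with "_"):

# sketch: read one of the generated files back into lists of token dicts
from conllu_generator import CONLLU_ATTRIBUTES

def read_tagged_documents(path):
    documents, current = [], []
    with open(path) as f:
        for line in f:
            if not line.strip():         # a blank line ends a document
                if current:
                    documents.append(current)
                    current = []
                continue
            fields = line.split()        # columns are space-padded, not tab-separated
            current.append(dict(zip(CONLLU_ATTRIBUTES, fields)))
    if current:
        documents.append(current)
    return documents

# e.g. list the tokens that were marked as ingredients
for doc in read_tagged_documents("recipes0.conllu"):
    print([tok["form"] for tok in doc if "food_type=ingredient" in tok["misc"]])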

stemmed_mwe_tokenizer.py

@@ -0,0 +1,58 @@
#!/usr/bin/env python3
import nltk
from nltk import PorterStemmer
from nltk.util import Trie

# modified MWE tokenizer which stems multi word expressions before the merge check
class StemmedMWETokenizer(nltk.tokenize.api.TokenizerI):

    def __init__(self, stemmed_tokens, stemmer=PorterStemmer(), separator="_"):
        self.stemmer = stemmer
        self.stemmed_tokens = stemmed_tokens
        self.mwes = Trie(stemmed_tokens)
        self.separator = separator

    def tokenize(self, text):
        """
        :param text: A list containing tokenized text
        :type text: list(str)
        :return: A list of the tokenized text with multi-words merged together
        :rtype: list(str)

        :Example: (adapted from nltk's MWETokenizer)
        >>> tokenizer = MWETokenizer([('hors', "d'oeuvre")], separator='+')
        >>> tokenizer.tokenize("An hors d'oeuvre tonight, sir?".split())
        ['An', "hors+d'oeuvre", 'tonight,', 'sir?']
        """
        i = 0
        n = len(text)
        result = []

        while i < n:
            if self.stemmer.stem(text[i]) in self.mwes:
                # possible MWE match
                j = i
                trie = self.mwes
                while j < n and self.stemmer.stem(text[j]) in trie:
                    trie = trie[self.stemmer.stem(text[j])]
                    j = j + 1
                else:
                    if Trie.LEAF in trie:
                        # success!
                        result.append(self.separator.join(text[i:j]))
                        i = j
                    else:
                        # no match, so backtrack
                        result.append(text[i])
                        i += 1
            else:
                result.append(text[i])
                i += 1

        return result
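
A short usage sketch of the tokenizer above (not part of the commit): the entries handed to the constructor must already be stemmed, since the input tokens are stemmed before the trie lookup ("oliv" is the Porter stem of "olive").

from stemmed_mwe_tokenizer import StemmedMWETokenizer

# multi-word entries are given as lists of already-stemmed words
tokenizer = StemmedMWETokenizer([["oliv", "oil"]])

print(tokenizer.tokenize("Heat the olive oil in a large pan".split()))
# expected: ['Heat', 'the', 'olive_oil', 'in', 'a', 'large', 'pan']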