refactored repo a little bit
		| @ -15,6 +15,10 @@ | ||||
|    "source": [ | ||||
|     "from IPython.core.display import Markdown, HTML, display\n", | ||||
|     "\n", | ||||
|     "import sys\n", | ||||
|     "sys.path.insert(0, '..')  # noqa\n", | ||||
|     "import settings  # noqa\n", | ||||
|     "\n", | ||||
|     "import crf_data_generator as cdg\n", | ||||
|     "import pycrfsuite\n", | ||||
|     "\n", | ||||
| @ -27,7 +31,7 @@ | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "data = cdg.ConlluCRFReader(\"recipes2.conllu\")\n", | ||||
|     "data = cdg.ConlluCRFReader(\"../\" + settings.gzipped_conllu_data_root + \"recipes2.conllu.gz\")\n", | ||||
|     "\n", | ||||
|     "data_iterator = iter(data)" | ||||
|    ] | ||||
| @ -67,7 +71,7 @@ | ||||
|     { | ||||
|      "data": { | ||||
|       "text/plain": [ | ||||
|        "<contextlib.closing at 0x7eff2aa68128>" | ||||
|        "<contextlib.closing at 0x7f41527f5d30>" | ||||
|       ] | ||||
|      }, | ||||
|      "execution_count": 4, | ||||
|  | ||||
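The data path now goes through the shared settings module and points at a gzipped corpus; this works without further changes because the iterator classes added below in Tagging/conllu_batch_generator.ipynb switch to gzip.open for paths ending in `.gz`. A minimal sketch of the resolved path — the settings value is an assumption, its real definition lives in the repo-level settings.py and is not part of this diff:

```python
import sys
sys.path.insert(0, '..')   # makes the repo-level settings.py importable from Tagging/
import settings  # noqa

import crf_data_generator as cdg

# Assumption: settings.gzipped_conllu_data_root is a directory path ending in '/'.
path = "../" + settings.gzipped_conllu_data_root + "recipes2.conllu.gz"
data = cdg.ConlluCRFReader(path)  # the reader detects the .gz suffix and uses gzip.open
```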
File diff suppressed because one or more lines are too long

380 Tagging/conllu_batch_generator.ipynb Normal file
							| @ -0,0 +1,380 @@ | ||||
| { | ||||
|  "cells": [ | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "# Conllu Batch Generator\n", | ||||
|     "\n", | ||||
|     "read conllu documents in batches" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "import sys\n", | ||||
|     "\n", | ||||
|     "from conllu import parse\n", | ||||
|     "from tagging_tools import print_visualized_tags\n", | ||||
|     "\n", | ||||
|     "from sklearn import preprocessing\n", | ||||
|     "import numpy as np\n", | ||||
|     "\n", | ||||
|     "sys.path.insert(0, '..')\n", | ||||
|     "import settings  # noqa\n", | ||||
|     "\n", | ||||
|     "import gzip" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluSentenceIterator(object):\n", | ||||
|     "    def __init__(self, conllu_reader):\n", | ||||
|     "        self.conllu_reader = conllu_reader\n", | ||||
|     "        self._fileobj = None\n", | ||||
|     "        self._open()\n", | ||||
|     "    \n", | ||||
|     "    def _open(self):\n", | ||||
|     "        if self.conllu_reader._path.endswith(\".gz\"):\n", | ||||
|     "            self._fileobj = gzip.open(self.conllu_reader._path, 'r')\n", | ||||
|     "            self._nextline = self.read_byte_line\n", | ||||
|     "        else:\n", | ||||
|     "            self._fileobj = open(self.conllu_reader._path, 'r')\n", | ||||
|     "            self._nextline = self.read_str_line\n", | ||||
|     "\n", | ||||
|     "    def __next__(self):\n", | ||||
|     "        next_sent = self.next_sentence()\n", | ||||
|     "        if next_sent is None:\n", | ||||
|     "            raise StopIteration\n", | ||||
|     "        return next_sent\n", | ||||
|     "    \n", | ||||
|     "    def read_str_line(self):\n", | ||||
|     "        return self._fileobj.readline()\n", | ||||
|     "    \n", | ||||
|     "    def read_byte_line(self):\n", | ||||
|     "        return self._fileobj.readline().decode(\"utf-8\")\n", | ||||
|     "\n", | ||||
|     "    def next_sentence(self):\n", | ||||
|     "        data = \"\"\n", | ||||
|     "        while True:\n", | ||||
|     "            line = self._nextline()\n", | ||||
|     "            if line == \"\":\n", | ||||
|     "                break\n", | ||||
|     "            data += line\n", | ||||
|     "            if line == \"\\n\":\n", | ||||
|     "                break\n", | ||||
|     "\n", | ||||
|     "        if data == \"\":\n", | ||||
|     "            return None\n", | ||||
|     "\n", | ||||
|     "        if data[-1] != \"\\n\":\n", | ||||
|     "            data += \"\\n\"\n", | ||||
|     "\n", | ||||
|     "        conllu_obj = parse(data)\n", | ||||
|     "        return conllu_obj" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluDocumentIterator(object):\n", | ||||
|     "    def __init__(self, conllu_reader):\n", | ||||
|     "        self.conllu_reader = conllu_reader\n", | ||||
|     "        self._fileobj = None\n", | ||||
|     "        self._open()\n", | ||||
|     "    \n", | ||||
|     "    def _open(self):\n", | ||||
|     "        if self.conllu_reader._path.endswith(\".gz\"):\n", | ||||
|     "            self._fileobj = gzip.open(self.conllu_reader._path, 'r')\n", | ||||
|     "            self._nextline = self.read_byte_line\n", | ||||
|     "        else:\n", | ||||
|     "            self._fileobj = open(self.conllu_reader._path, 'r')\n", | ||||
|     "            self._nextline = self.read_str_line\n", | ||||
|     "        \n", | ||||
|     "    def read_str_line(self):\n", | ||||
|     "        return self._fileobj.readline()\n", | ||||
|     "    \n", | ||||
|     "    def read_byte_line(self):\n", | ||||
|     "        return self._fileobj.readline().decode(\"utf-8\")\n", | ||||
|     "\n", | ||||
|     "    def next_document(self):\n", | ||||
|     "        data = \"\"\n", | ||||
|     "        last_line_empty = False\n", | ||||
|     "        while True:\n", | ||||
|     "            line = self._nextline()\n", | ||||
|     "            if line == \"\":\n", | ||||
|     "                break\n", | ||||
|     "            data += line\n", | ||||
|     "            if line == \"\\n\":\n", | ||||
|     "                if last_line_empty:\n", | ||||
|     "                    break\n", | ||||
|     "                last_line_empty = True\n", | ||||
|     "            else:\n", | ||||
|     "                last_line_empty = False\n", | ||||
|     "\n", | ||||
|     "        if data == \"\":\n", | ||||
|     "            return None\n", | ||||
|     "\n", | ||||
|     "        if data[-1] != \"\\n\":\n", | ||||
|     "            data += \"\\n\"\n", | ||||
|     "\n", | ||||
|     "        conllu_obj = parse(data)\n", | ||||
|     "        return conllu_obj\n", | ||||
|     "\n", | ||||
|     "    def __next__(self):\n", | ||||
|     "        next_sent = self.next_document()\n", | ||||
|     "        if next_sent is None:\n", | ||||
|     "            raise StopIteration\n", | ||||
|     "        return next_sent" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluReader(object):\n", | ||||
|     "    def __init__(self, path, iter_documents=False):\n", | ||||
|     "        self._path = path\n", | ||||
|     "        self.iter_documents = iter_documents\n", | ||||
|     "\n", | ||||
|     "    def __iter__(self):\n", | ||||
|     "        return ConlluDocumentIterator(self) if self.iter_documents else ConlluSentenceIterator(self)" | ||||
|    ] | ||||
|   }, | ||||
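A minimal usage sketch for the reader, assuming a local CoNLL-U file (the file name is a placeholder; a `.conllu.gz` path works the same way). In sentence mode each iteration yields the result of conllu.parse() on one sentence block, i.e. a list holding a single TokenList:

```python
reader = ConlluReader("recipes0.conllu")  # placeholder path

for parsed in reader:
    sentence = parsed[0]                  # parse() wraps each sentence in a list
    print([token['form'] for token in sentence])
```

With `iter_documents=True`, each step instead yields all sentences of one `# newdoc` block.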
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class SlidingWindowListIterator(object):\n", | ||||
|     "    def __init__(self, parent):\n", | ||||
|     "        self.parent = parent\n", | ||||
|     "        self.i = 0\n", | ||||
|     "\n", | ||||
|     "    def __next__(self):\n", | ||||
|     "        if len(self.parent) == self.i:\n", | ||||
|     "            raise StopIteration\n", | ||||
|     "\n", | ||||
|     "        self.i += 1\n", | ||||
|     "        return self.parent[self.i - 1]" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class SlidingWindowList(list):\n", | ||||
|     "    def __init__(self, sliding_window_size, input=None, border_value=None):\n", | ||||
|     "\n", | ||||
|     "        self.sliding_window_size = sliding_window_size\n", | ||||
|     "        self.border_value = border_value\n", | ||||
|     "\n", | ||||
|     "        if border_value is None and input is not None:\n", | ||||
|     "            self.border_value = type(input[0])()\n", | ||||
|     "\n", | ||||
|     "        if input is not None:\n", | ||||
|     "            super(SlidingWindowList, self).__init__(input)\n", | ||||
|     "\n", | ||||
|     "    def __getitem__(self, index):\n", | ||||
|     "\n", | ||||
|     "        if type(index) == slice:\n", | ||||
|     "            start = 0 if index.start is None else index.start\n", | ||||
|     "            stop = len(self) if index.stop is None else index.stop\n", | ||||
|     "            step = 1 if index.step is None else index.step\n", | ||||
|     "            return [self[i] for i in range(start, stop, step)]\n", | ||||
|     "\n", | ||||
|     "        else:\n", | ||||
|     "            n = self.sliding_window_size * 2 + 1\n", | ||||
|     "            res = n * [self.border_value]\n", | ||||
|     "\n", | ||||
|     "            j_start = index - self.sliding_window_size\n", | ||||
|     "\n", | ||||
|     "            for i in range(n):\n", | ||||
|     "                ind = j_start + i\n", | ||||
|     "                if ind >= 0 and ind < len(self):\n", | ||||
|     "                    res[i] = super(SlidingWindowList, self).__getitem__(ind)\n", | ||||
|     "\n", | ||||
|     "            return res\n", | ||||
|     "\n", | ||||
|     "    def __iter__(self):\n", | ||||
|     "        return SlidingWindowListIterator(self)" | ||||
|    ] | ||||
|   }, | ||||
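A small worked example of the windowing behavior; with window size 1 and border value 0, each index returns its neighborhood, padded at the edges:

```python
lst = SlidingWindowList(1, [10, 20, 30], border_value=0)

lst[0]    # [0, 10, 20]   -> padded on the left with the border value
lst[1]    # [10, 20, 30]
lst[2]    # [20, 30, 0]   -> padded on the right
lst[0:2]  # [[0, 10, 20], [10, 20, 30]]
```

The (commented-out) ConlluDataProvider below uses it with `border_value=[0] * len(classes)`, so the padding is an all-zero one-hot vector.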
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "'''\n", | ||||
|     "class ConlluDataProviderIterator(object):\n", | ||||
|     "    def __init__(self, parent):\n", | ||||
|     "        self.parent = parent\n", | ||||
|     "        self.conllu_reader = ConlluReader(\n", | ||||
|     "            parent.filepath, parent.iter_documents)\n", | ||||
|     "\n", | ||||
|     "    def __next__(self):\n", | ||||
|     "        result = self.parent.getNextDataBatch(conllu_reader=self.conllu_reader)\n", | ||||
|     "        if result is None:\n", | ||||
|     "            raise StopIteration\n", | ||||
|     "        return result\n", | ||||
|     "'''\n", | ||||
|     "\n", | ||||
|     "'''\n", | ||||
|     "class ConlluDataProvider(object):\n", | ||||
|     "    def __init__(self,\n", | ||||
|     "                 filepath,\n", | ||||
|     "                 word2vec_model,\n", | ||||
|     "                 batchsize=100,\n", | ||||
|     "                 window_size=3,\n", | ||||
|     "                 iter_documents=False,\n", | ||||
|     "                 food_type=None):\n", | ||||
|     "        self.batchsize = batchsize\n", | ||||
|     "        self.word2vec_model = word2vec_model\n", | ||||
|     "        self.filepath = filepath\n", | ||||
|     "        self.conllu_reader = ConlluReader(filepath, iter_documents)\n", | ||||
|     "        self.window_size = window_size\n", | ||||
|     "        self.food_type = food_type\n", | ||||
|     "        self.iter_documents = iter_documents\n", | ||||
|     "\n", | ||||
|     "        # create a label binarizer for upos tags:\n", | ||||
|     "        self.lb = preprocessing.LabelBinarizer()\n", | ||||
|     "        self.lb.fit(['.', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET',\n", | ||||
|     "                     'NOUN', 'NUM', 'PRON', 'PRT', 'VERB', 'X'])\n", | ||||
|     "\n", | ||||
|     "    def _get_next_conllu_objects(self, n: int, conllu_reader):\n", | ||||
|     "        i = 0\n", | ||||
|     "        conllu_list = []\n", | ||||
|     "\n", | ||||
|     "        while i < n:\n", | ||||
|     "            try:\n", | ||||
|     "                conllu_list.append(conllu_reader.__iter__().__next__())\n", | ||||
|     "                i += 1\n", | ||||
|     "\n", | ||||
|     "            except StopIteration:\n", | ||||
|     "                break\n", | ||||
|     "\n", | ||||
|     "        return conllu_list\n", | ||||
|     "\n", | ||||
|     "    def _get_upos_X(self, conllu_list):\n", | ||||
|     "        n_tokens = 0\n", | ||||
|     "        l_global = []\n", | ||||
|     "        for document in conllu_list:\n", | ||||
|     "            l = []\n", | ||||
|     "            for sentence in document:\n", | ||||
|     "                for token in sentence:\n", | ||||
|     "                    upos = token['upostag']\n", | ||||
|     "                    l.append(upos)\n", | ||||
|     "                    n_tokens += 1\n", | ||||
|     "            if len(l) > 0:\n", | ||||
|     "                l_global.append(self.lb.transform(l))\n", | ||||
|     "\n", | ||||
|     "        return l_global, n_tokens\n", | ||||
|     "\n", | ||||
|     "    def _get_y(self, conllu_list, misk_key=\"food_type\", misc_val=\"ingredient\"):\n", | ||||
|     "        n_tokens = 0\n", | ||||
|     "        y_global = []\n", | ||||
|     "        for document in conllu_list:\n", | ||||
|     "            y = []\n", | ||||
|     "            for sentence in document:\n", | ||||
|     "                for token in sentence:\n", | ||||
|     "                    m = token['misc']\n", | ||||
|     "                    t_y = m is not None and misk_key in m and m[misk_key] == misc_val\n", | ||||
|     "                    y.append(t_y)\n", | ||||
|     "                    n_tokens += 1\n", | ||||
|     "            if len(y) > 0:\n", | ||||
|     "                y_global.append(y)\n", | ||||
|     "\n", | ||||
|     "        return y_global, n_tokens\n", | ||||
|     "\n", | ||||
|     "    def getNextDataBatch(self, y_food_type_label=None, conllu_reader=None):\n", | ||||
|     "\n", | ||||
|     "        if y_food_type_label is None:\n", | ||||
|     "            y_food_type_label = self.food_type\n", | ||||
|     "\n", | ||||
|     "        if conllu_reader is None:\n", | ||||
|     "            conllu_reader = self.conllu_reader\n", | ||||
|     "        conllu_list = self._get_next_conllu_objects(\n", | ||||
|     "            self.batchsize, conllu_reader)\n", | ||||
|     "\n", | ||||
|     "        if len(conllu_list) == 0:\n", | ||||
|     "            return None\n", | ||||
|     "\n", | ||||
|     "        # generate features for each document/sentence\n", | ||||
|     "        n = len(conllu_list)\n", | ||||
|     "\n", | ||||
|     "        d = self.window_size * 2 + 1\n", | ||||
|     "\n", | ||||
|     "        buf_X, x_tokens = self._get_upos_X(conllu_list)\n", | ||||
|     "        buf_ingr_y, y_tokens = self._get_y(conllu_list)\n", | ||||
|     "\n", | ||||
|     "        assert len(buf_X) == len(buf_ingr_y) and x_tokens == y_tokens\n", | ||||
|     "\n", | ||||
|     "        X_upos = np.zeros(shape=(x_tokens, d * len(self.lb.classes_)))\n", | ||||
|     "        y = None\n", | ||||
|     "\n", | ||||
|     "        if y_food_type_label is not None:\n", | ||||
|     "            y = np.zeros(shape=(x_tokens))\n", | ||||
|     "\n", | ||||
|     "        i = 0\n", | ||||
|     "        for xupos in buf_X:\n", | ||||
|     "            tmp = SlidingWindowList(self.window_size,\n", | ||||
|     "                                    xupos,\n", | ||||
|     "                                    border_value=[0] * len(self.lb.classes_))\n", | ||||
|     "            for upos_window in tmp:\n", | ||||
|     "                X_upos[i, :] = np.array(upos_window).flatten()\n", | ||||
|     "                i += 1\n", | ||||
|     "\n", | ||||
|     "        i = 0\n", | ||||
|     "        if y_food_type_label is not None:\n", | ||||
|     "            for sentence in buf_ingr_y:\n", | ||||
|     "                for yl in sentence:\n", | ||||
|     "                    y[i] = yl\n", | ||||
|     "                    i += 1\n", | ||||
|     "\n", | ||||
|     "        return X_upos, y\n", | ||||
|     "    \n", | ||||
|     "    def __iter__(self):\n", | ||||
|     "        return ConlluDataProviderIterator(self)\n", | ||||
|     "\n", | ||||
|     "'''" | ||||
|    ] | ||||
|   } | ||||
|  ], | ||||
|  "metadata": { | ||||
|   "kernelspec": { | ||||
|    "display_name": "Python 3", | ||||
|    "language": "python", | ||||
|    "name": "python3" | ||||
|   }, | ||||
|   "language_info": { | ||||
|    "codemirror_mode": { | ||||
|     "name": "ipython", | ||||
|     "version": 3 | ||||
|    }, | ||||
|    "file_extension": ".py", | ||||
|    "mimetype": "text/x-python", | ||||
|    "name": "python", | ||||
|    "nbconvert_exporter": "python", | ||||
|    "pygments_lexer": "ipython3", | ||||
|    "version": "3.7.3" | ||||
|   } | ||||
|  }, | ||||
|  "nbformat": 4, | ||||
|  "nbformat_minor": 4 | ||||
| } | ||||
| @ -1,4 +1,9 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # coding: utf-8 | ||||
|  | ||||
| # # Conllu Batch Generator | ||||
| #  | ||||
| # read conllu documents in batches | ||||
|  | ||||
| import sys | ||||
|  | ||||
| @ -302,3 +307,4 @@ class ConlluDataProvider(object): | ||||
|         return ConlluDataProviderIterator(self) | ||||
|  | ||||
| ''' | ||||
|  | ||||
|  | ||||
373 Tagging/conllu_generator.ipynb Normal file
							| @ -0,0 +1,373 @@ | ||||
| { | ||||
|  "cells": [ | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "# Conllu Generator\n", | ||||
|     "\n", | ||||
|     "tools for creating:\n", | ||||
|     "* conllu tokens\n", | ||||
|     "* conllu sentences\n", | ||||
|     "* conllu documents" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "## imports and settings" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 2, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "import nltk\n", | ||||
|     "from nltk.tag import pos_tag, map_tag\n", | ||||
|     "from nltk.stem import PorterStemmer\n", | ||||
|     "from nltk.corpus import stopwords as nltk_stopwords\n", | ||||
|     "from stemmed_mwe_tokenizer import StemmedMWETokenizer" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 3, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "CONLLU_ATTRIBUTES = [\n", | ||||
|     "    \"id\",s\n", | ||||
|     "    \"form\",\n", | ||||
|     "    \"lemma\",\n", | ||||
|     "    \"upos\",\n", | ||||
|     "    \"xpos\",\n", | ||||
|     "    \"feats\",\n", | ||||
|     "    \"head\",\n", | ||||
|     "    \"deprel\",\n", | ||||
|     "    \"deps\",\n", | ||||
|     "    \"misc\"\n", | ||||
|     "]" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 4, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "# took from: https://stackoverflow.com/a/16053211\n", | ||||
|     "\n", | ||||
|     "\n", | ||||
|     "def replace_tab(s, tabstop=4):\n", | ||||
|     "    result = str()\n", | ||||
|     "    s = s.replace(\"\\t\", \" \\t\")\n", | ||||
|     "    for c in s:\n", | ||||
|     "        if c == '\\t':\n", | ||||
|     "            while (len(result) % (tabstop) != 0):\n", | ||||
|     "                result += ' '\n", | ||||
|     "        else:\n", | ||||
|     "            result += c\n", | ||||
|     "    return result" | ||||
|    ] | ||||
|   }, | ||||
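A quick worked example of the tab expansion; ConlluElement.__repr__ further down uses it with a tab stop of 16 to align the ten CoNLL-U columns:

```python
replace_tab("1\tapple\tappl", tabstop=8)
# -> '1       apple   appl'   (each field starts at a column that is a multiple of 8)
```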
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "## Conllu Dict Class" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 5, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluDict(dict):\n", | ||||
|     "\n", | ||||
|     "    def from_str(self, s: str):\n", | ||||
|     "        entries = s.split(\"|\")\n", | ||||
|     "        for entry in entries:\n", | ||||
|     "            key, val = entry.split(\"=\")\n", | ||||
|     "            self[key.strip()] = val.strip()\n", | ||||
|     "\n", | ||||
|     "    def __repr__(self):\n", | ||||
|     "        if len(self) == 0:\n", | ||||
|     "            return \"_\"\n", | ||||
|     "\n", | ||||
|     "        result = \"\"\n", | ||||
|     "        for key, value in self.items():\n", | ||||
|     "            result += key + \"=\" + value + \"|\"\n", | ||||
|     "\n", | ||||
|     "        return result[:-1]\n", | ||||
|     "\n", | ||||
|     "    def __str__(self):\n", | ||||
|     "        return self.__repr__()" | ||||
|    ] | ||||
|   }, | ||||
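The pipe-separated key=value notation mirrors the CoNLL-U feats/misc columns; a tiny round trip:

```python
misc = ConlluDict()
misc.from_str("food_type=ingredient")
str(misc)           # 'food_type=ingredient'
str(ConlluDict())   # '_'  -> empty fields render as the CoNLL-U placeholder
```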
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "## Conllu Element Class" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 6, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluElement(object):\n", | ||||
|     "        # class uses format described here: https://universaldependencies.org/format.html\n", | ||||
|     "    def __init__(\n", | ||||
|     "            self,\n", | ||||
|     "            id: int,\n", | ||||
|     "            form: str,\n", | ||||
|     "            lemma: str,\n", | ||||
|     "            upos: str = \"_\",\n", | ||||
|     "            xpos: str = \"_\",\n", | ||||
|     "            feats: str = \"_\",\n", | ||||
|     "            head: str = \"_\",\n", | ||||
|     "            deprel: str = \"_\",\n", | ||||
|     "            deps: str = \"_\",\n", | ||||
|     "            misc: str = \"_\"):\n", | ||||
|     "        self.id = id\n", | ||||
|     "        self.form = form\n", | ||||
|     "        self.lemma = lemma\n", | ||||
|     "        self.upos = upos\n", | ||||
|     "        self.xpos = xpos\n", | ||||
|     "\n", | ||||
|     "        self.feats = ConlluDict()\n", | ||||
|     "        if feats != \"_\":\n", | ||||
|     "            self.feats.from_str(feats)\n", | ||||
|     "\n", | ||||
|     "        self.head = head\n", | ||||
|     "        self.deprel = deprel\n", | ||||
|     "        self.deps = deps\n", | ||||
|     "\n", | ||||
|     "        self.misc = ConlluDict()\n", | ||||
|     "        if misc != \"_\":\n", | ||||
|     "            self.misc.from_str(misc)\n", | ||||
|     "\n", | ||||
|     "    def add_feature(self, key: str, value: str):\n", | ||||
|     "        self.feats[key] = value\n", | ||||
|     "\n", | ||||
|     "    def add_misc(self, key: str, value: str):\n", | ||||
|     "        self.misc[key] = value\n", | ||||
|     "\n", | ||||
|     "    def __repr__(self):\n", | ||||
|     "        result = \"\"\n", | ||||
|     "        for attr in CONLLU_ATTRIBUTES:\n", | ||||
|     "            result += str(self.__getattribute__(attr)) + \" \\t\"\n", | ||||
|     "        return replace_tab(result, 16)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "## Conllu Sentence Class" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 7, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluSentence(object):\n", | ||||
|     "    def __init__(self):\n", | ||||
|     "        self.conllu_elements = []\n", | ||||
|     "\n", | ||||
|     "    def add(self, conllu_element: ConlluElement):\n", | ||||
|     "        self.conllu_elements.append(conllu_element)\n", | ||||
|     "\n", | ||||
|     "    def __repr__(self):\n", | ||||
|     "        result = \"\"\n", | ||||
|     "        for elem in self.conllu_elements:\n", | ||||
|     "            result += elem.__repr__() + \"\\n\"\n", | ||||
|     "\n", | ||||
|     "        return result\n", | ||||
|     "\n", | ||||
|     "    def __str__(self):\n", | ||||
|     "        return self.__repr__()" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "## Conllu Document Class" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 8, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluDocument(object):\n", | ||||
|     "    def __init__(self, id=None):\n", | ||||
|     "        self.conllu_sentences = []\n", | ||||
|     "        self.id = id\n", | ||||
|     "    \n", | ||||
|     "    def add(self, conllu_sentence: ConlluSentence):\n", | ||||
|     "        self.conllu_sentences.append(conllu_sentence)\n", | ||||
|     "    \n", | ||||
|     "    def __repr__(self):\n", | ||||
|     "        result = \"# newdoc\\n\"\n", | ||||
|     "        if self.id is not None:\n", | ||||
|     "            result += \"# id: \" + self.id + \"\\n\"\n", | ||||
|     "        for elem in self.conllu_sentences:\n", | ||||
|     "            result += elem.__repr__() + \"\\n\"\n", | ||||
|     "\n", | ||||
|     "        return result\n", | ||||
|     "\n", | ||||
|     "    def __str__(self):\n", | ||||
|     "        return self.__repr__()" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "## Conllu Generator Class" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 1, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "ename": "NameError", | ||||
|      "evalue": "name 'PorterStemmer' is not defined", | ||||
|      "output_type": "error", | ||||
|      "traceback": [ | ||||
|       "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", | ||||
|       "\u001b[0;31mNameError\u001b[0m                                 Traceback (most recent call last)", | ||||
|       "\u001b[0;32m<ipython-input-1-dcd3e28a755b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mclass\u001b[0m \u001b[0mConlluGenerator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobject\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      2\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdocuments\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstemmed_multi_word_tokens\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstemmer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mPorterStemmer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mids\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      3\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdocuments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdocuments\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstemmed_multi_word_tokens\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstemmed_multi_word_tokens\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m         self.mwe_tokenizer = StemmedMWETokenizer(\n", | ||||
|       "\u001b[0;32m<ipython-input-1-dcd3e28a755b>\u001b[0m in \u001b[0;36mConlluGenerator\u001b[0;34m()\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0mConlluGenerator\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobject\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m     \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdocuments\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mlist\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstemmed_multi_word_tokens\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstemmer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mPorterStemmer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mids\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      3\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdocuments\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mdocuments\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      4\u001b[0m         \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstemmed_multi_word_tokens\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstemmed_multi_word_tokens\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m         self.mwe_tokenizer = StemmedMWETokenizer(\n", | ||||
|       "\u001b[0;31mNameError\u001b[0m: name 'PorterStemmer' is not defined" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "class ConlluGenerator(object):\n", | ||||
|     "    def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer(), ids=None):\n", | ||||
|     "        self.documents = documents\n", | ||||
|     "        self.stemmed_multi_word_tokens = stemmed_multi_word_tokens\n", | ||||
|     "        self.mwe_tokenizer = StemmedMWETokenizer(\n", | ||||
|     "            [w.split() for w in stemmed_multi_word_tokens])\n", | ||||
|     "        self.stemmer = stemmer\n", | ||||
|     "\n", | ||||
|     "        self.id_counter = 0\n", | ||||
|     "\n", | ||||
|     "        self.conllu_documents = []\n", | ||||
|     "\n", | ||||
|     "        self.ids = ids\n", | ||||
|     "\n", | ||||
|     "    def tokenize_and_stem(self):\n", | ||||
|     "        tokenized_documents = []\n", | ||||
|     "\n", | ||||
|     "        i = 0\n", | ||||
|     "        for doc in self.documents:\n", | ||||
|     "            tokenized_sentences = []\n", | ||||
|     "            sentences = doc.split(\"\\n\")\n", | ||||
|     "            for sent in sentences: \n", | ||||
|     "                if (len(sent) > 0):\n", | ||||
|     "                    simple_tokenized = nltk.tokenize.word_tokenize(sent)\n", | ||||
|     "                    tokenized_sentences.append(\n", | ||||
|     "                        self.mwe_tokenizer.tokenize(simple_tokenized))\n", | ||||
|     "            tokenized_documents.append(tokenized_sentences)\n", | ||||
|     "\n", | ||||
|     "        # now create initial colln-u elemnts\n", | ||||
|     "        for doc in tokenized_documents:\n", | ||||
|     "            if self.ids:\n", | ||||
|     "                conllu_doc = ConlluDocument(self.ids[i])\n", | ||||
|     "            else:\n", | ||||
|     "                conllu_doc = ConlluDocument()\n", | ||||
|     "            self.id_counter = 0\n", | ||||
|     "            for sent in doc:\n", | ||||
|     "                conllu_sent = ConlluSentence()\n", | ||||
|     "                for token in sent:\n", | ||||
|     "                    stemmed_token = None\n", | ||||
|     "                    if \"_\" in token:\n", | ||||
|     "                        stemmed_token = \"_\".join(\n", | ||||
|     "                            [self.stemmer.stem(part) for part in token.split(\"_\")])\n", | ||||
|     "                    else:\n", | ||||
|     "                        stemmed_token = self.stemmer.stem(token)\n", | ||||
|     "                    conllu_sent.add(ConlluElement(\n", | ||||
|     "                        id=self.id_counter + 1,\n", | ||||
|     "                        form=token,\n", | ||||
|     "                        lemma=stemmed_token\n", | ||||
|     "                    ))\n", | ||||
|     "                    self.id_counter += 1\n", | ||||
|     "                conllu_doc.add(conllu_sent)\n", | ||||
|     "            self.conllu_documents.append(conllu_doc)\n", | ||||
|     "            i += 1\n", | ||||
|     "\n", | ||||
|     "    def pos_tagging(self):\n", | ||||
|     "        for conllu_document in self.conllu_documents:\n", | ||||
|     "            for conllu_sent in conllu_document.conllu_sentences:\n", | ||||
|     "                tokens = [x.form for x in conllu_sent.conllu_elements]\n", | ||||
|     "                pos_tags = pos_tag(tokens)\n", | ||||
|     "                simplified_tags = [map_tag('en-ptb', 'universal', tag)\n", | ||||
|     "                                for word, tag in pos_tags]\n", | ||||
|     "\n", | ||||
|     "                for i in range(len(tokens)):\n", | ||||
|     "                    conllu_elem = conllu_sent.conllu_elements[i]\n", | ||||
|     "                    conllu_elem.upos = simplified_tags[i]\n", | ||||
|     "                    conllu_elem.xpos = pos_tags[i][1]\n", | ||||
|     "\n", | ||||
|     "    def add_misc_value_by_list(self, key, value, stemmed_keyword_list):\n", | ||||
|     "        for conllu_document in self.conllu_documents:\n", | ||||
|     "            for conllu_sent in conllu_document.conllu_sentences:\n", | ||||
|     "                for elem in conllu_sent.conllu_elements:\n", | ||||
|     "                    if elem.lemma in stemmed_keyword_list:\n", | ||||
|     "                        elem.add_misc(key, value)\n", | ||||
|     "\n", | ||||
|     "    def __repr__(self):\n", | ||||
|     "        result = \"\"\n", | ||||
|     "        for document in self.conllu_documents:\n", | ||||
|     "            result += document.__repr__() + \"\\n\"\n", | ||||
|     "        return result\n", | ||||
|     "\n", | ||||
|     "    def __str__(self):\n", | ||||
|     "        return self.__repr__()" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [] | ||||
|   } | ||||
|  ], | ||||
|  "metadata": { | ||||
|   "kernelspec": { | ||||
|    "display_name": "Python 3", | ||||
|    "language": "python", | ||||
|    "name": "python3" | ||||
|   }, | ||||
|   "language_info": { | ||||
|    "codemirror_mode": { | ||||
|     "name": "ipython", | ||||
|     "version": 3 | ||||
|    }, | ||||
|    "file_extension": ".py", | ||||
|    "mimetype": "text/x-python", | ||||
|    "name": "python", | ||||
|    "nbconvert_exporter": "python", | ||||
|    "pygments_lexer": "ipython3", | ||||
|    "version": "3.7.3" | ||||
|   } | ||||
|  }, | ||||
|  "nbformat": 4, | ||||
|  "nbformat_minor": 4 | ||||
| } | ||||
| @ -1,4 +1,14 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # coding: utf-8 | ||||
|  | ||||
| # # Conllu Generator | ||||
| #  | ||||
| # tools for creating: | ||||
| # * conllu tokens | ||||
| # * conllu sentences | ||||
| # * conllu documents | ||||
|  | ||||
| # ## imports and settings | ||||
|  | ||||
| import nltk | ||||
| from nltk.tag import pos_tag, map_tag | ||||
| @ -8,7 +18,7 @@ from stemmed_mwe_tokenizer import StemmedMWETokenizer | ||||
|  | ||||
|  | ||||
| CONLLU_ATTRIBUTES = [ | ||||
|     "id", | ||||
|     "id",s | ||||
|     "form", | ||||
|     "lemma", | ||||
|     "upos", | ||||
| @ -20,6 +30,7 @@ CONLLU_ATTRIBUTES = [ | ||||
|     "misc" | ||||
| ] | ||||
|  | ||||
|  | ||||
| # taken from: https://stackoverflow.com/a/16053211 | ||||
|  | ||||
|  | ||||
| @ -35,6 +46,8 @@ def replace_tab(s, tabstop=4): | ||||
|     return result | ||||
|  | ||||
|  | ||||
| # ## Conllu Dict Class | ||||
|  | ||||
| class ConlluDict(dict): | ||||
|  | ||||
|     def from_str(self, s: str): | ||||
| @ -57,6 +70,8 @@ class ConlluDict(dict): | ||||
|         return self.__repr__() | ||||
|  | ||||
|  | ||||
| # ## Conllu Element Class | ||||
|  | ||||
| class ConlluElement(object): | ||||
|     # class uses format described here: https://universaldependencies.org/format.html | ||||
|     def __init__( | ||||
| @ -102,6 +117,8 @@ class ConlluElement(object): | ||||
|         return replace_tab(result, 16) | ||||
|  | ||||
|  | ||||
| # ## Conllu Sentence Class | ||||
|  | ||||
| class ConlluSentence(object): | ||||
|     def __init__(self): | ||||
|         self.conllu_elements = [] | ||||
| @ -120,6 +137,8 @@ class ConlluSentence(object): | ||||
|         return self.__repr__() | ||||
|  | ||||
|  | ||||
| # ## Conllu Document Class | ||||
|  | ||||
| class ConlluDocument(object): | ||||
|     def __init__(self, id=None): | ||||
|         self.conllu_sentences = [] | ||||
| @ -141,6 +160,8 @@ class ConlluDocument(object): | ||||
|         return self.__repr__() | ||||
|  | ||||
|  | ||||
| # ## Conllu Generator Class | ||||
|  | ||||
| class ConlluGenerator(object): | ||||
|     def __init__(self, documents: list, stemmed_multi_word_tokens, stemmer=PorterStemmer(), ids=None): | ||||
|         self.documents = documents | ||||
| @ -223,3 +244,7 @@ class ConlluGenerator(object): | ||||
|  | ||||
|     def __str__(self): | ||||
|         return self.__repr__() | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
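A minimal end-to-end sketch of the generator with toy inputs (the real pipeline in recipe_conllu_generator feeds recipe instructions plus the stemmed multi-word ingredient list; NLTK tokenizer and tagger data must be installed):

```python
from conllu_generator import ConlluGenerator

docs = ["Chop the onions.\nServe warm.\n"]  # toy documents, one sentence per line
mwe = []  # no multi-word ingredients here (assumes StemmedMWETokenizer accepts an empty list)

cg = ConlluGenerator(docs, mwe)   # default stemmer: PorterStemmer()
cg.tokenize_and_stem()
cg.pos_tagging()
cg.add_misc_value_by_list("food_type", "ingredient", ["onion"])  # matched against lemmas
print(cg)  # CoNLL-U style text, one '# newdoc' block per input document
```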
210 Tagging/crf_data_generator.ipynb Normal file
							| @ -0,0 +1,210 @@ | ||||
| { | ||||
|  "cells": [ | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "# crf data Generator" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 1, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "import conllu_batch_generator as cbg" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 2, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "def word2features(sent, i):\n", | ||||
|     "    word = sent[i]['form']\n", | ||||
|     "    postag = sent[i]['upostag']\n", | ||||
|     "    features = [\n", | ||||
|     "        'bias',\n", | ||||
|     "        #'word.lower=' + word.lower(),\n", | ||||
|     "        'word[-3:]=' + word[-3:],\n", | ||||
|     "        'word[-2:]=' + word[-2:],\n", | ||||
|     "        'word.isupper=%s' % word.isupper(),\n", | ||||
|     "        'word.istitle=%s' % word.istitle(),\n", | ||||
|     "        'word.isdigit=%s' % word.isdigit(),\n", | ||||
|     "        'postag=' + postag,\n", | ||||
|     "        'postag[:2]=' + postag[:2],\n", | ||||
|     "    ]\n", | ||||
|     "    if i > 0:\n", | ||||
|     "        word1 = sent[i-1]['form']\n", | ||||
|     "        postag1 = sent[i-1]['upostag']\n", | ||||
|     "        features.extend([\n", | ||||
|     "            '-1:word.lower=' + word1.lower(),\n", | ||||
|     "            '-1:word.istitle=%s' % word1.istitle(),\n", | ||||
|     "            '-1:word.isupper=%s' % word1.isupper(),\n", | ||||
|     "            '-1:postag=' + postag1,\n", | ||||
|     "            '-1:postag[:2]=' + postag1[:2],\n", | ||||
|     "        ])\n", | ||||
|     "        if i > 1:\n", | ||||
|     "            word1 = sent[i-2]['form']\n", | ||||
|     "            postag1 = sent[i-2]['upostag']\n", | ||||
|     "            features.extend([\n", | ||||
|     "                '-2:word.lower=' + word1.lower(),\n", | ||||
|     "                '-2:word.istitle=%s' % word1.istitle(),\n", | ||||
|     "                '-2:word.isupper=%s' % word1.isupper(),\n", | ||||
|     "                '-2:postag=' + postag1,\n", | ||||
|     "                '-2:postag[:2]=' + postag1[:2],\n", | ||||
|     "            ])\n", | ||||
|     "    else:\n", | ||||
|     "        features.append('BOS')\n", | ||||
|     "\n", | ||||
|     "    if i < len(sent)-1:\n", | ||||
|     "        word1 = sent[i+1]['form']\n", | ||||
|     "        postag1 = sent[i+1]['upostag']\n", | ||||
|     "        features.extend([\n", | ||||
|     "            '+1:word.lower=' + word1.lower(),\n", | ||||
|     "            '+1:word.istitle=%s' % word1.istitle(),\n", | ||||
|     "            '+1:word.isupper=%s' % word1.isupper(),\n", | ||||
|     "            '+1:postag=' + postag1,\n", | ||||
|     "            '+1:postag[:2]=' + postag1[:2],\n", | ||||
|     "        ])\n", | ||||
|     "        if i < len(sent)-2:\n", | ||||
|     "            word1 = sent[i+1]['form']\n", | ||||
|     "            postag1 = sent[i+1]['upostag']\n", | ||||
|     "            features.extend([\n", | ||||
|     "                '+2:word.lower=' + word1.lower(),\n", | ||||
|     "                '+2:word.istitle=%s' % word1.istitle(),\n", | ||||
|     "                '+2:word.isupper=%s' % word1.isupper(),\n", | ||||
|     "                '+2:postag=' + postag1,\n", | ||||
|     "                '+2:postag[:2]=' + postag1[:2],\n", | ||||
|     "            ])\n", | ||||
|     "    else:\n", | ||||
|     "        features.append('EOS')\n", | ||||
|     "\n", | ||||
|     "    return features" | ||||
|    ] | ||||
|   }, | ||||
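For one token the extractor returns a flat list of 'name=value' feature strings; a toy two-token sentence makes the layout visible (plain dicts stand in for the conllu TokenList entries the reader yields):

```python
sent = [{'form': 'Chop', 'upostag': 'VERB'},
        {'form': 'onions', 'upostag': 'NOUN'}]

word2features(sent, 0)
# ['bias', 'word[-3:]=hop', 'word[-2:]=op', 'word.isupper=False',
#  'word.istitle=True', 'word.isdigit=False', 'postag=VERB', 'postag[:2]=VE',
#  'BOS',
#  '+1:word.lower=onions', '+1:word.istitle=False', '+1:word.isupper=False',
#  '+1:postag=NOUN', '+1:postag[:2]=NO']
```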
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 3, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "def sent2labels(sent):\n", | ||||
|     "    labels = []\n", | ||||
|     "    for token in sent:\n", | ||||
|     "        if token['misc'] is not None and 'food_type' in token['misc']:\n", | ||||
|     "            labels.append(token['misc']['food_type'])\n", | ||||
|     "        else:\n", | ||||
|     "            labels.append(\"0\")\n", | ||||
|     "    return labels" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 4, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "def sent2features(sent):\n", | ||||
|     "    return [word2features(sent, i) for i in range(len(sent))]" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 5, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "def sent2tokens(sent):\n", | ||||
|     "    return [token['form'] for token in sent]" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 6, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "def feature2tokens(sent):\n", | ||||
|     "    return [t[1].split(\"=\")[1] for t in sent]" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 7, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluCRFReaderIterator(object):\n", | ||||
|     "    def __init__(self, parent):\n", | ||||
|     "        self._parent = parent\n", | ||||
|     "        self._iter = self._parent._conllu_reader.__iter__()\n", | ||||
|     "\n", | ||||
|     "    def __next__(self):\n", | ||||
|     "        features = None\n", | ||||
|     "        labels = None\n", | ||||
|     "        tokens = None\n", | ||||
|     "\n", | ||||
|     "        if not self._parent._iter_documents:\n", | ||||
|     "            next_sent = self._iter.__next__()[0]\n", | ||||
|     "            features = sent2features(next_sent)\n", | ||||
|     "            labels = sent2labels(next_sent)\n", | ||||
|     "            tokens = sent2tokens(next_sent)\n", | ||||
|     "        else:\n", | ||||
|     "            next_doc = self._iter.__next__()\n", | ||||
|     "            features = [sent2features(sentence) for sentence in next_doc]\n", | ||||
|     "            labels = [sent2labels(sentence) for sentence in next_doc]\n", | ||||
|     "            tokens = [sent2tokens(sentence) for sentence in next_doc]\n", | ||||
|     "\n", | ||||
|     "        return features, labels, tokens" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 8, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "class ConlluCRFReader(object):\n", | ||||
|     "    def __init__(self, path, iter_documents=False):\n", | ||||
|     "        self._path = path\n", | ||||
|     "        self._iter_documents = iter_documents\n", | ||||
|     "\n", | ||||
|     "        self._conllu_reader = cbg.ConlluReader(path, iter_documents)\n", | ||||
|     "\n", | ||||
|     "    def __iter__(self):\n", | ||||
|     "        return ConlluCRFReaderIterator(self)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [] | ||||
|   } | ||||
|  ], | ||||
|  "metadata": { | ||||
|   "kernelspec": { | ||||
|    "display_name": "Python 3", | ||||
|    "language": "python", | ||||
|    "name": "python3" | ||||
|   }, | ||||
|   "language_info": { | ||||
|    "codemirror_mode": { | ||||
|     "name": "ipython", | ||||
|     "version": 3 | ||||
|    }, | ||||
|    "file_extension": ".py", | ||||
|    "mimetype": "text/x-python", | ||||
|    "name": "python", | ||||
|    "nbconvert_exporter": "python", | ||||
|    "pygments_lexer": "ipython3", | ||||
|    "version": "3.7.3" | ||||
|   } | ||||
|  }, | ||||
|  "nbformat": 4, | ||||
|  "nbformat_minor": 4 | ||||
| } | ||||
| @ -1,4 +1,7 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # coding: utf-8 | ||||
|  | ||||
| # # CRF Data Generator | ||||
|  | ||||
| import conllu_batch_generator as cbg | ||||
|  | ||||
| @ -121,3 +124,7 @@ class ConlluCRFReader(object): | ||||
|  | ||||
|     def __iter__(self): | ||||
|         return ConlluCRFReaderIterator(self) | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
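Each iteration step yields a (features, labels, tokens) triple per sentence, which drops straight into a pycrfsuite trainer — a sketch using the file name from the notebook at the top of this commit (the model file name is arbitrary):

```python
import pycrfsuite
import crf_data_generator as cdg

data = cdg.ConlluCRFReader("recipes2.conllu")

trainer = pycrfsuite.Trainer(verbose=False)
for features, labels, tokens in data:
    trainer.append(features, labels)   # one sequence per sentence
trainer.train('crf.model')
```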
162 Tagging/recipe_conllu_generator.ipynb Normal file
							| @ -0,0 +1,162 @@ | ||||
| { | ||||
|  "cells": [ | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "# Recipe Conllu Generator" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "import sys\n", | ||||
|     "sys.path.insert(0, '..')\n", | ||||
|     "\n", | ||||
|     "from conllu_generator import ConlluDict, ConlluElement, ConlluDocument, ConlluGenerator\n", | ||||
|     "import settings\n", | ||||
|     "import importlib.util\n", | ||||
|     "from json_buffered_reader import JSON_buffered_reader as JSON_br" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "# loading ingredients:\n", | ||||
|     "spec = importlib.util.spec_from_file_location(\n", | ||||
|     "    \"ingredients\", \"../\" + settings.ingredients_file)\n", | ||||
|     "ingredients = importlib.util.module_from_spec(spec)\n", | ||||
|     "spec.loader.exec_module(ingredients)\n", | ||||
|     "\n", | ||||
|     "# loading actions:\n", | ||||
|     "spec = importlib.util.spec_from_file_location(\n", | ||||
|     "    \"ingredients\", \"../\" + settings.actions_file)\n", | ||||
|     "actions = importlib.util.module_from_spec(spec)\n", | ||||
|     "spec.loader.exec_module(actions)\n", | ||||
|     "\n", | ||||
|     "# skipping recipes:\n", | ||||
|     "n_skipped_recipes = int(sys.argv[1]) if len(sys.argv) > 1 else 0\n", | ||||
|     "print(\"start reading at recipe \" + str(n_skipped_recipes))\n", | ||||
|     "\n", | ||||
|     "# settings:\n", | ||||
|     "recipe_buffer_size = 1000\n", | ||||
|     "recipe_buffers_per_file = 5\n", | ||||
|     "\n", | ||||
|     "\n", | ||||
|     "# create reader\n", | ||||
|     "buffered_reader_1M = JSON_br(\"../\" + settings.one_million_recipes_file)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "def process_instructions(instructions: list, document_ids=None):\n", | ||||
|     "\n", | ||||
|     "    if len(instructions) == 0:\n", | ||||
|     "        return\n", | ||||
|     "\n", | ||||
|     "    conllu_input_docs = instructions\n", | ||||
|     "\n", | ||||
|     "    cg = ConlluGenerator(\n", | ||||
|     "        conllu_input_docs, ingredients.multi_word_ingredients_stemmed, ids=document_ids)\n", | ||||
|     "    cg.tokenize_and_stem()\n", | ||||
|     "    cg.pos_tagging()\n", | ||||
|     "    cg.add_misc_value_by_list(\"food_type\", \"ingredient\", [w.replace(\" \",\"_\") for w in ingredients.multi_word_ingredients_stemmed] + ingredients.ingredients_stemmed)\n", | ||||
|     "    cg.add_misc_value_by_list(\"food_type\", \"action\", actions.stemmed_cooking_verbs)\n", | ||||
|     "\n", | ||||
|     "    savefile.write(str(cg))" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "i = 0\n", | ||||
|     "buffer_count = n_skipped_recipes % recipe_buffer_size\n", | ||||
|     "file_count = n_skipped_recipes // (recipe_buffer_size * recipe_buffers_per_file)\n", | ||||
|     "\n", | ||||
|     "savefile = open(f\"recipes{file_count}.conllu\", 'w')\n", | ||||
|     "instructions = []\n", | ||||
|     "ids = []" | ||||
|    ] | ||||
|   }, | ||||
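The resume arithmetic behind these counters: with recipe_buffer_size = 1000 and recipe_buffers_per_file = 5, each recipes{N}.conllu holds 5000 recipes, so skipping, say, 12000 already-processed recipes (the CLI argument is illustrative) lands on clean boundaries:

```python
n_skipped_recipes = 12000          # e.g. `python recipe_conllu_generator.py 12000`
buffer_count = 12000 % 1000        # 0 -> aligned with a buffer boundary
file_count = 12000 // (1000 * 5)   # 2 -> writing continues in recipes2.conllu
```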
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "for raw_recipe in buffered_reader_1M:\n", | ||||
|     "\n", | ||||
|     "    i += 1\n", | ||||
|     "\n", | ||||
|     "    if i > n_skipped_recipes:\n", | ||||
|     "\n", | ||||
|     "        instruction = \"\"\n", | ||||
|     "        for item in raw_recipe['instructions']:\n", | ||||
|     "            instruction += item['text'] + '\\n'\n", | ||||
|     "        ids.append(raw_recipe['id'])\n", | ||||
|     "\n", | ||||
|     "        instructions.append(instruction)\n", | ||||
|     "\n", | ||||
|     "        if i % recipe_buffer_size == 0:\n", | ||||
|     "            process_instructions(instructions, ids)\n", | ||||
|     "            print(f\"processed {i} recipes\")\n", | ||||
|     "            instructions = []\n", | ||||
|     "            ids = []\n", | ||||
|     "            buffer_count += 1\n", | ||||
|     "            if buffer_count % recipe_buffers_per_file == 0:\n", | ||||
|     "                savefile.close()\n", | ||||
|     "                file_count += 1\n", | ||||
|     "                savefile = open(f\"recipes{file_count}.conllu\", 'w')\n", | ||||
|     "    " | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "            \n", | ||||
|     "\n", | ||||
|     "process_instructions(instructions)\n", | ||||
|     "print(f\"processed {i} recipes\")\n", | ||||
|     "\n", | ||||
|     "savefile.close()" | ||||
|    ] | ||||
|   } | ||||
|  ], | ||||
|  "metadata": { | ||||
|   "kernelspec": { | ||||
|    "display_name": "Python 3", | ||||
|    "language": "python", | ||||
|    "name": "python3" | ||||
|   }, | ||||
|   "language_info": { | ||||
|    "codemirror_mode": { | ||||
|     "name": "ipython", | ||||
|     "version": 3 | ||||
|    }, | ||||
|    "file_extension": ".py", | ||||
|    "mimetype": "text/x-python", | ||||
|    "name": "python", | ||||
|    "nbconvert_exporter": "python", | ||||
|    "pygments_lexer": "ipython3", | ||||
|    "version": "3.7.3" | ||||
|   } | ||||
|  }, | ||||
|  "nbformat": 4, | ||||
|  "nbformat_minor": 4 | ||||
| } | ||||
| @ -1,4 +1,7 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # coding: utf-8 | ||||
|  | ||||
| # # Recipe Conllu Generator | ||||
|  | ||||
| import sys | ||||
| sys.path.insert(0, '..') | ||||
| @ -9,7 +12,6 @@ import importlib.util | ||||
| from json_buffered_reader import JSON_buffered_reader as JSON_br | ||||
|  | ||||
|  | ||||
|  | ||||
| # loading ingredients: | ||||
| spec = importlib.util.spec_from_file_location( | ||||
|     "ingredients", "../" + settings.ingredients_file) | ||||
| @ -34,7 +36,6 @@ recipe_buffers_per_file = 5 | ||||
| # create reader | ||||
| buffered_reader_1M = JSON_br("../" + settings.one_million_recipes_file) | ||||
|  | ||||
| # open savefile: | ||||
|  | ||||
| def process_instructions(instructions: list, document_ids=None): | ||||
|  | ||||
| @ -52,6 +53,7 @@ def process_instructions(instructions: list, document_ids=None): | ||||
|  | ||||
|     savefile.write(str(cg)) | ||||
|  | ||||
|  | ||||
| i = 0 | ||||
| buffer_count = n_skipped_recipes % recipe_buffer_size | ||||
| file_count = n_skipped_recipes // (recipe_buffer_size * recipe_buffers_per_file) | ||||
| @ -60,6 +62,7 @@ savefile = open(f"recipes{file_count}.conllu", 'w') | ||||
| instructions = [] | ||||
| ids = [] | ||||
|  | ||||
|  | ||||
| for raw_recipe in buffered_reader_1M: | ||||
|  | ||||
|     i += 1 | ||||
| @ -84,11 +87,12 @@ for raw_recipe in buffered_reader_1M: | ||||
|                 file_count += 1 | ||||
|                 savefile = open(f"recipes{file_count}.conllu", 'w') | ||||
|      | ||||
|      | ||||
|      | ||||
|              | ||||
|  | ||||
|  | ||||
|  | ||||
|  | ||||
| process_instructions(instructions) | ||||
| print(f"processed {i} recipes") | ||||
|  | ||||
| savefile.close() | ||||
|  | ||||
|  | ||||