{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# NLP-LAB Exercise 01 by jonas weinz\n", "----" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "%matplotlib ipympl\n", "import nltk\n", "import pprint\n", "from sklearn.tree import DecisionTreeClassifier\n", "from sklearn.feature_extraction import DictVectorizer\n", "from sklearn.pipeline import Pipeline" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## implementing own classifiers" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* writing an own feature funtion" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "def features(sentence, index):\n", " word = sentence[index]\n", " is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n", " sentence_length = len(sentence)\n", " relative_third = (index * 3) // sentence_length \n", " vowels = word.count('a') + word.count('e') + word.count('i') + word.count('o') + word.count('u')\n", " return {\n", " 'word': word,\n", " 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n", " 'prefix-1': sentence[index][0],\n", " 'suffix-1': sentence[index][-1],\n", " 'prefix-2': sentence[index][1] if len(word) > 1 else '',\n", " 'suffix-2': sentence[index][-2] if len(word) > 1 else '',\n", " 'prev_word': '' if index == 0 else sentence[index - 1],\n", " 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n", " 'length': len(word),\n", " 'index' : index,\n", " 'rev_index': len(sentence) - index,\n", " 'sentence_length_': len(sentence),\n", " 'relative_third': relative_third,\n", " 'numerical': word.isnumeric(),\n", " 'is_punctuation_mark': is_punctuation_mark,\n", " ',': word == \",\",\n", " '.': word == \".\",\n", " '!': word == \"!\",\n", " '?': word == \"?\",\n", " 'vowels' : vowels\n", " }" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "#test_sentence = ['The','cake','is','a','lie','!']\n", "#for i in range(len(test_sentence)):\n", "# pprint.pprint(features(test_sentence, i))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* function for creating training sets" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "def untag(tagged_sentence):\n", " return [w for w,t in tagged_sentence]\n", "\n", "def transform_to_dataset(tagged_sentences):\n", " X,y = [], []\n", " \n", " for s in tagged_sentences:\n", " for i in range(len(s)):\n", " X.append(features(untag(s),i))\n", " y.append(s[i][1])\n", " return X,y\n", "\n", "def create_training_and_test_set(annotated_sentences, relative_cutoff):\n", " cutoff = int(relative_cutoff * len(annotated_sentences))\n", " training_sentences = annotated_sentences[:cutoff]\n", " test_sentences = annotated_sentences[cutoff:]\n", " \n", " X,y = transform_to_dataset(training_sentences)\n", " tX, ty = transform_to_dataset(test_sentences)\n", " \n", " return X,y,tX,ty" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* Decision Tree classifier" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "def train_classifier(X,y,classifier,max_size=10000):\n", " clf = Pipeline([\n", " ('vectorizer', DictVectorizer(sparse=False)),\n", " ('classifier', classifier)\n", " ])\n", " \n", " print(\"start training…\")\n", " \n", " clf.fit(\n", " X if len(X) < max_size else X[:max_size],\n", " y if len(y) < max_size else y[:max_size]\n", " )\n", " \n", 
" print(\"training done\")\n", " \n", " return clf" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* classifier evaluater" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "def test_classifier(clf, tX, ty):\n", " accuracy = clf.score(tX, ty)\n", " print(\"Accuracy: \", accuracy)\n", " # TODO: more analytics\n", " return accuracy" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Task 01:" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Performance 1\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Model 01\n", "* train and testing english custom POS tagger model:" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "def model_01(X,y,tX,ty, max_size=1000):\n", " #classifier = DecisionTreeClassifier(criterion='entropy')\n", " from sklearn.neural_network import MLPClassifier\n", " model01_clf = train_classifier(X,y,MLPClassifier(),max_size=max_size)\n", " return test_classifier(clf=model01_clf, tX=tX, ty=ty)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Model 02" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "def model_02(tX,ty):\n", " m2_y = nltk.pos_tag([w['word'] for w in tX])\n", " # compare results\n", " n_correct = sum((1 if m2_y[i][1] == ty[i] else 0) for i in range(len(ty)))\n", " return n_correct / len(ty)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Model 03" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "def model_03(corpus_tagged, corpus_sents, cut=0.8):\n", " \n", " patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n", " (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n", " \n", " s = int(len(corpus_tagged) * cut)\n", " train_sents = corpus_tagged[:s]\n", " test_sents = corpus_tagged[s:]\n", " \n", " models = {\n", " 'def_model': nltk.DefaultTagger('NN'),\n", " 'regexp_model': nltk.RegexpTagger(patterns),\n", " 'uni_model': nltk.UnigramTagger(train_sents),\n", " 'bi_model': nltk.BigramTagger(train_sents),\n", " 'tri_model': nltk.TrigramTagger(train_sents)\n", " }\n", " \n", " performance = {}\n", " for name,model in models.items():\n", " performance[name] = model.evaluate(test_sents)\n", " \n", " return performance\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Applying models on Datasets" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[('Pierre', 'NNP'),\n", " ('Vinken', 'NNP'),\n", " (',', ','),\n", " ('61', 'CD'),\n", " ('years', 'NNS'),\n", " ('old', 'JJ'),\n", " (',', ','),\n", " ('will', 'MD'),\n", " ('join', 'VB'),\n", " ('the', 'DT'),\n", " ('board', 'NN'),\n", " ('as', 'IN'),\n", " ('a', 'DT'),\n", " ('nonexecutive', 'JJ'),\n", " ('director', 'NN'),\n", " ('Nov.', 'NNP'),\n", " ('29', 'CD'),\n", " ('.', '.')]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "P1.1\n", "start training…\n", "training done\n", "Accuracy: 0.7755377014821099\n", "P1.2\n", "P1.3\n", "P1.4\n", "start training…\n", "training done\n", "Accuracy: 0.63253390325317\n", "P1.5\n", "P1.6\n", "{'P1.1': 0.7755377014821099,\n", " 'P1.2': 0.8936074654423873,\n", " 'P1.3 -- bi_model': 0.1132791057437996,\n", " 'P1.3 -- def_model': 0.1447677029791906,\n", " 'P1.3 -- regexp_model': 0.24232746145017217,\n", " 
{ "cell_type": "markdown", "metadata": {}, "source": [ "### Applying models on Datasets" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[('Pierre', 'NNP'),\n", " ('Vinken', 'NNP'),\n", " (',', ','),\n", " ('61', 'CD'),\n", " ('years', 'NNS'),\n", " ('old', 'JJ'),\n", " (',', ','),\n", " ('will', 'MD'),\n", " ('join', 'VB'),\n", " ('the', 'DT'),\n", " ('board', 'NN'),\n", " ('as', 'IN'),\n", " ('a', 'DT'),\n", " ('nonexecutive', 'JJ'),\n", " ('director', 'NN'),\n", " ('Nov.', 'NNP'),\n", " ('29', 'CD'),\n", " ('.', '.')]" ] }, "metadata": {}, "output_type": "display_data" }, { "name": "stdout", "output_type": "stream", "text": [ "P1.1\n", "start training…\n", "training done\n", "Accuracy: 0.7755377014821099\n", "P1.2\n", "P1.3\n", "P1.4\n", "start training…\n", "training done\n", "Accuracy: 0.63253390325317\n", "P1.5\n", "P1.6\n", "{'P1.1': 0.7755377014821099,\n", " 'P1.2': 0.8936074654423873,\n", " 'P1.3 -- bi_model': 0.1132791057437996,\n", " 'P1.3 -- def_model': 0.1447677029791906,\n", " 'P1.3 -- regexp_model': 0.24232746145017217,\n", " 'P1.3 -- tri_model': 0.06736863116922003,\n", " 'P1.3 -- uni_model': 0.8608213982733669,\n", " 'P1.4': 0.63253390325317,\n", " 'P1.5': 0.6044583741861567,\n", " 'P1.6 -- bi_model': 0.1132791057437996,\n", " 'P1.6 -- def_model': 0.1447677029791906,\n", " 'P1.6 -- regexp_model': 0.24232746145017217,\n", " 'P1.6 -- tri_model': 0.06736863116922003,\n", " 'P1.6 -- uni_model': 0.8608213982733669}\n" ] } ], "source": [ "performances = {}\n", "\n", "treebank_tagged = nltk.corpus.treebank.tagged_sents()\n", "treebank_sents = nltk.corpus.treebank.sents()\n", "\n", "brown_tagged = nltk.corpus.brown.tagged_sents()#(categories='news')\n", "brown_sents = nltk.corpus.brown.sents()#(categories='news')\n", "\n", "display(treebank_tagged[0])\n", "\n", "X1,y1,tX1,ty1 = create_training_and_test_set(annotated_sentences=treebank_tagged, \n", " relative_cutoff=0.8)\n", "\n", "X2,y2,tX2,ty2 = create_training_and_test_set(annotated_sentences=brown_tagged, \n", " relative_cutoff=0.8)\n", "\n", "\n", "print(\"P1.1\")\n", "performances['P1.1'] = model_01(X1,y1,tX1,ty1)\n", "\n", "print(\"P1.2\")\n", "performances['P1.2'] = model_02(tX1,ty1)\n", "\n", "print(\"P1.3\")\n", "p3 = model_03(treebank_tagged, treebank_sents)\n", "for k,v in p3.items():\n", " performances[\"P1.3 -- \" + k] = v\n", "\n", "print(\"P1.4\")\n", "performances['P1.4'] = model_01(X2,y2,tX2,ty2)\n", "\n", "print(\"P1.5\")\n", "performances['P1.5'] = model_02(tX2,ty2)\n", "\n", "print(\"P1.6\")\n", "p6 = model_03(brown_tagged, brown_sents)\n", "for k,v in p6.items():\n", " performances[\"P1.6 -- \" + k] = v\n", "\n", "pprint.pprint(performances)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Plotting Data" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "c6d8e1186c1f44dcb77b146346b1dedb", "version_major": 2, "version_minor": 0 }, "text/plain": [ "FigureCanvasNbAgg()" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "#weights = clf.named_steps['classifier'].feature_importances_\n", "#labels = clf.named_steps['vectorizer'].get_feature_names()\n", "\n", "#sort\n", "#weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n", "\n", "fig_1, ax_1 = plt.subplots()\n", "plt.bar(np.arange(len(performances)), performances.values())\n", "plt.xticks(np.arange(len(performances)), performances.keys(), rotation=30, ha='right')\n", "plt.tight_layout()\n", "plt.show()\n" ] },
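{ "cell_type": "markdown", "metadata": {}, "source": [ "* the commented-out lines in the cell above hint at plotting feature importances; a minimal sketch of that idea, assuming the pipeline is trained with a `DecisionTreeClassifier` (which exposes `feature_importances_`; `MLPClassifier` does not):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# sketch: train a tree-based pipeline and list its most important features\n", "tree_clf = train_classifier(X1, y1, DecisionTreeClassifier(criterion='entropy'), max_size=1000)\n", "weights = tree_clf.named_steps['classifier'].feature_importances_\n", "labels = tree_clf.named_steps['vectorizer'].get_feature_names()\n", "\n", "# sort ascending by importance and show the ten most important features\n", "weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n", "for w, l in zip(weights[-10:], labels[-10:]):\n", " print(l, w)" ] },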
\n" ], "text/plain": [ "FigureCanvasNbAgg()" ] }, "metadata": {}, "output_type": "display_data" } ], "source": [ "import matplotlib.pyplot as plt\n", "import numpy as np\n", "#weights = clf.named_steps['classifier'].feature_importances_\n", "#labels = clf.named_steps['vectorizer'].get_feature_names()\n", "\n", "#sort\n", "#weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n", "\n", "fig_1, ax_1 = plt.subplots()\n", "plt.bar(np.arange(len(performances)), performances.values())\n", "plt.xticks(np.arange(len(performances)), performances.keys(), rotation=30, ha='right')\n", "plt.tight_layout()\n", "plt.show()\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "----\n", "## Task 2" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [], "source": [ "WORDS = 'words' #: column type for words\n", "POS = 'pos' #: column type for part-of-speech tags\n", "TREE = 'tree' #: column type for parse trees\n", "CHUNK = 'chunk' #: column type for chunk structures\n", "NE = 'ne' #: column type for named entities\n", "SRL = 'srl' #: column type for semantic role labels\n", "IGNORE = 'ignore' #: column type for column that should be ignored\n", "train_path = \"./ru_syntagrus-ud-train.conllu\"\n", "ru_corp = nltk.corpus.ConllCorpusReader(root=\"./\", \n", " fileids=[\"ru_syntagrus-ud-train-uncommented.conllu\"],\n", " columntypes=[IGNORE, WORDS, IGNORE, POS],\n", " encoding='utf-8')\n" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [], "source": [ "ru_tagged = ru_corp.tagged_sents()\n", "\n", "\n", "\n", "X3,y3,tX3,ty3 = create_training_and_test_set(annotated_sentences=ru_tagged, \n", " relative_cutoff=0.8)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Model 04, Performance 2.1" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "P2.1\n", "start training…\n", "training done\n", "Accuracy: 0.7079014288483687\n", "0.7079014288483687\n" ] } ], "source": [ "print(\"P2.1\")\n", "performances2 = {}\n", "performances2['P2.1'] = model_01(X3,y3,tX3,ty3, max_size=1000)\n", "print(performances2['P2.1'])" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "#### Model 05, Performance 2.2" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "# write russian text out to file:\n", "f = open(\"ru_text.txt\", 'w')\n", "for sentence in ru_tagged:\n", " for word, tag in sentence:\n", " f.write(word + \" \")\n", " f.write(\"\\n\")\n", "f.close()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "* download the python 3 fork of the rdrpos-tagger: https://github.com/jacopofar/RDRPOSTagger-python-3\n", "* adjust `RDRPOS_TAGGER_PATH` to match with the download location" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/home/jonas/Dokumente/gitRepos/NLP-LAB/Jonas_Solutions\n" ] } ], "source": [ "import sys, os\n", "\n", "dir_path = os.getcwd()\n", "print(dir_path)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "['Node', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'tabStr']\n" ] } ], "source": [ "RDRPOS_TAGGER_PATH = r\"/home/jonas/src/RDRPOSTagger-python-3/pSCRDRtagger/\"\n", "\n", "sys.path.insert(0, RDRPOS_TAGGER_PATH)\n", 
"os.chdir(RDRPOS_TAGGER_PATH)\n", "\n", "import RDRPOSTagger as model05_tagger \n", "\n", "r = model05_tagger.RDRPOSTagger()\n", "r.constructSCRDRtreeFromRDRfile(\"../Models/UniPOS/UD_Russian-SynTagRus/train.UniPOS.RDR\")\n", "DICT = model05_tagger.readDictionary(\"../Models/UniPOS/UD_Russian-SynTagRus/train.UniPOS.DICT\")\n", "\n", "os.chdir(dir_path)\n", "\n", "r.tagRawCorpus(DICT, \"ru_text.txt\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "tagged_words = []\n", "f = open(\"ru_text.txt.TAGGED\", 'r')\n", "for line in f:\n", " for splits in line.split():\n", " cmp = splits.rsplit('/',1)\n", " if len(cmp) != 2:\n", " print(\"error parsing: \", cmp)\n", " else:\n", " w,t = cmp\n", " tagged_words.append((w,t))\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "score_2_2 = 0\n", "i = 0\n", "for sent in ru_tagged:\n", " for tagged_w in sent:\n", " if tagged_w[1] == tagged_words[i][1]:\n", " score_2_2 += 1\n", " i += 1\n", "performances2['P2.2'] = score_2_2 / len(tagged_words)\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Results of performance 2.2" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "pprint.pprint(performances2)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig_2, ax_2 = plt.subplots()\n", "plt.bar(np.arange(len(performances2)), performances2.values())\n", "plt.xticks(np.arange(len(performances2)), performances2.keys(), rotation=30, ha='right')\n", "plt.tight_layout()\n", "plt.show()\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.6.5" } }, "nbformat": 4, "nbformat_minor": 2 }