diff --git a/Carsten_Solutions/Task 1 - Carsten Draschner.ipynb b/Carsten_Solutions/Task 1 - Carsten Draschner.ipynb
new file mode 100644
index 0000000..1822a2e
--- /dev/null
+++ b/Carsten_Solutions/Task 1 - Carsten Draschner.ipynb
@@ -0,0 +1,1321 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Exercise 1\n",
+    "Solution by:\n",
+    "Carsten Draschner \n",
+    "2719095\n",
+    "\n",
+    "Following Instructions: \n",
+    "https://github.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab/blob/master/2018_SoSe/exercises/Task01_Instructions.ipynb"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 152,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import nltk\n",
+    "from nltk import word_tokenize, pos_tag"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Classifiers\n",
+    "**Decision Tree** imported from scikit-learn"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 153,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "from sklearn.tree import DecisionTreeClassifier\n",
+    "from sklearn.feature_extraction import DictVectorizer\n",
+    "from sklearn.pipeline import Pipeline"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 1. model1 = your POS tagger model (english)\n",
+    "For a word, identified by its index within a given sentence, a feature vector for this word is determined."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 154,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "defined own feature model\n"
+     ]
+    }
+   ],
+   "source": [
+    "def features(sentence, index):\n",
+    "    return {\n",
+    "        'word': sentence[index],\n",
+    "        'length': len(sentence[index]),\n",
+    "        'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n",
+    "        'prefix-1': sentence[index][0],\n",
+    "        'suffix-1': sentence[index][-1],\n",
+    "        'prev_word': '' if index == 0 else sentence[index - 1],\n",
+    "        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
+    "        'kindOfCamelCase': sentence[index][1:].lower() != sentence[index][1:],\n",
+    "        'includesSpace': ' ' in sentence[index], #depends on tokenizer\n",
+    "        'containsNumber': sum(str(i) in sentence[index] for i in range(10)) > 0,\n",
+    "        'prefix-2': sentence[index][1] if len(sentence[index]) > 1 else \"-1\", #second character\n",
+    "        'suffix-2': sentence[index][-2] if len(sentence[index]) > 1 else \"-1\" #second-to-last character\n",
+    "    }\n",
+    "print(\"defined own feature model\")\n",
+    "#print(features(\"halli hallo i bims der Programmierer\".strip().split(\" \"), 3))"
+   ]
+  },
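+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick sanity check of the feature model (a sketch; the sentence below is an arbitrary example and the cell was not executed): inspect the feature dict produced for a single word."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#inspect the features extracted for the word 'fox' (index 3) of an example sentence\n",
+    "example_sentence = \"The quick brown fox jumps\".split(\" \")\n",
+    "print(features(example_sentence, 3))"
+   ]
+  },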
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 2. model2 = pre-trained POS tagger model using NLTK (maxentropy english)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 155,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#max entropy pre-trained POS tagger\n",
+    "#see Calculate performance 1.2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 3. model3.x = rule-based classifiers (x = 1 to 5)\n",
+    "1. DefaultTagger that simply tags everything with the same tag\n",
+    "2. RegexpTagger that applies tags according to a set of regular expressions\n",
+    "3. N-Gram taggers (an n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding tokens)\n",
+    "    + UnigramTagger\n",
+    "    + BigramTagger\n",
+    "    + TrigramTagger"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 156,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "\"#used from description for RegexpTagger\\npatterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*'s$', 'NN$'), \\n            (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\\n\\n#train taggers\\ndef_model = nltk.DefaultTagger('NN')\\nregexp_model = nltk.RegexpTagger(patterns)\\nuni_model = nltk.UnigramTagger(training_sentences_X1)\\nbi_model = nltk.BigramTagger(training_sentences_X1)\\ntri_model = nltk.TrigramTagger(training_sentences_X1)\""
+      ]
+     },
+     "execution_count": 156,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "#see Task 1.3 and 1.6\n",
+    "\n",
+    "'''#used from description for RegexpTagger\n",
+    "patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n",
+    "            (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
+    "\n",
+    "#train taggers\n",
+    "def_model = nltk.DefaultTagger('NN')\n",
+    "regexp_model = nltk.RegexpTagger(patterns)\n",
+    "uni_model = nltk.UnigramTagger(training_sentences_X1)\n",
+    "bi_model = nltk.BigramTagger(training_sentences_X1)\n",
+    "tri_model = nltk.TrigramTagger(training_sentences_X1)'''"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 4. model4 = your POS tagger model (not english)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 157,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#see Task 2.1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### 5. model5 = pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 158,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#see Task 2.2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Corpora\n",
+    "note: data split for training/test = 0.8/0.2 (sequential)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 1. X1 = nltk.corpus.treebank (english)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 159,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[nltk_data] Downloading package treebank to\n",
+      "[nltk_data]     /Users/Carsten/nltk_data...\n",
+      "[nltk_data]   Package treebank is already up-to-date!\n",
+      "downloaded treebank\n"
+     ]
+    }
+   ],
+   "source": [
+    "nltk.download('treebank')\n",
+    "x1 = nltk.corpus.treebank\n",
+    "print(\"downloaded treebank\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 2. X2 = nltk.corpus.brown (english)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 160,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[nltk_data] Downloading package brown to /Users/Carsten/nltk_data...\n",
+      "[nltk_data]   Package brown is already up-to-date!\n",
+      "downloaded brown\n"
+     ]
+    }
+   ],
+   "source": [
+    "nltk.download('brown')\n",
+    "x2 = nltk.corpus.brown\n",
+    "print(\"downloaded brown\")"
+   ]
+  },
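+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Note (quick sanity check, cell not executed): the two English corpora use different tagsets (Penn Treebank vs. Brown), which matters when comparing taggers across them. A look at the first tagged words of each corpus makes the difference visible."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#compare the tagsets: treebank is annotated with Penn Treebank tags, brown with Brown tags\n",
+    "print(x1.tagged_sents()[0][:5])\n",
+    "print(x2.tagged_sents()[0][:5])"
+   ]
+  },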
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 3. X3 = other language: German"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 161,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#? nltk.corpus.ConllCorpusReader"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 162,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "downloaded german tiger corpus\n"
+     ]
+    }
+   ],
+   "source": [
+    "#load the german TIGER corpus from a local CoNLL-2009 file\n",
+    "X3 = nltk.corpus.ConllCorpusReader(root='/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/german/', fileids=['tiger_release_aug07.corrected.16012013.conll09'], columntypes=['ignore', 'words', 'ignore', 'ignore', 'pos'], encoding='utf-8')\n",
+    "german_tagged_sents = X3.tagged_sents()\n",
+    "print(\"downloaded german tiger corpus\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 4. X4 = other language: Croatian"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 201,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#x4 = other language\n",
+    "#Croatian corpus:\n",
+    "#by Željko Agić, Nikola Ljubešić http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\n",
+    "#license: https://creativecommons.org/licenses/by-sa/4.0/\n",
+    "corp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=('ignore','words','ignore','ignore','ignore','ignore','ignore','ignore','ignore','pos'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 200,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      ">\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(corp.tagged_sents)"
+   ]
+  },
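+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A more useful sanity check than printing the bound method above (a sketch; assumes the ConllCorpusReader loaded the file correctly, cell not executed): print the corpus size and the first tagged sentence."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#number of sentences and a sample of the Croatian tagged sentences\n",
+    "print(len(corp.tagged_sents()))\n",
+    "print(corp.tagged_sents()[0])"
+   ]
+  },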
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Task 1\n",
+    "* get results for english (plot a graph with all classifiers x results)\n",
+    "  * performance 1.1 = model1 in X1\n",
+    "  * performance 1.2 = model2 in X1\n",
+    "  * performance 1.3.x = model3.x in X1\n",
+    "  * performance 1.4 = model1 in X2\n",
+    "  * performance 1.5 = model2 in X2\n",
+    "  * performance 1.6.x = model3.x in X2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Generating Training and Test Data"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Generate Training and Test Data for X1\n",
+    "1. split the annotated sentences into training and test data\n",
+    "2. split the training data into input data and teacher data\n",
+    "    * input is the feature vector of each word\n",
+    "    * output is the list of POS tags, one for each word of each sentence"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 165,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#helper to generate training data: strip the assigned tags from a tagged sentence\n",
+    "def untag(tagged_sentence):\n",
+    "    return [w for w, t in tagged_sentence]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 166,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got  3131  training sentences and  783  test sentences\n"
+     ]
+    }
+   ],
+   "source": [
+    "#print(type(nltk.corpus.treebank.tagged_sents()))\n",
+    "\n",
+    "#object including the annotated sentences\n",
+    "annotated_sent = nltk.corpus.treebank.tagged_sents()\n",
+    "\n",
+    "#to split the data, calculate the border for the 0.8/0.2 ratio\n",
+    "cutoff = int(.8 * len(annotated_sent))\n",
+    "training_sentences_X1 = annotated_sent[:cutoff]\n",
+    "test_sentences_X1 = annotated_sent[cutoff:]\n",
+    "\n",
+    "#show the amount of sentences\n",
+    "print(\"got \",len(training_sentences_X1),\" training sentences and \", len(test_sentences_X1), \" test sentences\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**transform_to_dataset** generates the input X as a list of feature dictionaries and the output y as a list of POS tags."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 167,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#split tagged sentences into X (the features extracted for each word) and y (the corresponding POS tag of each word)\n",
+    "def transform_to_dataset(tagged_sentences):\n",
+    "    X, y = [], []\n",
+    "    for tagged_sentence in tagged_sentences:\n",
+    "        for index in range(len(tagged_sentence)):\n",
+    "            X.append(features(untag(tagged_sentence), index))\n",
+    "            y.append(tagged_sentence[index][1])\n",
+    "    return X, y"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 168,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "generated X1 (feature sets) and y1 set of teacher tags\n"
+     ]
+    }
+   ],
+   "source": [
+    "#training input set X and training teacher set y\n",
+    "X1, y1 = transform_to_dataset(training_sentences_X1)\n",
+    "print(\"generated X1 (feature sets) and y1 set of teacher tags\")"
+   ]
+  },
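+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick look at one training example as produced by **transform_to_dataset** (sketch, cell not executed): a feature dict in X1 and the matching gold tag in y1."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#the feature dict of the first word and its teacher tag\n",
+    "print(X1[0])\n",
+    "print(y1[0])"
+   ]
+  },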
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Generate Training and Test Data for X2\n",
+    "1. split the annotated sentences into training and test data\n",
+    "2. split the training data into input data and teacher data\n",
+    "    * input is the feature vector of each word\n",
+    "    * output is the list of POS tags, one for each word of each sentence"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 169,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got  45872  training sentences and  11468  test sentences\n"
+     ]
+    }
+   ],
+   "source": [
+    "#object including the annotated sentences\n",
+    "annotated_sent = nltk.corpus.brown.tagged_sents()\n",
+    "\n",
+    "#to split the data, calculate the border for the 0.8/0.2 ratio\n",
+    "cutoff = int(.8 * len(annotated_sent))\n",
+    "training_sentences_X2 = annotated_sent[:cutoff]\n",
+    "test_sentences_X2 = annotated_sent[cutoff:]\n",
+    "\n",
+    "#show the amount of sentences\n",
+    "print(\"got \",len(training_sentences_X2),\" training sentences and \", len(test_sentences_X2), \" test sentences\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 170,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "generated X2 (feature sets) and y2 set of teacher tags\n"
+     ]
+    }
+   ],
+   "source": [
+    "#training input set X and training teacher set y\n",
+    "X2, y2 = transform_to_dataset(training_sentences_X2)\n",
+    "print(\"generated X2 (feature sets) and y2 set of teacher tags\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Generate Training and Test Data for X3\n",
+    "1. split the annotated sentences into training and test data\n",
+    "2. split the training data into input data and teacher data\n",
+    "    * input is the feature vector of each word\n",
+    "    * output is the list of POS tags, one for each word of each sentence"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 171,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got  40377  training sentences and  10095  test sentences\n"
+     ]
+    }
+   ],
+   "source": [
+    "#object including the annotated sentences\n",
+    "annotated_sent = X3.tagged_sents()\n",
+    "\n",
+    "#print(type(annotated_sent))\n",
+    "\n",
+    "#to split the data, calculate the border for the 0.8/0.2 ratio\n",
+    "cutoff = int(.8 * len(annotated_sent))\n",
+    "training_sentences_X3 = annotated_sent[:cutoff]\n",
+    "test_sentences_X3 = annotated_sent[cutoff:]\n",
+    "\n",
+    "#show the amount of sentences\n",
+    "print(\"got \",len(training_sentences_X3),\" training sentences and \", len(test_sentences_X3), \" test sentences\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 172,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "generated X3 (feature sets) and y3 set of teacher tags\n"
+     ]
+    }
+   ],
+   "source": [
+    "#training input set X and training teacher set y\n",
+    "#note: this rebinds X3 from the corpus reader to the feature list\n",
+    "X3, y3 = transform_to_dataset(training_sentences_X3)\n",
+    "print(\"generated X3 (feature sets) and y3 set of teacher tags\")#(X3[:3], y3[:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "#### Implementing a classifier\n",
+    "relevant imports\n",
+    "* decision tree as the classifier\n",
+    "* DictVectorizer transforms the feature dictionary into a vector that serves as the input of the tree (see the sketch after the pipeline cell below)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 173,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "imported sktree, DictVectorizer, Pipeline\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.tree import DecisionTreeClassifier\n",
+    "from sklearn.feature_extraction import DictVectorizer\n",
+    "from sklearn.pipeline import Pipeline\n",
+    "print(\"imported sktree, DictVectorizer, Pipeline\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Pipeline manages vectorizer and classifier"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 174,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Initialized classifier\n"
+     ]
+    }
+   ],
+   "source": [
+    "clf = Pipeline([\n",
+    "    ('vectorizer', DictVectorizer(sparse=False)),\n",
+    "    ('classifier', DecisionTreeClassifier(criterion='entropy'))\n",
+    "])\n",
+    "print(\"Initialized classifier\")"
+   ]
+  },
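+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "What the vectorizer step does, as a minimal sketch with two toy feature dicts (cell not executed): DictVectorizer one-hot encodes string-valued features and passes numeric/boolean values through, producing the dense vectors the tree consumes."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#one-hot encoding of string features, pass-through of numeric features\n",
+    "demo_vec = DictVectorizer(sparse=False)\n",
+    "demo = demo_vec.fit_transform([{'word': 'Hello', 'length': 5}, {'word': 'world', 'length': 5}])\n",
+    "print(demo_vec.get_feature_names())\n",
+    "print(demo)"
+   ]
+  },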
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Calculating performances"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate performance 1.1 - own POS tagger model with X1 = treebank\n",
+    "* fit the decision tree on a limited amount (size) of training data\n",
+    "* compare with the score function on the test data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 175,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "training OK\n",
+      "Accuracy: 0.885273716253\n"
+     ]
+    }
+   ],
+   "source": [
+    "size=10000\n",
+    "clf.fit(X1[:size], y1[:size])\n",
+    "\n",
+    "print('training OK')\n",
+    "\n",
+    "X1_test, y1_test = transform_to_dataset(test_sentences_X1)\n",
+    "\n",
+    "performance1_1 = clf.score(X1_test, y1_test)\n",
+    "\n",
+    "print(\"Accuracy:\", performance1_1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate performance 1.2 - pre-trained POS tagger model using NLTK (maxentropy english) with X1 = treebank"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 176,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Accuracy: 0.8936074654423873\n"
+     ]
+    }
+   ],
+   "source": [
+    "#extract only the words from the feature test set\n",
+    "only_words_X1 = [x['word'] for x in X1_test]\n",
+    "\n",
+    "#tag them with the pre-trained POS tagger shipped by nltk\n",
+    "pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X1, lang='eng')]\n",
+    "\n",
+    "#calculate performance by comparing each pos tag\n",
+    "performance1_2 = 0\n",
+    "for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n",
+    "    if(pos_tags_by_pre_trained_pos_tagger[index]==y1_test[index]):\n",
+    "        performance1_2 += 1\n",
+    "performance1_2 /= len(pos_tags_by_pre_trained_pos_tagger)\n",
+    "\n",
+    "print(\"Accuracy:\", performance1_2)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate performance 1.3 - rule-based classifiers with X1 = treebank\n",
+    "1. DefaultTagger that simply tags everything with the same tag\n",
+    "2. RegexpTagger that applies tags according to a set of regular expressions\n",
+    "3. N-Gram taggers (an n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding tokens)\n",
+    "    + UnigramTagger\n",
+    "    + BigramTagger\n",
+    "    + TrigramTagger"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 177,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "performance 1.3.1 is:  0.1447677029791906\n",
+      "performance 1.3.2 is:  0.24232746145017217\n",
+      "performance 1.3.3 is:  0.8608213982733669\n",
+      "performance 1.3.4 is:  0.1132791057437996\n",
+      "performance 1.3.5 is:  0.06736863116922003\n"
+     ]
+    }
+   ],
+   "source": [
+    "#used from description for RegexpTagger\n",
+    "patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n",
+    "            (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
+    "\n",
+    "#train taggers\n",
+    "def_model = nltk.DefaultTagger('NN')\n",
+    "regexp_model = nltk.RegexpTagger(patterns)\n",
+    "uni_model = nltk.UnigramTagger(training_sentences_X1)\n",
+    "bi_model = nltk.BigramTagger(training_sentences_X1)\n",
+    "tri_model = nltk.TrigramTagger(training_sentences_X1)\n",
+    "\n",
+    "#evaluate taggers\n",
+    "# performance of Default Tagger\n",
+    "performance1_3_1 = def_model.evaluate(test_sentences_X1)\n",
+    "print('performance 1.3.1 is: ',performance1_3_1)\n",
+    "\n",
+    "# performance of Regex Tagger\n",
+    "performance1_3_2 = regexp_model.evaluate(test_sentences_X1)\n",
+    "print('performance 1.3.2 is: ',performance1_3_2)\n",
+    "\n",
+    "# performance of Unigram Tagger\n",
+    "performance1_3_3 = uni_model.evaluate(test_sentences_X1)\n",
+    "print('performance 1.3.3 is: ',performance1_3_3)\n",
+    "\n",
+    "# performance of Bigram Tagger\n",
+    "performance1_3_4 = bi_model.evaluate(test_sentences_X1)\n",
+    "print('performance 1.3.4 is: ',performance1_3_4)\n",
+    "\n",
+    "# performance of Trigram Tagger\n",
+    "performance1_3_5 = tri_model.evaluate(test_sentences_X1)\n",
+    "print('performance 1.3.5 is: ',performance1_3_5)"
+   ]
+  },
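+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Aside (a sketch, not part of the graded performances; cell not executed): the bigram and trigram taggers score so low because they return None for any context unseen in training. Chaining the taggers with nltk's backoff parameter, so that the trigram tagger falls back to the bigram, then unigram, then the NN default, usually recovers most of that gap."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#chain the taggers: trigram -> bigram -> unigram -> default\n",
+    "backoff_uni = nltk.UnigramTagger(training_sentences_X1, backoff=def_model)\n",
+    "backoff_bi = nltk.BigramTagger(training_sentences_X1, backoff=backoff_uni)\n",
+    "backoff_tri = nltk.TrigramTagger(training_sentences_X1, backoff=backoff_bi)\n",
+    "print('backoff chain accuracy: ', backoff_tri.evaluate(test_sentences_X1))"
+   ]
+  },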
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate performance 1.4 - own POS tagger model with X2 = brown"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 178,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "calculated performance 1.4 =  0.785464840867\n"
+     ]
+    }
+   ],
+   "source": [
+    "size=10000\n",
+    "clf.fit(X2[:size], y2[:size])\n",
+    "X2_test, y2_test = transform_to_dataset(test_sentences_X2)\n",
+    "performance1_4 = clf.score(X2_test, y2_test)\n",
+    "print(\"calculated performance 1.4 = \",performance1_4)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate performance 1.5 - pre-trained POS tagger model using NLTK (maxentropy english) with X2 = brown"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 179,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Accuracy: 0.6044583741861567\n"
+     ]
+    }
+   ],
+   "source": [
+    "#extract only the words from the feature test set\n",
+    "only_words_X2 = [x['word'] for x in X2_test]\n",
+    "\n",
+    "#tag them with the pre-trained POS tagger shipped by nltk\n",
+    "#note: the pre-trained tagger emits Penn Treebank tags while brown is annotated with the Brown tagset, which caps this accuracy\n",
+    "pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X2, lang='eng')]\n",
+    "\n",
+    "#calculate performance by comparing each pos tag\n",
+    "performance1_5 = 0\n",
+    "for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n",
+    "    if(pos_tags_by_pre_trained_pos_tagger[index]==y2_test[index]):\n",
+    "        performance1_5 += 1\n",
+    "performance1_5 /= len(pos_tags_by_pre_trained_pos_tagger)\n",
+    "\n",
+    "print(\"Accuracy:\", performance1_5)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate performance 1.6 - rule-based classifiers with X2 = brown"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 180,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "performance 1.6.1 is:  0.10997763652187324\n",
+      "performance 1.6.2 is:  0.17594438874995869\n",
+      "performance 1.6.3 is:  0.8773754310202373\n",
+      "performance 1.6.4 is:  0.3390490564374869\n",
+      "performance 1.6.5 is:  0.19178610379738467\n"
+     ]
+    }
+   ],
+   "source": [
+    "#retrain the n-gram taggers on brown; def_model and regexp_model from above are reused\n",
+    "uni_model = nltk.UnigramTagger(training_sentences_X2)\n",
+    "bi_model = nltk.BigramTagger(training_sentences_X2)\n",
+    "tri_model = nltk.TrigramTagger(training_sentences_X2)\n",
+    "\n",
+    "#evaluate taggers\n",
+    "# performance of Default Tagger\n",
+    "performance1_6_1 = def_model.evaluate(test_sentences_X2)\n",
+    "print('performance 1.6.1 is: ',performance1_6_1)\n",
+    "\n",
+    "# performance of Regex Tagger\n",
+    "performance1_6_2 = regexp_model.evaluate(test_sentences_X2)\n",
+    "print('performance 1.6.2 is: ',performance1_6_2)\n",
+    "\n",
+    "# performance of Unigram Tagger\n",
+    "performance1_6_3 = uni_model.evaluate(test_sentences_X2)\n",
+    "print('performance 1.6.3 is: ',performance1_6_3)\n",
+    "\n",
+    "# performance of Bigram Tagger\n",
+    "performance1_6_4 = bi_model.evaluate(test_sentences_X2)\n",
+    "print('performance 1.6.4 is: ',performance1_6_4)\n",
+    "\n",
+    "# performance of Trigram Tagger\n",
+    "performance1_6_5 = tri_model.evaluate(test_sentences_X2)\n",
+    "print('performance 1.6.5 is: ',performance1_6_5)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Using the classifier\n",
+    "for the resulting tags see the Penn Treebank POS tag list:\n",
+    "https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 181,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "3.6.3\n"
+     ]
+    }
+   ],
+   "source": [
+    "#note: this shadows the pos_tag imported from nltk\n",
+    "def pos_tag(sentence):\n",
+    "    print('checking...')\n",
+    "    tags = clf.predict([features(sentence, index) for index in range(len(sentence))])\n",
+    "    return zip(sentence, tags)\n",
+    "\n",
+    "import platform\n",
+    "print(platform.python_version())\n",
+    "\n",
+    "#print(list(pos_tag(word_tokenize('Hello world, lets do something awesome today!'))))"
+   ]
+  },
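+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Usage example for the classifier-based pos_tag, i.e. the call that is commented out above as its own cell (word_tokenize needs nltk's punkt model, hence the download; cell not executed):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#tokenize an example sentence and tag it with the freshly trained classifier\n",
+    "nltk.download('punkt')\n",
+    "print(list(pos_tag(word_tokenize('Hello world, lets do something awesome today!'))))"
+   ]
+  },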
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Results for Task 1\n",
+    "* get results for english (plot a graph with all classifiers x results)\n",
+    "  * performance 1.1 = model1 in X1\n",
+    "  * performance 1.2 = model2 in X1\n",
+    "  * performance 1.3.x = model3.x in X1\n",
+    "  * performance 1.4 = model1 in X2\n",
+    "  * performance 1.5 = model2 in X2\n",
+    "  * performance 1.6.x = model3.x in X2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 182,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n"
+     ]
+    },
+    {
+     "data": {
+      "text/html": [
+       ""
+      ],
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 182,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import plotly\n",
+    "plotly.tools.set_credentials_file(username='carsten95', api_key='vElf5IOxiFheQdjTxjXW')\n",
+    "plotly.__version__\n",
+    "import plotly.plotly as py\n",
+    "import plotly.graph_objs as go\n",
+    "\n",
+    "data = [go.Bar(\n",
+    "            x=['performance 1.1', 'performance 1.2', 'performance 1.3.1', 'performance 1.3.2', 'performance 1.3.3', 'performance 1.3.4', 'performance 1.3.5', 'performance 1.4', 'performance 1.5' , 'performance 1.6.1', 'performance 1.6.2', 'performance 1.6.3', 'performance 1.6.4', 'performance 1.6.5'],\n",
+    "            y=[performance1_1, performance1_2, performance1_3_1, performance1_3_2, performance1_3_3, performance1_3_4, performance1_3_5, performance1_4, performance1_5, performance1_6_1, performance1_6_2, performance1_6_3, performance1_6_4, performance1_6_5]\n",
+    "    )]\n",
+    "\n",
+    "py.iplot(data, filename='basic-bar')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "### Results for Task 2\n",
+    "* train your model with standard features (plot a graph with all classifiers x results)\n",
+    "  * performance 2.1 = model4 in X3\n",
+    "    * model4 = your POS tagger model (not english)\n",
+    "  * performance 2.2 = model5 in X3\n",
+    "    * model5 = pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate Performance 2.1"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "again building a pipeline:\n",
+    "* first, the vectorizer turning the feature dicts into vectors\n",
+    "* second, the decision tree classifier (entropy criterion) that is trained on them"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 183,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "clf = Pipeline([\n",
+    "    ('vectorizer', DictVectorizer(sparse=False)),\n",
+    "    ('classifier', DecisionTreeClassifier(criterion='entropy'))\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 184,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "training done\n",
+      "Accuracy: 0.838410907381\n"
+     ]
+    }
+   ],
+   "source": [
+    "size=10000\n",
+    "clf.fit(X3[:size], y3[:size])\n",
+    "\n",
+    "print('training done')\n",
+    "\n",
+    "X3_test, y3_test = transform_to_dataset(test_sentences_X3)\n",
+    "\n",
+    "performance2_1 = clf.score(X3_test, y3_test)\n",
+    "\n",
+    "print(\"Accuracy:\", performance2_1)"
+   ]
+  },
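+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Usage example (a sketch; the German sentence is arbitrary and the cell was not executed): tag a short sentence with the tree just trained on the TIGER split."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#tag a short German example sentence with the freshly trained classifier\n",
+    "german_sentence = ['Das', 'ist', 'ein', 'einfacher', 'Test', '.']\n",
+    "print(list(zip(german_sentence, clf.predict([features(german_sentence, i) for i in range(len(german_sentence))]))))"
+   ]
+  },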
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Calculate Performance 2.2\n",
+    "* using RDRPOSTagger in a python 3 port from https://github.com/jacopofar/RDRPOSTagger-python-3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 185,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#RDRPOSTagger port python 3 from https://github.com/jacopofar/RDRPOSTagger-python-3"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 186,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "wrote file de_text.txt in cwd with the words of each sentence separated by spaces\n"
+     ]
+    }
+   ],
+   "source": [
+    "#generate a german plain text file, one sentence per line:\n",
+    "f = open(\"de_text.txt\", 'w')\n",
+    "for sentence in test_sentences_X3:\n",
+    "    for word, tag in sentence:\n",
+    "        f.write(word + \" \")\n",
+    "    f.write(\"\\n\")\n",
+    "f.close()\n",
+    "\n",
+    "print(\"wrote file de_text.txt in cwd with the words of each sentence separated by spaces\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 187,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "stored:  /Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions\n"
+     ]
+    }
+   ],
+   "source": [
+    "#to use RDRPOSTagger we have to remember where we are currently working and where the downloaded RDRPOSTagger is stored\n",
+    "import sys, os\n",
+    "\n",
+    "#current working directory to restore it later\n",
+    "dir_path = os.getcwd()\n",
+    "print(\"stored: \", dir_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 188,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "switched to path: /Users/Carsten/Downloads/RDRPOSTagger-python-3-master/pSCRDRtagger\n"
+     ]
+    }
+   ],
+   "source": [
+    "#add the downloaded RDRPOSTagger to the module path and switch into its directory\n",
+    "RDRPOS_TAGGER_PATH = \"/Users/Carsten/Downloads/RDRPOSTagger-python-3-master/pSCRDRtagger\"\n",
+    "sys.path.insert(0, RDRPOS_TAGGER_PATH)\n",
+    "os.chdir(RDRPOS_TAGGER_PATH)\n",
+    "print(\"switched to path:\", RDRPOS_TAGGER_PATH)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 189,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['Node', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'tabStr']\n"
+     ]
+    }
+   ],
+   "source": [
+    "# import and rename for easier use\n",
+    "import RDRPOSTagger\n",
+    "r = RDRPOSTagger.RDRPOSTagger()\n",
+    "\n",
+    "#load the German model files\n",
+    "r.constructSCRDRtreeFromRDRfile(\"../Models/POS/German.RDR\")\n",
+    "DICT = RDRPOSTagger.readDictionary(\"../Models/POS/German.DICT\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 190,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "('\\nOutput file:', 'de_text.txt.TAGGED')\n"
+     ]
+    }
+   ],
+   "source": [
+    "#switch back to the dir in which we worked at the start\n",
+    "os.chdir(dir_path)\n",
+    "\n",
+    "# generate a file that appends the tag to each word with the delimiter /\n",
+    "r.tagRawCorpus(DICT, \"de_text.txt\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 191,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#from the text file generated above, separate the words and tags again\n",
+    "tagged_words = []\n",
+    "f = open(\"de_text.txt.TAGGED\", 'r')\n",
+    "for line in f:\n",
+    "    for splits in line.split():\n",
+    "        cmp = splits.rsplit('/',1)\n",
+    "        if len(cmp) != 2:\n",
+    "            print(\"error parsing: \", cmp)\n",
+    "        else:\n",
+    "            w,t = cmp\n",
+    "            tagged_words.append((w,t))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 192,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[('CUPERTINO', 'NE'), ('(', '$('), ('rtr', 'NE'), ('/', '$('), ('whp', 'XY'), (')', '$('), ('.', '$.'), ('Der', 'ART'), ('Chef', 'NN'), ('des', 'ART')]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[[('CUPERTINO', 'NE')], [('(', '$('), ('rtr', 'NE'), ('/', '$('), ('whp', 'XY'), (')', '$('), ('.', '$.')], ...]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "display(tagged_words[:10])\n",
+    "display(test_sentences_X3[:10])"
+   ]
+  },
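+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Sanity check (a sketch, cell not executed): the accuracy loop below assumes the RDR output contains exactly one tagged token per gold token, in the same order; comparing the two token counts catches misalignment early."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#the evaluation below relies on aligned token sequences\n",
+    "n_gold_tokens = sum(len(sent) for sent in test_sentences_X3)\n",
+    "print(n_gold_tokens, len(tagged_words))"
+   ]
+  },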
"execution_count": 193, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy 2.2 = 0.9754407616361072\n" + ] + } + ], + "source": [ + "performance2_2 = 0 # for test \n", + "\n", + "#counter for the words\n", + "i = 0\n", + "\n", + "#evaluate accuracy\n", + "for sent in test_sentences_X3:\n", + " for tagged_w in sent:\n", + " if tagged_w[1] == tagged_words[i][1]:\n", + " performance2_2 += 1\n", + " i += 1\n", + "performance2_2 = performance2_2 / len(tagged_words)\n", + "print(\"Accuracy 2.2 = \",performance2_2)" + ] + }, + { + "cell_type": "code", + "execution_count": 194, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 194, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#visualize results with plotly\n", + "data = [go.Bar(\n", + " x=['performance 2.1', 'performance 2.2'],\n", + " y=[performance2_1, performance2_2]\n", + " )]\n", + "\n", + "py.iplot(data, filename='basic-bar')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}