started task 2
		| @ -9,12 +9,13 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 39, | ||||
|    "execution_count": 23, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "import numpy as np\n", | ||||
|     "import nltk\n", | ||||
|     "from nltk import word_tokenize, pos_tag" | ||||
|    ] | ||||
| @ -29,7 +30,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 40, | ||||
|    "execution_count": 24, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -49,14 +50,14 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 41, | ||||
|    "execution_count": 25, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "{'word': 'bims', 'length': 4, 'is_capitalized': False, 'prefix-1': 'b', 'suffix-1': 's', 'prev_word': 'i', 'next_word': 'der'}\n" | ||||
|       "{'word': 'bims', 'length': 4, 'is_capitalized': False, 'prefix-1': 'b', 'suffix-1': 's', 'prev_word': 'i', 'next_word': 'der', 'kindOfCamelCase': False, 'includesSpace': False}\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
| @ -69,7 +70,9 @@ | ||||
|     "        'prefix-1': sentence[index][0],\n", | ||||
|     "        'suffix-1': sentence[index][-1],\n", | ||||
|     "        'prev_word': '' if index == 0 else sentence[index - 1],\n", | ||||
|     "        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1]\n", | ||||
|     "        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n", | ||||
|     "        'kindOfCamelCase': sentence[index][1:].lower() != sentence[index][1:],\n", | ||||
|     "        'includesSpace': True if ((' ') in sentence[index]) else False #depemds on tokenizer\n", | ||||
|     "    }\n", | ||||
|     "\n", | ||||
|     "print(features(\"halli hallo i bims der Programmierer\".strip().split(\" \"), 3))" | ||||
| @ -84,18 +87,27 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": null, | ||||
|    "execution_count": 26, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [] | ||||
|    "source": [ | ||||
|     "#max entropie pre trained pos tag\n", | ||||
|     "#see Calculate performance 1.2" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "### 3. model3.x = rule-based classifiers (x = 1 to 5)" | ||||
|     "### 3. model3.x = rule-based classifiers (x = 1 to 5)\n", | ||||
|     "1. DefaultTagger that simply tags everything with the same tag\n", | ||||
|     "2. RegexpTagger that applies tags according to a set of regular expressions\n", | ||||
|     "3. N-Gram (n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding token)\n", | ||||
|     "    + UnigramTagger\n", | ||||
|     "    + BigramTagger\n", | ||||
|     "    + TrigramTagger" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
| @ -105,7 +117,18 @@ | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [] | ||||
|    "source": [ | ||||
|     "#used from description for RegexpTagger\n", | ||||
|     "patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'),               \n", | ||||
|     "             (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n", | ||||
|     "\n", | ||||
|     "#train taggers\n", | ||||
|     "def_model = nltk.DefaultTagger('NN')\n", | ||||
|     "regexp_model = nltk.RegexpTagger(patterns)\n", | ||||
|     "uni_model = nltk.UnigramTagger(training_sentences_X1)\n", | ||||
|     "bi_model = nltk.BigramTagger(training_sentences_X1)\n", | ||||
|     "tri_model = nltk.TrigramTagger(training_sentences_X1)" | ||||
|    ] | ||||
|   }, | ||||
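The three n-gram taggers above are trained independently, so the bigram and trigram models return `None` for any context they never saw in training, which explains their low standalone scores in the evaluation cells later on. NLTK's sequential taggers accept a `backoff` argument; a minimal sketch of chaining them, reusing the names from this notebook:

```python
# chain trigram -> bigram -> unigram -> constant 'NN' fallback
def_backoff = nltk.DefaultTagger('NN')
uni_backoff = nltk.UnigramTagger(training_sentences_X1, backoff=def_backoff)
bi_backoff  = nltk.BigramTagger(training_sentences_X1, backoff=uni_backoff)
tri_backoff = nltk.TrigramTagger(training_sentences_X1, backoff=bi_backoff)

# typically scores well above any single n-gram model on its own
print(tri_backoff.evaluate(test_sentences_X1))
```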
|   { | ||||
|    "cell_type": "markdown", | ||||
| @ -156,7 +179,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 42, | ||||
|    "execution_count": 27, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -183,7 +206,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 43, | ||||
|    "execution_count": 28, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -209,14 +232,70 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 44, | ||||
|    "execution_count": 64, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "data": { | ||||
|       "text/plain": [ | ||||
|        "'#import pandas as pd\\n#df = pd.read_table(\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/set.hr.conll\")\\n#df.head()\\n\\n#x3 = other language\\n#from croatia:\\n#by ZˇeljkoAgic ́,⋆NikolaLjubesˇic ́ http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\\n#licenses: https://creativecommons.org/licenses/by-sa/4.0/\\ncorp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=(\\'ignore\\',\\'ignore\\',\\'pos\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\'))\\nprint(corp.tagged_sents[-100])\\n#from croatia:\\n#by ZˇeljkoAgic ́,⋆NikolaLjubesˇic ́ http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\\n#licenses: https://creativecommons.org/licenses/by-sa/4.0/'" | ||||
|       ] | ||||
|      }, | ||||
|      "execution_count": 64, | ||||
|      "metadata": {}, | ||||
|      "output_type": "execute_result" | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "'''#import pandas as pd\n", | ||||
|     "#df = pd.read_table(\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/set.hr.conll\")\n", | ||||
|     "#df.head()\n", | ||||
|     "\n", | ||||
|     "#x3 = other language\n", | ||||
|     "#from croatia:\n", | ||||
|     "#by ZˇeljkoAgic ́,⋆NikolaLjubesˇic ́ http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\n", | ||||
|     "#licenses: https://creativecommons.org/licenses/by-sa/4.0/\n", | ||||
|     "corp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=('ignore','ignore','pos','ignore','ignore','ignore','ignore','ignore','ignore','ignore'))\n", | ||||
|     "print(corp.tagged_sents[-100])\n", | ||||
|     "#from croatia:\n", | ||||
|     "#by ZˇeljkoAgic ́,⋆NikolaLjubesˇic ́ http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\n", | ||||
|     "#licenses: https://creativecommons.org/licenses/by-sa/4.0/'''\n" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 30, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#nltk.download('brown')\n", | ||||
|     "#x3 = other language" | ||||
|     "#? nltk.corpus.ConllCorpusReader" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 52, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "<class 'nltk.collections.LazyMap'>\n", | ||||
|       "50472\n", | ||||
|       "[('So', 'ADV'), ('kann', 'VMFIN'), ('man', 'PIS'), ('Marsilius', 'NE'), ('von', 'APPR'), ('Padua', 'NE'), ('so', 'ADV'), ('wenig', 'ADV'), ('zu', 'APPR'), ('einem', 'ART'), ('Vorläufer', 'NN'), ('moderner', 'ADJA'), ('Volkssouveränität', 'NN'), ('machen', 'VVINF'), ('wie', 'KOKOM'), ('Rousseau', 'NE'), ('zum', 'APPRART'), ('Verkünder', 'NN'), ('eines', 'ART'), ('``', '$('), ('Zurück', 'NN'), ('zur', 'APPRART'), ('Natur', 'NN'), (\"''\", '$('), ('.', '$.')]\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "# TODO: loading german corpus \n", | ||||
|     "X3 = nltk.corpus.ConllCorpusReader(root='/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/german/', fileids=['tiger_release_aug07.corrected.16012013.conll09'], columntypes=['ignore', 'words', 'ignore', 'ignore', 'pos'], encoding='utf-8')\n", | ||||
|     "german_tagged_sents = X3.tagged_sents()\n", | ||||
|     "print(type(german_tagged_sents))\n", | ||||
|     "print(len(german_tagged_sents))\n", | ||||
|     "\n", | ||||
|     "print (german_tagged_sents[-100])" | ||||
|    ] | ||||
|   }, | ||||
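As a quick sanity check of the loaded TIGER data, the STTS tag distribution can be inspected; a small sketch, assuming `X3` is the `ConllCorpusReader` created in the cell above:

```python
from nltk import FreqDist

# count STTS tags over all (word, tag) pairs in the German corpus
tag_dist = FreqDist(tag for sent in X3.tagged_sents() for _, tag in sent)
print(tag_dist.most_common(10))
```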
|   { | ||||
| @ -246,7 +325,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 45, | ||||
|    "execution_count": 32, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -259,18 +338,21 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 46, | ||||
|    "execution_count": 65, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "<class 'nltk.corpus.reader.util.ConcatenatedCorpusView'>\n", | ||||
|       "got  3131  training sentences and  783  test sentences\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#print(type(nltk.corpus.treebank.tagged_sents()))\n", | ||||
|     "\n", | ||||
|     "#object including the annotated sentences\n", | ||||
|     "annotated_sent = nltk.corpus.treebank.tagged_sents()\n", | ||||
|     "\n", | ||||
| @ -285,7 +367,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 47, | ||||
|    "execution_count": 34, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -303,8 +385,10 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 48, | ||||
|    "metadata": {}, | ||||
|    "execution_count": 35, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#trainings inputset X and training teacher set y\n", | ||||
| @ -324,7 +408,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 49, | ||||
|    "execution_count": 36, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -350,7 +434,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 50, | ||||
|    "execution_count": 37, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -360,6 +444,56 @@ | ||||
|     "X2, y2 = transform_to_dataset(training_sentences_X2)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Generate Training and Testdata for X3\n", | ||||
|     "1. split annotaed sentences into training and testdata\n", | ||||
|     "2. split trainingdata into input data and teacherdata\n", | ||||
|     "    *input is the feature vector of each word\n", | ||||
|     "    *output is a list of POS tags for each word and sentences" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 63, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "<class 'nltk.collections.LazyMap'>\n", | ||||
|       "got  40377  training sentences and  10095  test sentences\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#object including the annotated sentences\n", | ||||
|     "annotated_sent = X3.tagged_sents()\n", | ||||
|     "\n", | ||||
|     "#print(type(annotated_sent))\n", | ||||
|     "\n", | ||||
|     "#to split the data, calculate the borders for ratio\n", | ||||
|     "cutoff = int(.8 * len(annotated_sent))\n", | ||||
|     "training_sentences_X3 = annotated_sent[:cutoff]\n", | ||||
|     "test_sentences_X3 = annotated_sent[cutoff:]\n", | ||||
|     "\n", | ||||
|     "#show the amount of sentences\n", | ||||
|     "print(\"got \",len(training_sentences_X3),\" training sentences and \", len(test_sentences_X3), \" test sentences\")" | ||||
|    ] | ||||
|   }, | ||||
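The cutoff split above keeps corpus order, so if the TIGER sentences are grouped by source or genre the test portion may be biased. Since scikit-learn is already a dependency here (`DictVectorizer`, `DecisionTreeClassifier`), a shuffled split is a one-liner; a sketch, assuming `annotated_sent` from the cell above:

```python
from sklearn.model_selection import train_test_split

# shuffled 80/20 split with a fixed seed for reproducibility
train_sents, test_sents = train_test_split(
    list(annotated_sent), test_size=0.2, random_state=42)
print(len(train_sents), len(test_sents))
```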
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 66, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#trainings inputset X and training teacher set y\n", | ||||
|     "X3, y3 = transform_to_dataset(training_sentences_X3)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": { | ||||
| @ -374,7 +508,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 51, | ||||
|    "execution_count": 40, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -394,7 +528,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 52, | ||||
|    "execution_count": 41, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -417,7 +551,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 53, | ||||
|    "execution_count": 42, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -425,13 +559,13 @@ | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "training OK\n", | ||||
|       "Accuracy: 0.87983432307\n" | ||||
|       "Accuracy: 0.880632766106\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "size=10000\n", | ||||
|     "clf.fit(X[:size], y[:size])\n", | ||||
|     "clf.fit(X1[:size], y1[:size])\n", | ||||
|     " \n", | ||||
|     "print('training OK')\n", | ||||
|     " \n", | ||||
| @ -446,37 +580,206 @@ | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Calculate other performances" | ||||
|     "##### Calculate performance 1.2" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 58, | ||||
|    "execution_count": 43, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "calculated perfomance 1.4=  0.756485959481\n" | ||||
|       "Accuracy: 0.8936074654423873\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#extract only the words from feature trainings set\n", | ||||
|     "only_words_X1 = [x['word'] for x in X1_test]\n", | ||||
|     "\n", | ||||
|     "#train with the pos tagger by nltk\n", | ||||
|     "pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X1, lang='eng')]\n", | ||||
|     "\n", | ||||
|     "#calculate performance by comparing each pos tag\n", | ||||
|     "performance1_2 = 0\n", | ||||
|     "for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n", | ||||
|     "    if(pos_tags_by_pre_trained_pos_tagger[index]==y1_test[index]):\n", | ||||
|     "        performance1_2 += 1\n", | ||||
|     "performance1_2 /= len(pos_tags_by_pre_trained_pos_tagger)\n", | ||||
|     "\n", | ||||
|     "performance1_3 = 0\n", | ||||
|     "print(\"Accuracy:\", performance1_2)" | ||||
|    ] | ||||
|   }, | ||||
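Note that the cell above hands `pos_tag` the entire flattened test vocabulary as one long "sentence", so the tagger never sees real sentence boundaries or context. Tagging sentence by sentence would look roughly like this (a sketch, reusing `test_sentences_X1` and `pos_tag` from above):

```python
# tag each test sentence separately so the tagger gets real context
correct = total = 0
for sent in test_sentences_X1:
    words = [word for word, tag in sent]
    gold = [tag for word, tag in sent]
    predicted = [tag for word, tag in pos_tag(words)]
    correct += sum(p == g for p, g in zip(predicted, gold))
    total += len(gold)

print("per-sentence accuracy:", correct / total)
```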
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Calculate performance 1.3\n", | ||||
|     "1. DefaultTagger that simply tags everything with the same tag\n", | ||||
|     "2. RegexpTagger that applies tags according to a set of regular expressions\n", | ||||
|     "3. N-Gram (n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding token)\n", | ||||
|     "    + UnigramTagger\n", | ||||
|     "    + BigramTagger\n", | ||||
|     "    + TrigramTagger" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 44, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "performance 1.3.1 is:  0.1447677029791906\n", | ||||
|       "performance 1.3.2 is:  0.24232746145017217\n", | ||||
|       "performance 1.3.3 is:  0.8608213982733669\n", | ||||
|       "performance 1.3.4 is:  0.1132791057437996\n", | ||||
|       "performance 1.3.5 is:  0.06736863116922003\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#evaluate taggers\n", | ||||
|     "# performance of Default Tagger\n", | ||||
|     "performance1_3_1 = def_model.evaluate(test_sentences_X1)\n", | ||||
|     "print('performance 1.3.1 is: ',performance1_3_1)\n", | ||||
|     "\n", | ||||
|     "# performance1_4\n", | ||||
|     "# performance of Regex Tagger\n", | ||||
|     "performance1_3_2 = regexp_model.evaluate(test_sentences_X1)\n", | ||||
|     "print('performance 1.3.2 is: ',performance1_3_2)\n", | ||||
|     "\n", | ||||
|     "# performance of Unigram Tagger\n", | ||||
|     "performance1_3_3 = uni_model.evaluate(test_sentences_X1)\n", | ||||
|     "print('performance 1.3.3 is: ',performance1_3_3)\n", | ||||
|     "\n", | ||||
|     "# performance of Bigram Tagger\n", | ||||
|     "performance1_3_4 = bi_model.evaluate(test_sentences_X1)\n", | ||||
|     "print('performance 1.3.4 is: ',performance1_3_4)\n", | ||||
|     "\n", | ||||
|     "# performance of Trigram Tagger\n", | ||||
|     "performance1_3_5 = tri_model.evaluate(test_sentences_X1)\n", | ||||
|     "print('performance 1.3.5 is: ',performance1_3_5)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Calculate performance 1.4" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 45, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "calculated perfomance 1.4=  0.75680543774\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "size=10000\n", | ||||
|     "clf.fit(X2[:size], y2[:size])\n", | ||||
|     "X2_test, y2_test = transform_to_dataset(test_sentences_X2)\n", | ||||
|     "performance1_4 = clf.score(X2_test, y2_test)\n", | ||||
|     "print(\"calculated perfomance 1.4= \",performance1_4)\n", | ||||
|     "print(\"calculated perfomance 1.4= \",performance1_4)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Calculate performance 1.5" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 46, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "Accuracy: 0.6044583741861567\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#extract only the words from feature trainings set\n", | ||||
|     "only_words_X2 = [x['word'] for x in X2_test]\n", | ||||
|     "\n", | ||||
|     "#train with the pos tagger by nltk\n", | ||||
|     "pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X2, lang='eng')]\n", | ||||
|     "\n", | ||||
|     "#calculate performance by comparing each pos tag\n", | ||||
|     "performance1_5 = 0\n", | ||||
|     "for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n", | ||||
|     "    if(pos_tags_by_pre_trained_pos_tagger[index]==y2_test[index]):\n", | ||||
|     "        performance1_5 += 1\n", | ||||
|     "performance1_5 /= len(pos_tags_by_pre_trained_pos_tagger)\n", | ||||
|     "\n", | ||||
|     "performance1_6 = 0" | ||||
|     "print(\"Accuracy:\", performance1_5)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Calculate performance 1.6" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 47, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "performance 1.6.1 is:  0.10997763652187324\n", | ||||
|       "performance 1.6.2 is:  0.17594438874995869\n", | ||||
|       "performance 1.6.3 is:  0.8773754310202373\n", | ||||
|       "performance 1.6.4 is:  0.3390490564374869\n", | ||||
|       "performance 1.6.5 is:  0.19178610379738467\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "uni_model = nltk.UnigramTagger(training_sentences_X2)\n", | ||||
|     "bi_model = nltk.BigramTagger(training_sentences_X2)\n", | ||||
|     "tri_model = nltk.TrigramTagger(training_sentences_X2)\n", | ||||
|     "\n", | ||||
|     "#evaluate taggers\n", | ||||
|     "# performance of Default Tagger\n", | ||||
|     "performance1_6_1 = def_model.evaluate(test_sentences_X2)\n", | ||||
|     "print('performance 1.6.1 is: ',performance1_6_1)\n", | ||||
|     "\n", | ||||
|     "# performance of Regex Tagger\n", | ||||
|     "performance1_6_2 = regexp_model.evaluate(test_sentences_X2)\n", | ||||
|     "print('performance 1.6.2 is: ',performance1_6_2)\n", | ||||
|     "\n", | ||||
|     "# performance of Unigram Tagger\n", | ||||
|     "performance1_6_3 = uni_model.evaluate(test_sentences_X2)\n", | ||||
|     "print('performance 1.6.3 is: ',performance1_6_3)\n", | ||||
|     "\n", | ||||
|     "# performance of Bigram Tagger\n", | ||||
|     "performance1_6_4 = bi_model.evaluate(test_sentences_X2)\n", | ||||
|     "print('performance 1.6.4 is: ',performance1_6_4)\n", | ||||
|     "\n", | ||||
|     "# performance of Trigram Tagger\n", | ||||
|     "performance1_6_5 = tri_model.evaluate(test_sentences_X2)\n", | ||||
|     "print('performance 1.6.5 is: ',performance1_6_5)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
| @ -490,7 +793,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 56, | ||||
|    "execution_count": 48, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -499,7 +802,7 @@ | ||||
|      "text": [ | ||||
|       "3.6.3\n", | ||||
|       "checking...\n", | ||||
|       "[('Hello', 'CS'), ('world', 'NN'), (',', ','), ('lets', 'NNS'), ('do', 'DO'), ('something', 'PN'), ('awesome', 'NN'), ('today', 'NR'), ('!', 'CD')]\n" | ||||
|       "[('Hello', 'VBD-HL'), ('world', 'VBD'), (',', ','), ('lets', 'NNS'), ('do', 'DO'), ('something', 'PN'), ('awesome', 'NN'), ('today', 'NR'), ('!', 'CD')]\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
| @ -532,7 +835,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 57, | ||||
|    "execution_count": 49, | ||||
|    "metadata": { | ||||
|     "scrolled": true | ||||
|    }, | ||||
| @ -553,7 +856,7 @@ | ||||
|        "<plotly.tools.PlotlyDisplay object>" | ||||
|       ] | ||||
|      }, | ||||
|      "execution_count": 57, | ||||
|      "execution_count": 49, | ||||
|      "metadata": {}, | ||||
|      "output_type": "execute_result" | ||||
|     } | ||||
| @ -566,8 +869,8 @@ | ||||
|     "import plotly.graph_objs as go\n", | ||||
|     "\n", | ||||
|     "data = [go.Bar(\n", | ||||
|     "            x=['performance 1.1', 'performance 1.2', 'performance 1.3', 'performance 1.4', 'performance 1.5' , 'performance 1.6'],\n", | ||||
|     "            y=[performance1_1, performance1_2, performance1_3, performance1_4, performance1_5, performance1_6]\n", | ||||
|     "            x=['performance 1.1', 'performance 1.2', 'performance 1.3.1', 'performance 1.3.2', 'performance 1.3.3', 'performance 1.3.4', 'performance 1.3.5', 'performance 1.4', 'performance 1.5' , 'performance 1.6.1', 'performance 1.6.2', 'performance 1.6.3', 'performance 1.6.4', 'performance 1.6.5'],\n", | ||||
|     "            y=[performance1_1, performance1_2, performance1_3_1, performance1_3_2, performance1_3_3, performance1_3_4, performance1_3_5, performance1_4, performance1_5, performance1_6_1, performance1_6_2, performance1_6_3, performance1_6_4, performance1_6_5]\n", | ||||
|     "    )]\n", | ||||
|     "\n", | ||||
|     "py.iplot(data, filename='basic-bar')" | ||||
| @ -582,12 +885,55 @@ | ||||
|     "### Results for Task 2\n", | ||||
|     "* train your model with standard features (plot a graph with all classifiers x results)\n", | ||||
|     "    * performance 2.1 = model4 in X3\n", | ||||
|     "    * performance 2.2 = model5 in X3" | ||||
|     "        * model 4 your POS tagger model (not english)\n", | ||||
|     "    * performance 2.2 = model5 in X3\n", | ||||
|     "        * pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 60, | ||||
|    "execution_count": 70, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "clf = Pipeline([\n", | ||||
|     "    ('vectorizer', DictVectorizer(sparse=False)),\n", | ||||
|     "    ('classifier', DecisionTreeClassifier(criterion='entropy'))\n", | ||||
|     "])" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 76, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "training OK\n", | ||||
|       "Accuracy: 0.836976962858\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "size=10000\n", | ||||
|     "clf.fit(X3[:size], y3[:size])\n", | ||||
|     " \n", | ||||
|     "print('training OK')\n", | ||||
|     " \n", | ||||
|     "X3_test, y3_test = transform_to_dataset(test_sentences_X3)\n", | ||||
|     "\n", | ||||
|     "performance2_1 = clf.score(X3_test, y3_test)\n", | ||||
|     "\n", | ||||
|     "print(\"Accuracy:\", performance2_1)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 77, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -606,13 +952,12 @@ | ||||
|        "<plotly.tools.PlotlyDisplay object>" | ||||
|       ] | ||||
|      }, | ||||
|      "execution_count": 60, | ||||
|      "execution_count": 77, | ||||
|      "metadata": {}, | ||||
|      "output_type": "execute_result" | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "performance2_1 = 0\n", | ||||
|     "performance2_2 = 0\n", | ||||
|     "\n", | ||||
|     "data = [go.Bar(\n", | ||||
|  | ||||