{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 1"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import numpy as np\n",
"import nltk\n",
"from nltk import word_tokenize, pos_tag"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Classifiers\n",
"note: for model1 and model3 you can try different classifiers: Hidden Markov Model, Logistic Regression, Maximum Entropy Markov Models, Decision Trees, Naive Bayes, etc. __choose one!__"
]
},
|
|
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"from sklearn.pipeline import Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1. model1 = your POS tagger model (english)"
]
},
|
|
{
"cell_type": "code",
"execution_count": 25,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'word': 'bims', 'length': 4, 'is_capitalized': False, 'prefix-1': 'b', 'suffix-1': 's', 'prev_word': 'i', 'next_word': 'der', 'kindOfCamelCase': False, 'includesSpace': False}\n"
]
}
],
"source": [
"def features(sentence, index):\n",
"    return {\n",
"        'word': sentence[index],\n",
"        'length': len(sentence[index]),\n",
"        'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n",
"        'prefix-1': sentence[index][0],\n",
"        'suffix-1': sentence[index][-1],\n",
"        'prev_word': '' if index == 0 else sentence[index - 1],\n",
"        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
"        'kindOfCamelCase': sentence[index][1:].lower() != sentence[index][1:],\n",
"        'includesSpace': ' ' in sentence[index]  #depends on the tokenizer\n",
"    }\n",
"\n",
"print(features(\"halli hallo i bims der Programmierer\".strip().split(\" \"), 3))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 2. model2 = pre-trained POS tagger model using NLTK (maxentropy english)\n"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#maximum entropy pre-trained POS tagger from NLTK\n",
"#see 'Calculate performance 1.2' below"
]
},
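{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#added sketch (not part of the original solution): model2 is NLTK's pre-trained\n",
"#English tagger, used through the pos_tag function imported above; its accuracy\n",
"#is measured in 'Calculate performance 1.2' below\n",
"#nltk.download('averaged_perceptron_tagger'); nltk.download('punkt')  #uncomment if the resources are missing\n",
"print(pos_tag(word_tokenize('This is a short demo sentence .')))"
]
},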
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 3. model3.x = rule-based classifiers (x = 1 to 5)\n",
"1. DefaultTagger that simply tags everything with the same tag\n",
"2. RegexpTagger that applies tags according to a set of regular expressions\n",
"3. N-Gram (an n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding tokens)\n",
"    + UnigramTagger\n",
"    + BigramTagger\n",
"    + TrigramTagger"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#regular-expression patterns taken from the RegexpTagger documentation\n",
"patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'),\n",
"            (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
"\n",
"#train the taggers (training_sentences_X1 is created in the Corpora section below)\n",
"def_model = nltk.DefaultTagger('NN')\n",
"regexp_model = nltk.RegexpTagger(patterns)\n",
"uni_model = nltk.UnigramTagger(training_sentences_X1)\n",
"bi_model = nltk.BigramTagger(training_sentences_X1)\n",
"tri_model = nltk.TrigramTagger(training_sentences_X1)"
]
},
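{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#added sketch (not part of the original solution): the n-gram taggers above return\n",
"#None for unseen contexts, so chaining them with a backoff tagger usually helps;\n",
"#assumes training_sentences_X1 from the Corpora section has been created\n",
"uni_backoff = nltk.UnigramTagger(training_sentences_X1, backoff=def_model)\n",
"bi_backoff = nltk.BigramTagger(training_sentences_X1, backoff=uni_backoff)\n",
"tri_backoff = nltk.TrigramTagger(training_sentences_X1, backoff=bi_backoff)"
]
},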
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 4. model4 = your POS tagger model (not english)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
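{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#added sketch (the original cell was left empty): model4 is the same feature-based\n",
"#DictVectorizer + DecisionTree pipeline as model1, only trained on the German corpus X3;\n",
"#the actual training and scoring happen under 'Results for Task 2' (performance 2.1)\n",
"model4 = Pipeline([\n",
"    ('vectorizer', DictVectorizer(sparse=False)),\n",
"    ('classifier', DecisionTreeClassifier(criterion='entropy'))\n",
"])"
]
},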
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### 5. model5 = pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Corpora\n",
"note: data split for training/test = 0.8/0.2 (sequential)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 1. X1 = nltk.corpus.treebank (english)"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package treebank to\n",
"[nltk_data] /Users/Carsten/nltk_data...\n",
"[nltk_data] Package treebank is already up-to-date!\n"
]
}
],
"source": [
"nltk.download('treebank')\n",
"x1 = nltk.corpus.treebank"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 2. X2 = nltk.corpus.brown (english)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package brown to /Users/Carsten/nltk_data...\n",
"[nltk_data] Package brown is already up-to-date!\n"
]
}
],
"source": [
"nltk.download('brown')\n",
"x2 = nltk.corpus.brown"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### 3. X3 = other language (not english)"
]
},
|
|
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'#import pandas as pd\\n#df = pd.read_table(\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/set.hr.conll\")\\n#df.head()\\n\\n#x3 = other language\\n#from croatia:\\n#by ZˇeljkoAgic ́,⋆NikolaLjubesˇic ́ http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\\n#licenses: https://creativecommons.org/licenses/by-sa/4.0/\\ncorp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=(\\'ignore\\',\\'ignore\\',\\'pos\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\'))\\nprint(corp.tagged_sents[-100])\\n#from croatia:\\n#by ZˇeljkoAgic ́,⋆NikolaLjubesˇic ́ http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\\n#licenses: https://creativecommons.org/licenses/by-sa/4.0/'"
]
},
"execution_count": 64,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"'''#import pandas as pd\n",
"#df = pd.read_table(\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/set.hr.conll\")\n",
"#df.head()\n",
"\n",
"#x3 = other language\n",
"#from croatia:\n",
"#by Željko Agić, Nikola Ljubešić http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\n",
"#licenses: https://creativecommons.org/licenses/by-sa/4.0/\n",
"corp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=('ignore','ignore','pos','ignore','ignore','ignore','ignore','ignore','ignore','ignore'))\n",
"print(corp.tagged_sents[-100])\n",
"#from croatia:\n",
"#by Željko Agić, Nikola Ljubešić http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\n",
"#licenses: https://creativecommons.org/licenses/by-sa/4.0/'''\n"
]
},
|
|
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#? nltk.corpus.ConllCorpusReader"
]
},
|
|
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nltk.collections.LazyMap'>\n",
"50472\n",
"[('So', 'ADV'), ('kann', 'VMFIN'), ('man', 'PIS'), ('Marsilius', 'NE'), ('von', 'APPR'), ('Padua', 'NE'), ('so', 'ADV'), ('wenig', 'ADV'), ('zu', 'APPR'), ('einem', 'ART'), ('Vorläufer', 'NN'), ('moderner', 'ADJA'), ('Volkssouveränität', 'NN'), ('machen', 'VVINF'), ('wie', 'KOKOM'), ('Rousseau', 'NE'), ('zum', 'APPRART'), ('Verkünder', 'NN'), ('eines', 'ART'), ('``', '$('), ('Zurück', 'NN'), ('zur', 'APPRART'), ('Natur', 'NN'), (\"''\", '$('), ('.', '$.')]\n"
]
}
],
"source": [
"#load the German TIGER corpus (CoNLL-09 format: ID, FORM, LEMMA, PLEMMA, POS, ...; keep FORM as 'words' and POS as 'pos')\n",
"X3 = nltk.corpus.ConllCorpusReader(root='/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/german/', fileids=['tiger_release_aug07.corrected.16012013.conll09'], columntypes=['ignore', 'words', 'ignore', 'ignore', 'pos'], encoding='utf-8')\n",
"german_tagged_sents = X3.tagged_sents()\n",
"print(type(german_tagged_sents))\n",
"print(len(german_tagged_sents))\n",
"\n",
"print(german_tagged_sents[-100])"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Task 1\n",
"* get results for english (plot a graph with all classifiers x results)\n",
"    * performance 1.1 = model1 in X1\n",
"    * performance 1.2 = model2 in X1\n",
"    * performance 1.3.x = model3.x in X1\n",
"    * performance 1.4 = model1 in X2\n",
"    * performance 1.5 = model2 in X2\n",
"    * performance 1.6.x = model3.x in X2"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Generate training and test data for X1\n",
"1. split the annotated sentences into training and test data\n",
"2. split the training data into input data and teacher data\n",
"    * input: the feature vector of each word\n",
"    * output: the POS tag of each word"
]
},
|
|
{
"cell_type": "code",
"execution_count": 32,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#helper to generate training data: remove the assigned tags from a tagged sentence\n",
"def untag(tagged_sentence):\n",
"    return [w for w, t in tagged_sentence]"
]
},
|
|
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nltk.corpus.reader.util.ConcatenatedCorpusView'>\n",
"got 3131 training sentences and 783 test sentences\n"
]
}
],
"source": [
"#print(type(nltk.corpus.treebank.tagged_sents()))\n",
"\n",
"#object including the annotated sentences\n",
"annotated_sent = nltk.corpus.treebank.tagged_sents()\n",
"\n",
"#to split the data, calculate the borders for ratio\n",
"cutoff = int(.8 * len(annotated_sent))\n",
"training_sentences_X1 = annotated_sent[:cutoff]\n",
"test_sentences_X1 = annotated_sent[cutoff:]\n",
"\n",
"#show the amount of sentences\n",
"print(\"got \",len(training_sentences_X1),\" training sentences and \", len(test_sentences_X1), \" test sentences\")"
]
},
|
|
{
"cell_type": "code",
"execution_count": 34,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#for training, split each tagged sentence into X (the feature dict of every word)\n",
"#and y (the corresponding POS tag of every word)\n",
"def transform_to_dataset(tagged_sentences):\n",
"    X, y = [], []\n",
"    for tagged_sentence in tagged_sentences:\n",
"        for index in range(len(tagged_sentence)):\n",
"            X.append(features(untag(tagged_sentence), index))\n",
"            y.append(tagged_sentence[index][1])\n",
"    return X, y"
]
},
|
|
{
"cell_type": "code",
"execution_count": 35,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#training input set X1 and training target set y1\n",
"X1, y1 = transform_to_dataset(training_sentences_X1)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Generate training and test data for X2\n",
"1. split the annotated sentences into training and test data\n",
"2. split the training data into input data and teacher data\n",
"    * input: the feature vector of each word\n",
"    * output: the POS tag of each word"
]
},
|
|
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"got 45872 training sentences and 11468 test sentences\n"
]
}
],
"source": [
"#object including the annotated sentences\n",
"annotated_sent = nltk.corpus.brown.tagged_sents()\n",
"\n",
"#to split the data, calculate the borders for ratio\n",
"cutoff = int(.8 * len(annotated_sent))\n",
"training_sentences_X2 = annotated_sent[:cutoff]\n",
"test_sentences_X2 = annotated_sent[cutoff:]\n",
"\n",
"#show the amount of sentences\n",
"print(\"got \",len(training_sentences_X2),\" training sentences and \", len(test_sentences_X2), \" test sentences\")"
]
},
|
|
{
"cell_type": "code",
"execution_count": 37,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#training input set X2 and training target set y2\n",
"X2, y2 = transform_to_dataset(training_sentences_X2)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Generate training and test data for X3\n",
"1. split the annotated sentences into training and test data\n",
"2. split the training data into input data and teacher data\n",
"    * input: the feature vector of each word\n",
"    * output: the POS tag of each word"
]
},
|
|
{
"cell_type": "code",
"execution_count": 63,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nltk.collections.LazyMap'>\n",
"got 40377 training sentences and 10095 test sentences\n"
]
}
],
"source": [
"#object including the annotated sentences\n",
"annotated_sent = X3.tagged_sents()\n",
"\n",
"#print(type(annotated_sent))\n",
"\n",
"#to split the data, calculate the borders for ratio\n",
"cutoff = int(.8 * len(annotated_sent))\n",
"training_sentences_X3 = annotated_sent[:cutoff]\n",
"test_sentences_X3 = annotated_sent[cutoff:]\n",
"\n",
"#show the amount of sentences\n",
"print(\"got \",len(training_sentences_X3),\" training sentences and \", len(test_sentences_X3), \" test sentences\")"
]
},
|
|
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"#training input set X3 and training target set y3\n",
"#note: this rebinds the name X3 from the corpus reader to the feature list (kept as in the original run)\n",
"X3, y3 = transform_to_dataset(training_sentences_X3)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"#### Implementing a classifier\n",
"relevant imports:\n",
"* DecisionTreeClassifier as the classifier\n",
"* DictVectorizer transforms the feature dictionary into a vector that serves as the input for the tree"
]
},
|
|
{
"cell_type": "code",
"execution_count": 40,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"from sklearn.pipeline import Pipeline"
]
},
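{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#added illustration (not part of the original solution): DictVectorizer one-hot\n",
"#encodes the string-valued entries of the feature dict produced by features()\n",
"demo_vectorizer = DictVectorizer(sparse=False)\n",
"demo_matrix = demo_vectorizer.fit_transform([features(['a', 'demo'], 0), features(['a', 'demo'], 1)])\n",
"print(demo_matrix.shape)\n",
"print(demo_vectorizer.feature_names_[:5])"
]
},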
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The Pipeline chains the vectorizer and the classifier."
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"clf = Pipeline([\n",
"    ('vectorizer', DictVectorizer(sparse=False)),\n",
"    ('classifier', DecisionTreeClassifier(criterion='entropy'))\n",
"])"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.1\n",
"* fit the decision tree on a limited amount (size) of the training data\n",
"* evaluate with the score function on the test data"
]
},
|
|
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training OK\n",
"Accuracy: 0.880632766106\n"
]
}
],
"source": [
"size=10000\n",
"clf.fit(X1[:size], y1[:size])\n",
" \n",
"print('training OK')\n",
" \n",
"X1_test, y1_test = transform_to_dataset(test_sentences_X1)\n",
"\n",
"performance1_1 = clf.score(X1_test, y1_test)\n",
"\n",
"print(\"Accuracy:\", performance1_1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.2"
]
},
|
|
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy: 0.8936074654423873\n"
]
}
],
"source": [
"#extract only the words from the X1 feature test set\n",
"only_words_X1 = [x['word'] for x in X1_test]\n",
"\n",
"#tag them with the pre-trained NLTK POS tagger\n",
"pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X1, lang='eng')]\n",
"\n",
"#calculate performance by comparing each pos tag\n",
"performance1_2 = 0\n",
"for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n",
"    if(pos_tags_by_pre_trained_pos_tagger[index]==y1_test[index]):\n",
"        performance1_2 += 1\n",
"performance1_2 /= len(pos_tags_by_pre_trained_pos_tagger)\n",
"\n",
"print(\"Accuracy:\", performance1_2)"
]
},
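{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#added alternative sketch (not part of the original solution): evaluate the\n",
"#pre-trained tagger sentence by sentence, which is how nltk.pos_tag is normally\n",
"#applied, instead of tagging one flat list of all test words at once\n",
"correct = total = 0\n",
"for sent in test_sentences_X1:\n",
"    predicted = pos_tag(untag(sent), lang='eng')\n",
"    correct += sum(1 for (w, gold), (_, pred) in zip(sent, predicted) if gold == pred)\n",
"    total += len(sent)\n",
"print('sentence-wise accuracy:', correct / total)"
]
},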
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.3\n",
"1. DefaultTagger that simply tags everything with the same tag\n",
"2. RegexpTagger that applies tags according to a set of regular expressions\n",
"3. N-Gram (an n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding tokens)\n",
"    + UnigramTagger\n",
"    + BigramTagger\n",
"    + TrigramTagger"
]
},
|
|
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"performance 1.3.1 is: 0.1447677029791906\n",
"performance 1.3.2 is: 0.24232746145017217\n",
"performance 1.3.3 is: 0.8608213982733669\n",
"performance 1.3.4 is: 0.1132791057437996\n",
"performance 1.3.5 is: 0.06736863116922003\n"
]
}
],
"source": [
"#evaluate taggers\n",
"# performance of Default Tagger\n",
"performance1_3_1 = def_model.evaluate(test_sentences_X1)\n",
"print('performance 1.3.1 is: ',performance1_3_1)\n",
"\n",
"# performance of Regex Tagger\n",
"performance1_3_2 = regexp_model.evaluate(test_sentences_X1)\n",
"print('performance 1.3.2 is: ',performance1_3_2)\n",
"\n",
"# performance of Unigram Tagger\n",
"performance1_3_3 = uni_model.evaluate(test_sentences_X1)\n",
"print('performance 1.3.3 is: ',performance1_3_3)\n",
"\n",
"# performance of Bigram Tagger\n",
"performance1_3_4 = bi_model.evaluate(test_sentences_X1)\n",
"print('performance 1.3.4 is: ',performance1_3_4)\n",
"\n",
"# performance of Trigram Tagger\n",
"performance1_3_5 = tri_model.evaluate(test_sentences_X1)\n",
"print('performance 1.3.5 is: ',performance1_3_5)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.4"
]
},
|
|
{
"cell_type": "code",
"execution_count": 45,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"calculated performance 1.4= 0.75680543774\n"
]
}
],
"source": [
"size=10000\n",
"clf.fit(X2[:size], y2[:size])\n",
"X2_test, y2_test = transform_to_dataset(test_sentences_X2)\n",
"performance1_4 = clf.score(X2_test, y2_test)\n",
"print(\"calculated performance 1.4= \",performance1_4)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.5"
]
},
|
|
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy: 0.6044583741861567\n"
]
}
],
"source": [
"#extract only the words from the X2 feature test set\n",
"only_words_X2 = [x['word'] for x in X2_test]\n",
"\n",
"#tag them with the pre-trained NLTK POS tagger\n",
"pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X2, lang='eng')]\n",
"\n",
"#calculate performance by comparing each pos tag\n",
"performance1_5 = 0\n",
"for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n",
"    if(pos_tags_by_pre_trained_pos_tagger[index]==y2_test[index]):\n",
"        performance1_5 += 1\n",
"performance1_5 /= len(pos_tags_by_pre_trained_pos_tagger)\n",
"\n",
"print(\"Accuracy:\", performance1_5)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.6"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"performance 1.6.1 is: 0.10997763652187324\n",
"performance 1.6.2 is: 0.17594438874995869\n",
"performance 1.6.3 is: 0.8773754310202373\n",
"performance 1.6.4 is: 0.3390490564374869\n",
"performance 1.6.5 is: 0.19178610379738467\n"
]
}
],
"source": [
"uni_model = nltk.UnigramTagger(training_sentences_X2)\n",
"bi_model = nltk.BigramTagger(training_sentences_X2)\n",
"tri_model = nltk.TrigramTagger(training_sentences_X2)\n",
"\n",
"#evaluate taggers\n",
"# performance of Default Tagger\n",
"performance1_6_1 = def_model.evaluate(test_sentences_X2)\n",
"print('performance 1.6.1 is: ',performance1_6_1)\n",
"\n",
"# performance of Regex Tagger\n",
"performance1_6_2 = regexp_model.evaluate(test_sentences_X2)\n",
"print('performance 1.6.2 is: ',performance1_6_2)\n",
"\n",
"# performance of Unigram Tagger\n",
"performance1_6_3 = uni_model.evaluate(test_sentences_X2)\n",
"print('performance 1.6.3 is: ',performance1_6_3)\n",
"\n",
"# performance of Bigram Tagger\n",
"performance1_6_4 = bi_model.evaluate(test_sentences_X2)\n",
"print('performance 1.6.4 is: ',performance1_6_4)\n",
"\n",
"# performance of Trigram Tagger\n",
"performance1_6_5 = tri_model.evaluate(test_sentences_X2)\n",
"print('performance 1.6.5 is: ',performance1_6_5)"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Using the classifier\n",
"for the meaning of the resulting POS tags see:\n",
"https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html"
]
},
|
|
{
"cell_type": "code",
"execution_count": 48,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3.6.3\n",
"checking...\n",
"[('Hello', 'VBD-HL'), ('world', 'VBD'), (',', ','), ('lets', 'NNS'), ('do', 'DO'), ('something', 'PN'), ('awesome', 'NN'), ('today', 'NR'), ('!', 'CD')]\n"
]
}
],
"source": [
"#tag a tokenized sentence with model1 (renamed from pos_tag to avoid shadowing nltk.pos_tag)\n",
"def pos_tag_with_model1(sentence):\n",
"    print('checking...')\n",
"    tags = clf.predict([features(sentence, index) for index in range(len(sentence))])\n",
"    return zip(sentence, tags)\n",
"\n",
"import platform\n",
"print(platform.python_version())\n",
"\n",
"print(list(pos_tag_with_model1(word_tokenize('Hello world, lets do something awesome today!'))))"
]
},
|
|
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Results for Task 1\n",
"* get results for english (plot a graph with all classifiers x results)\n",
"    * performance 1.1 = model1 in X1\n",
"    * performance 1.2 = model2 in X1\n",
"    * performance 1.3.x = model3.x in X1\n",
"    * performance 1.4 = model1 in X2\n",
"    * performance 1.5 = model2 in X2\n",
"    * performance 1.6.x = model3.x in X2"
]
},
{
"cell_type": "code",
"execution_count": 49,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n"
]
},
{
"data": {
"text/html": [
"<iframe id=\"igraph\" scrolling=\"no\" style=\"border:none;\" seamless=\"seamless\" src=\"https://plot.ly/~carsten95/0.embed\" height=\"525px\" width=\"100%\"></iframe>"
],
"text/plain": [
"<plotly.tools.PlotlyDisplay object>"
]
},
"execution_count": 49,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import plotly\n",
"plotly.tools.set_credentials_file(username='carsten95', api_key='vElf5IOxiFheQdjTxjXW')\n",
"plotly.__version__\n",
"import plotly.plotly as py\n",
"import plotly.graph_objs as go\n",
"\n",
"data = [go.Bar(\n",
" x=['performance 1.1', 'performance 1.2', 'performance 1.3.1', 'performance 1.3.2', 'performance 1.3.3', 'performance 1.3.4', 'performance 1.3.5', 'performance 1.4', 'performance 1.5' , 'performance 1.6.1', 'performance 1.6.2', 'performance 1.6.3', 'performance 1.6.4', 'performance 1.6.5'],\n",
" y=[performance1_1, performance1_2, performance1_3_1, performance1_3_2, performance1_3_3, performance1_3_4, performance1_3_5, performance1_4, performance1_5, performance1_6_1, performance1_6_2, performance1_6_3, performance1_6_4, performance1_6_5]\n",
" )]\n",
"\n",
"py.iplot(data, filename='basic-bar')"
]
},
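{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"#added offline alternative (a sketch; assumes matplotlib is installed):\n",
"#draws the same bar chart locally, without a plot.ly account\n",
"import matplotlib.pyplot as plt\n",
"labels = ['1.1', '1.2', '1.3.1', '1.3.2', '1.3.3', '1.3.4', '1.3.5', '1.4', '1.5', '1.6.1', '1.6.2', '1.6.3', '1.6.4', '1.6.5']\n",
"values = [performance1_1, performance1_2, performance1_3_1, performance1_3_2, performance1_3_3, performance1_3_4, performance1_3_5, performance1_4, performance1_5, performance1_6_1, performance1_6_2, performance1_6_3, performance1_6_4, performance1_6_5]\n",
"plt.figure(figsize=(12, 4))\n",
"plt.bar(range(len(values)), values)\n",
"plt.xticks(range(len(values)), labels, rotation=45)\n",
"plt.ylabel('accuracy')\n",
"plt.show()"
]
},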
|
|
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"### Results for Task 2\n",
"* train your model with standard features (plot a graph with all classifiers x results)\n",
"    * performance 2.1 = model4 in X3\n",
"        * model4: your POS tagger model (not english)\n",
"    * performance 2.2 = model5 in X3\n",
"        * model5: pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)"
]
},
{
"cell_type": "code",
"execution_count": 70,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"clf = Pipeline([\n",
"    ('vectorizer', DictVectorizer(sparse=False)),\n",
"    ('classifier', DecisionTreeClassifier(criterion='entropy'))\n",
"])"
]
},
{
"cell_type": "code",
"execution_count": 76,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training OK\n",
"Accuracy: 0.836976962858\n"
]
}
],
"source": [
"size=10000\n",
"clf.fit(X3[:size], y3[:size])\n",
" \n",
"print('training OK')\n",
" \n",
"X3_test, y3_test = transform_to_dataset(test_sentences_X3)\n",
"\n",
"performance2_1 = clf.score(X3_test, y3_test)\n",
"\n",
"print(\"Accuracy:\", performance2_1)"
]
},
|
|
{
"cell_type": "code",
"execution_count": 77,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n"
]
},
{
"data": {
"text/html": [
"<iframe id=\"igraph\" scrolling=\"no\" style=\"border:none;\" seamless=\"seamless\" src=\"https://plot.ly/~carsten95/0.embed\" height=\"525px\" width=\"100%\"></iframe>"
],
"text/plain": [
"<plotly.tools.PlotlyDisplay object>"
]
},
"execution_count": 77,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"performance2_2 = 0  #model5 (RDRPOSTagger/TreeTagger) was not implemented; placeholder value\n",
"\n",
"data = [go.Bar(\n",
"    x=['performance 2.1', 'performance 2.2'],\n",
"    y=[performance2_1, performance2_2]\n",
"    )]\n",
"\n",
"py.iplot(data, filename='basic-bar')"
]
},
|
|
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}