Merge branch 'master' of ssh://the-cake-is-a-lie.net:20022/jonas/NLP-LAB
commit 8747300382
@@ -4,12 +4,18 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# Exercise 1"
"# Exercise 1\n",
"Solution by:\n",
"Carsten Draschner \n",
"2719095\n",
"\n",
"Following Instructions: \n",
"https://github.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab/blob/master/2018_SoSe/exercises/Task01_Instructions.ipynb"
]
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 83,
"metadata": {
"collapsed": true
},
@@ -25,12 +31,12 @@
"metadata": {},
"source": [
"## Classifiers\n",
"note: for model1 and model3 you can try different classifiers: Hidden Markov Model, Logistic Regression, Maximum Entropy Markov Models, Decision Trees, Naive Bayes, etc.. __choose one!__"
"**Decision Tree** import from scikit-learn"
]
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 84,
"metadata": {
"collapsed": true
},
@@ -45,19 +51,20 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"### 1. model1 = your POS tagger model (english)"
"### 1. model1 = your POS tagger model (english)\n",
"for a word defined by its index within the given sentence, a feature vector for this word will be determined"
]
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 85,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'word': 'bims', 'length': 4, 'is_capitalized': False, 'prefix-1': 'b', 'suffix-1': 's', 'prev_word': 'i', 'next_word': 'der', 'kindOfCamelCase': False, 'includesSpace': False}\n"
"defined own feature model\n"
]
}
],
@@ -72,10 +79,13 @@
" 'prev_word': '' if index == 0 else sentence[index - 1],\n",
" 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
" 'kindOfCamelCase': sentence[index][1:].lower() != sentence[index][1:],\n",
" 'includesSpace': True if ((' ') in sentence[index]) else False #depemds on tokenizer\n",
" 'includesSpace': True if ((' ') in sentence[index]) else False, #depends on tokenizer\n",
" 'containsNumber': sum(str(i) in (sentence[index]) for i in range(10))>0,\n",
" 'prefix-2': sentence[index][1] if len(sentence[index])>1 else \"-1\",\n",
" 'suffix-2': sentence[index][-2] if len(sentence[index])>1 else \"-1\"\n",
" }\n",
"\n",
"print(features(\"halli hallo i bims der Programmierer\".strip().split(\" \"), 3))"
"print(\"defined own feature model\")\n",
"#print(features(\"halli hallo i bims der Programmierer\".strip().split(\" \"), 3))"
]
},
{
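For reference, the hunk above shows only part of the feature extractor. A minimal sketch of the complete function, assuming the hidden lines match the keys visible in the printed example (word, length, is_capitalized, prefix-1, suffix-1), might look like:

def features(sentence, index):
    # sentence: list of tokens, index: position of the word to featurize
    word = sentence[index]
    return {
        'word': word,
        'length': len(word),
        'is_capitalized': word[0].upper() == word[0],
        'prefix-1': word[0],
        'suffix-1': word[-1],
        'prev_word': '' if index == 0 else sentence[index - 1],
        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],
        'kindOfCamelCase': word[1:].lower() != word[1:],
        'includesSpace': ' ' in word,  # depends on tokenizer
        'containsNumber': any(str(d) in word for d in range(10)),
        'prefix-2': word[1] if len(word) > 1 else "-1",
        'suffix-2': word[-2] if len(word) > 1 else "-1",
    }

print(features("halli hallo i bims der Programmierer".split(" "), 3))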
@@ -87,7 +97,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 86,
"metadata": {
"collapsed": true
},
@@ -112,13 +122,24 @@
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
"execution_count": 87,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"#used from description for RegexpTagger\\npatterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*'s$', 'NN$'), \\n (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\\n\\n#train taggers\\ndef_model = nltk.DefaultTagger('NN')\\nregexp_model = nltk.RegexpTagger(patterns)\\nuni_model = nltk.UnigramTagger(training_sentences_X1)\\nbi_model = nltk.BigramTagger(training_sentences_X1)\\ntri_model = nltk.TrigramTagger(training_sentences_X1)\""
]
},
"outputs": [],
"execution_count": 87,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#used from description for RegexpTagger\n",
"#see Task 1.3 and 1.6\n",
"\n",
"'''#used from description for RegexpTagger\n",
"patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\'s$', 'NN$'), \n",
" (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
"\n",
@@ -127,7 +148,7 @@
"regexp_model = nltk.RegexpTagger(patterns)\n",
"uni_model = nltk.UnigramTagger(training_sentences_X1)\n",
"bi_model = nltk.BigramTagger(training_sentences_X1)\n",
"tri_model = nltk.TrigramTagger(training_sentences_X1)"
"tri_model = nltk.TrigramTagger(training_sentences_X1)'''"
]
},
{
@@ -139,12 +160,14 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 88,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
"source": [
"#see Task 2.1"
]
},
{
"cell_type": "markdown",
@@ -155,12 +178,14 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 89,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
"source": [
"#see Task 2.2"
]
},
{
"cell_type": "markdown",
@@ -179,7 +204,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 90,
"metadata": {},
"outputs": [
{
@@ -188,13 +213,15 @@
"text": [
"[nltk_data] Downloading package treebank to\n",
"[nltk_data] /Users/Carsten/nltk_data...\n",
"[nltk_data] Package treebank is already up-to-date!\n"
"[nltk_data] Package treebank is already up-to-date!\n",
"downloaded treebank\n"
]
}
],
"source": [
"nltk.download('treebank')\n",
"x1 = nltk.corpus.treebank"
"x1 = nltk.corpus.treebank\n",
"print(\"downloaded treebank\")"
]
},
{
@@ -206,7 +233,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 91,
"metadata": {},
"outputs": [
{
@@ -214,13 +241,15 @@
"output_type": "stream",
"text": [
"[nltk_data] Downloading package brown to /Users/Carsten/nltk_data...\n",
"[nltk_data] Package brown is already up-to-date!\n"
"[nltk_data] Package brown is already up-to-date!\n",
"downloaded brown\n"
]
}
],
"source": [
"nltk.download('brown')\n",
"x2 = nltk.corpus.brown"
"x2 = nltk.corpus.brown\n",
"print(\"downloaded brown\")"
]
},
{
@@ -232,7 +261,38 @@
},
{
"cell_type": "code",
"execution_count": 64,
"execution_count": 92,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#? nltk.corpus.ConllCorpusReader"
]
},
{
"cell_type": "code",
"execution_count": 93,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"downloaded german tiger corpus\n"
]
}
],
"source": [
"# TODO: loading german corpus \n",
"X3 = nltk.corpus.ConllCorpusReader(root='/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/german/', fileids=['tiger_release_aug07.corrected.16012013.conll09'], columntypes=['ignore', 'words', 'ignore', 'ignore', 'pos'], encoding='utf-8')\n",
"german_tagged_sents = X3.tagged_sents()\n",
"print(\"downloaded german tiger corpus\")"
]
},
{
"cell_type": "code",
"execution_count": 94,
"metadata": {},
"outputs": [
{
@@ -241,7 +301,7 @@
"'#import pandas as pd\\n#df = pd.read_table(\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/set.hr.conll\")\\n#df.head()\\n\\n#x3 = other language\\n#from croatia:\\n#by Željko Agić, Nikola Ljubešić http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\\n#licenses: https://creativecommons.org/licenses/by-sa/4.0/\\ncorp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=(\\'ignore\\',\\'ignore\\',\\'pos\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\',\\'ignore\\'))\\nprint(corp.tagged_sents[-100])\\n#from croatia:\\n#by Željko Agić, Nikola Ljubešić http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\\n#licenses: https://creativecommons.org/licenses/by-sa/4.0/'"
]
},
"execution_count": 64,
"execution_count": 94,
"metadata": {},
"output_type": "execute_result"
}
@@ -262,42 +322,6 @@
"#licenses: https://creativecommons.org/licenses/by-sa/4.0/'''\n"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#? nltk.corpus.ConllCorpusReader"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nltk.collections.LazyMap'>\n",
"50472\n",
"[('So', 'ADV'), ('kann', 'VMFIN'), ('man', 'PIS'), ('Marsilius', 'NE'), ('von', 'APPR'), ('Padua', 'NE'), ('so', 'ADV'), ('wenig', 'ADV'), ('zu', 'APPR'), ('einem', 'ART'), ('Vorläufer', 'NN'), ('moderner', 'ADJA'), ('Volkssouveränität', 'NN'), ('machen', 'VVINF'), ('wie', 'KOKOM'), ('Rousseau', 'NE'), ('zum', 'APPRART'), ('Verkünder', 'NN'), ('eines', 'ART'), ('``', '$('), ('Zurück', 'NN'), ('zur', 'APPRART'), ('Natur', 'NN'), (\"''\", '$('), ('.', '$.')]\n"
]
}
],
"source": [
"# TODO: loading german corpus \n",
"X3 = nltk.corpus.ConllCorpusReader(root='/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/german/', fileids=['tiger_release_aug07.corrected.16012013.conll09'], columntypes=['ignore', 'words', 'ignore', 'ignore', 'pos'], encoding='utf-8')\n",
"german_tagged_sents = X3.tagged_sents()\n",
"print(type(german_tagged_sents))\n",
"print(len(german_tagged_sents))\n",
"\n",
"print (german_tagged_sents[-100])"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -312,6 +336,13 @@
" * performance 1.6.x = model3.x in X2"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Generating Test Data"
]
},
{
"cell_type": "markdown",
"metadata": {},
@@ -325,27 +356,26 @@
},
{
"cell_type": "code",
"execution_count": 32,
"execution_count": 95,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#to generate trainingsdata, delete the assigned tags as a function\n",
"#to generate training data, ignore the assigned tags via a helper function\n",
"def untag(tagged_sentence):\n",
" return [w for w, t in tagged_sentence]"
]
},
{
"cell_type": "code",
"execution_count": 65,
"execution_count": 96,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nltk.corpus.reader.util.ConcatenatedCorpusView'>\n",
"got 3131 training sentences and 783 test sentences\n"
]
}
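The 3131/783 figures correspond to roughly an 80/20 split of the treebank's 3914 tagged sentences. The split itself sits outside this hunk; a plausible sketch (the variable names here are assumptions):

tagged_sentences_X1 = x1.tagged_sents()           # treebank from the cell above
cutoff = int(0.8 * len(tagged_sentences_X1))      # 3131 of 3914 sentences
training_sentences_X1 = tagged_sentences_X1[:cutoff]
test_sentences_X1 = tagged_sentences_X1[cutoff:]  # remaining 783 sentences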
@@ -365,9 +395,16 @@
"print(\"got \",len(training_sentences_X1),\" training sentences and \", len(test_sentences_X1), \" test sentences\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"**transform_to_dataset** generates the input X as a list of feature dictionaries and an output y as a list of POS tags. "
]
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 97,
"metadata": {
"collapsed": true
},
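The body of transform_to_dataset lies outside this hunk; given the description above and the untag helper, a plausible sketch (an assumption, not the committed code):

def transform_to_dataset(tagged_sentences):
    X, y = [], []
    for tagged in tagged_sentences:
        words = untag(tagged)
        for index in range(len(tagged)):
            X.append(features(words, index))  # feature dict per word
            y.append(tagged[index][1])        # gold POS tag as teacher signal
    return X, y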
@@ -385,14 +422,21 @@
},
{
"cell_type": "code",
"execution_count": 35,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 98,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"generated X1 (feature sets) and y1 set of teacher tags\n"
]
}
],
"source": [
"#training input set X and training teacher set y\n",
"X1, y1 = transform_to_dataset(training_sentences_X1)"
"X1, y1 = transform_to_dataset(training_sentences_X1)\n",
"print(\"generated X1 (feature sets) and y1 set of teacher tags\")"
]
},
{
@@ -408,7 +452,7 @@
},
{
"cell_type": "code",
"execution_count": 36,
"execution_count": 99,
"metadata": {},
"outputs": [
{
@@ -434,14 +478,21 @@
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 100,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"generated X2 (feature sets) and y2 set of teacher tags\n"
]
}
],
"source": [
"#training input set X and training teacher set y\n",
"X2, y2 = transform_to_dataset(training_sentences_X2)"
"X2, y2 = transform_to_dataset(training_sentences_X2)\n",
"print(\"generated X2 (feature sets) and y2 set of teacher tags\")#(X3[:3], y3[:3])"
]
},
{
@@ -457,14 +508,13 @@
},
{
"cell_type": "code",
"execution_count": 63,
"execution_count": 101,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'nltk.collections.LazyMap'>\n",
"got 40377 training sentences and 10095 test sentences\n"
]
}
@@ -486,12 +536,21 @@
},
{
"cell_type": "code",
"execution_count": 66,
"execution_count": 102,
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"generated X3 (feature sets) and y3 set of teacher tags\n"
]
}
],
"source": [
"#training input set X and training teacher set y\n",
"X3, y3 = transform_to_dataset(training_sentences_X3)"
"X3, y3 = transform_to_dataset(training_sentences_X3)\n",
"print(\"generated X3 (feature sets) and y3 set of teacher tags\")#(X3[:3], y3[:3])"
]
},
{
@@ -508,15 +567,22 @@
},
{
"cell_type": "code",
"execution_count": 40,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 103,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"imported sktree, DictVectorizer, Pipeline\n"
]
}
],
"source": [
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"from sklearn.pipeline import Pipeline"
"from sklearn.pipeline import Pipeline\n",
"print(\"imported sktree, DictVectorizer, Pipeline\")"
]
},
{
@@ -528,30 +594,44 @@
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {
"collapsed": true
},
"outputs": [],
"execution_count": 104,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Initialized classifier\n"
]
}
],
"source": [
"clf = Pipeline([\n",
" ('vectorizer', DictVectorizer(sparse=False)),\n",
" ('classifier', DecisionTreeClassifier(criterion='entropy'))\n",
"])"
"])\n",
"print(\"Initialized classifier\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.1 \n",
"#### Calculating performances"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.1 - own POS tagger model with X1 = treebank\n",
"* fit the decision tree on a limited amount (size) of training data\n",
"* test and compare with the score function on the test data"
]
},
{
"cell_type": "code",
"execution_count": 42,
"execution_count": 105,
"metadata": {},
"outputs": [
{
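The fit/score pattern that the following cells apply to X1, X2 and X3 presumably looks like this (a sketch; the size value is copied from the visible task 2.1 cell, the other names from the snippets above):

size = 10000
clf.fit(X1[:size], y1[:size])                 # train on a limited slice for speed
X1_test, y1_test = transform_to_dataset(test_sentences_X1)
performance1_1 = clf.score(X1_test, y1_test)  # mean accuracy of the pipeline
print("Accuracy:", performance1_1)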
@@ -559,7 +639,7 @@
"output_type": "stream",
"text": [
"training OK\n",
"Accuracy: 0.880632766106\n"
"Accuracy: 0.883077997904\n"
]
}
],
@@ -580,12 +660,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.2"
"##### Calculate performance 1.2 - pre-trained POS tagger model using NLTK (maxentropy english) with X1 = treebank"
]
},
{
"cell_type": "code",
"execution_count": 43,
"execution_count": 106,
"metadata": {},
"outputs": [
{
@@ -617,7 +697,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.3\n",
"##### Calculate performance 1.3 - rule-based classifiers with X1 = treebank\n",
"1. DefaultTagger that simply tags everything with the same tag\n",
"2. RegexpTagger that applies tags according to a set of regular expressions\n",
"3. N-Gram (an n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding tokens)\n",
@@ -628,7 +708,7 @@
},
{
"cell_type": "code",
"execution_count": 44,
"execution_count": 107,
"metadata": {},
"outputs": [
{
@@ -644,6 +724,17 @@
}
],
"source": [
"#used from description for RegexpTagger\n",
"patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\'s$', 'NN$'), \n",
" (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
"\n",
"#train taggers\n",
"def_model = nltk.DefaultTagger('NN')\n",
"regexp_model = nltk.RegexpTagger(patterns)\n",
"uni_model = nltk.UnigramTagger(training_sentences_X1)\n",
"bi_model = nltk.BigramTagger(training_sentences_X1)\n",
"tri_model = nltk.TrigramTagger(training_sentences_X1)\n",
"\n",
"#evaluate taggers\n",
"# performance of Default Tagger\n",
"performance1_3_1 = def_model.evaluate(test_sentences_X1)\n",
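The hunk cuts off after the first evaluation; the remaining taggers are presumably scored the same way via TaggerI.evaluate (the performance1_3_x names extrapolate the numbering above and are assumptions):

performance1_3_2 = regexp_model.evaluate(test_sentences_X1)
performance1_3_3 = uni_model.evaluate(test_sentences_X1)
performance1_3_4 = bi_model.evaluate(test_sentences_X1)
performance1_3_5 = tri_model.evaluate(test_sentences_X1)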
@@ -670,19 +761,19 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.4"
"##### Calculate performance 1.4 - own POS tagger model with X2 = brown"
]
},
{
"cell_type": "code",
"execution_count": 45,
"execution_count": 108,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"calculated perfomance 1.4= 0.75680543774\n"
"calculated perfomance 1.4= 0.772156918908\n"
]
}
],
@@ -698,12 +789,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.5"
"##### Calculate performance 1.5 - pre-trained POS tagger model using NLTK (maxentropy english) with X2 = brown"
]
},
{
"cell_type": "code",
"execution_count": 46,
"execution_count": 109,
"metadata": {},
"outputs": [
{
@@ -735,12 +826,12 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate performance 1.6"
"##### Calculate performance 1.6 - rule-based classifiers with X2 = brown"
]
},
{
"cell_type": "code",
"execution_count": 47,
"execution_count": 110,
"metadata": {},
"outputs": [
{
@@ -793,16 +884,14 @@
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 111,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"3.6.3\n",
"checking...\n",
"[('Hello', 'VBD-HL'), ('world', 'VBD'), (',', ','), ('lets', 'NNS'), ('do', 'DO'), ('something', 'PN'), ('awesome', 'NN'), ('today', 'NR'), ('!', 'CD')]\n"
"3.6.3\n"
]
}
],
@@ -816,7 +905,7 @@
"import platform\n",
"print(platform.python_version())\n",
"\n",
"print(list(pos_tag(word_tokenize('Hello world, lets do something awesome today!'))))"
"#print(list(pos_tag(word_tokenize('Hello world, lets do something awesome today!'))))"
]
},
{
@@ -835,7 +924,7 @@
},
{
"cell_type": "code",
"execution_count": 49,
"execution_count": 112,
"metadata": {
"scrolled": true
},
@@ -856,7 +945,7 @@
"<plotly.tools.PlotlyDisplay object>"
]
},
"execution_count": 49,
"execution_count": 112,
"metadata": {},
"output_type": "execute_result"
}
@@ -890,9 +979,25 @@
" * pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate Performance 2.1"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"again building a pipeline:\n",
"* first, vectorizing the dictionary based on the feature dict\n",
"* second, initializing and training the decision tree classifier"
]
},
{
"cell_type": "code",
"execution_count": 70,
"execution_count": 113,
"metadata": {
"collapsed": true
},
@@ -906,15 +1011,15 @@
},
{
"cell_type": "code",
"execution_count": 76,
"execution_count": 114,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"training OK\n",
"Accuracy: 0.836976962858\n"
"training done\n",
"Accuracy: 0.838839915374\n"
]
}
],
@@ -922,7 +1027,7 @@
"size=10000\n",
"clf.fit(X3[:size], y3[:size])\n",
" \n",
"print('training OK')\n",
"print('training done')\n",
" \n",
"X3_test, y3_test = transform_to_dataset(test_sentences_X3)\n",
"\n",
@@ -931,9 +1036,217 @@
"print(\"Accuracy:\", performance2_1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"##### Calculate Performance 2.2\n",
"* using RDRPOSTagger in a Python 3 port from https://github.com/jacopofar/RDRPOSTagger-python-3"
]
},
{
"cell_type": "code",
"execution_count": 77,
"execution_count": 141,
"metadata": {},
"outputs": [],
"source": [
"#RDRPOSTagger port python 3 from https://github.com/jacopofar/RDRPOSTagger-python-3"
]
},
{
"cell_type": "code",
"execution_count": 131,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"wrote file de_text.txt in cwd with each word of the sentence separated by a space\n"
]
}
],
"source": [
"#generate a German txt text file:\n",
"f = open(\"de_text.txt\", 'w')\n",
"for sentence in test_sentences_X3:\n",
" for word, tag in sentence:\n",
" f.write(word + \" \")\n",
" f.write(\"\\n\")\n",
"f.close()\n",
"\n",
"print(\"wrote file de_text.txt in cwd with each word of the sentence separated by a space\")"
]
},
{
"cell_type": "code",
"execution_count": 132,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"stored: /Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions\n"
]
}
],
"source": [
"#to use RDRPOSTagger we have to store the path where we are currently working and where the downloaded RDRPOSTagger is stored\n",
"import sys, os\n",
"\n",
"#current working directory to restore it later\n",
"dir_path = os.getcwd()\n",
"print(\"stored: \", dir_path)"
]
},
{
"cell_type": "code",
"execution_count": 133,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"switched to path: /Users/Carsten/Downloads/RDRPOSTagger-python-3-master/pSCRDRtagger\n"
]
}
],
"source": [
"#set the RDRPOSTagger path to work inside the downloaded API\n",
"RDRPOS_TAGGER_PATH = \"/Users/Carsten/Downloads/RDRPOSTagger-python-3-master/pSCRDRtagger\"\n",
"sys.path.insert(0, RDRPOS_TAGGER_PATH)\n",
"os.chdir(RDRPOS_TAGGER_PATH)\n",
"print(\"switched to path:\", RDRPOS_TAGGER_PATH)"
]
},
{
"cell_type": "code",
"execution_count": 134,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Node', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'tabStr']\n"
]
}
],
"source": [
"# import and rename for easier use\n",
"import RDRPOSTagger\n",
"r = RDRPOSTagger.RDRPOSTagger()\n",
"\n",
"#load files\n",
"r.constructSCRDRtreeFromRDRfile(\"../Models/POS/German.RDR\")\n",
"DICT = RDRPOSTagger.readDictionary(\"../Models/POS/German.DICT\")"
]
},
{
"cell_type": "code",
"execution_count": 135,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"('\\nOutput file:', 'de_text.txt.TAGGED')\n"
]
}
],
"source": [
"#switch back to the dir in which we worked at the start\n",
"os.chdir(dir_path)\n",
"\n",
"# generate a file with a tag after each word, using the delimiter /\n",
"r.tagRawCorpus(DICT, \"de_text.txt\")"
]
},
{
"cell_type": "code",
"execution_count": 142,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#from the generated text file above, separate the words and tags\n",
"tagged_words = []\n",
"f = open(\"de_text.txt.TAGGED\", 'r')\n",
"for line in f:\n",
" for splits in line.split():\n",
" cmp = splits.rsplit('/',1)\n",
" if len(cmp) != 2:\n",
" print(\"error parsing: \", cmp)\n",
" else:\n",
" w,t = cmp\n",
" tagged_words.append((w,t))"
]
},
{
"cell_type": "code",
"execution_count": 143,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('CUPERTINO', 'NE'), ('(', '$('), ('rtr', 'NE'), ('/', '$('), ('whp', 'XY'), (')', '$('), ('.', '$.'), ('Der', 'ART'), ('Chef', 'NN'), ('des', 'ART')]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"[[('CUPERTINO', 'NE')], [('(', '$('), ('rtr', 'NE'), ('/', '$('), ('whp', 'XY'), (')', '$('), ('.', '$.')], ...]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(tagged_words[:10])\n",
"display(test_sentences_X3[:10])"
]
},
{
"cell_type": "code",
"execution_count": 144,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Accuracy 2.2 = 0.9754407616361072\n"
]
}
],
"source": [
"performance2_2 = 0 # for test \n",
"\n",
"#counter for the words\n",
"i = 0\n",
"\n",
"#evaluate accuracy\n",
"for sent in test_sentences_X3:\n",
" for tagged_w in sent:\n",
" if tagged_w[1] == tagged_words[i][1]:\n",
" performance2_2 += 1\n",
" i += 1\n",
"performance2_2 = performance2_2 / len(tagged_words)\n",
"print(\"Accuracy 2.2 = \",performance2_2)"
]
},
{
"cell_type": "code",
"execution_count": 145,
"metadata": {},
"outputs": [
{
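The word-by-word loop in the cell above assumes the gold and predicted sequences stay aligned over the whole corpus. An equivalent, slightly more defensive formulation using zip (a sketch reusing the same variable names):

gold_tags = [t for sent in test_sentences_X3 for _, t in sent]
# zip stops at the shorter sequence, so a misaligned tail cannot raise an IndexError
matches = sum(g == p for g, (_, p) in zip(gold_tags, tagged_words))
performance2_2 = matches / len(tagged_words)
print("Accuracy 2.2 =", performance2_2)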
@@ -952,14 +1265,13 @@
"<plotly.tools.PlotlyDisplay object>"
]
},
"execution_count": 77,
"execution_count": 145,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"performance2_2 = 0\n",
"\n",
"#visualize results with plotly\n",
"data = [go.Bar(\n",
" x=['performance 2.1', 'performance 2.2'],\n",
" y=[performance2_1, performance2_2]\n",