{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NLP-LAB Exercise 01 by Jonas Weinz\n",
"----"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib ipympl\n",
"import nltk\n",
"import pprint\n",
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"from sklearn.pipeline import Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Implementing our own classifiers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* writing our own feature function"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def features(sentence, index):\n",
"    # build a feature dict for the word at position `index` in `sentence`\n",
"    word = sentence[index]\n",
"    is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n",
"    sentence_length = len(sentence)\n",
"    relative_third = (index * 3) // sentence_length  # 0, 1 or 2: which third of the sentence\n",
"    vowels = word.count('a') + word.count('e') + word.count('i') + word.count('o') + word.count('u')\n",
"    return {\n",
"        'word': word,\n",
"        'is_capitalized': word[0].upper() == word[0],\n",
"        'prefix-1': word[0],\n",
"        'suffix-1': word[-1],\n",
"        # note: prefix-2/suffix-2 are the 2nd and 2nd-to-last characters\n",
"        'prefix-2': word[1] if len(word) > 1 else '',\n",
"        'suffix-2': word[-2] if len(word) > 1 else '',\n",
"        'prev_word': '' if index == 0 else sentence[index - 1],\n",
"        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
"        'length': len(word),\n",
"        'index': index,\n",
"        'rev_index': len(sentence) - index,\n",
"        'sentence_length_': len(sentence),\n",
"        'relative_third': relative_third,\n",
"        'numerical': word.isnumeric(),\n",
"        'is_punctuation_mark': is_punctuation_mark,\n",
"        ',': word == \",\",\n",
"        '.': word == \".\",\n",
"        '!': word == \"!\",\n",
"        '?': word == \"?\",\n",
"        'vowels': vowels\n",
"    }"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"#test_sentence = ['The','cake','is','a','lie','!']\n",
"#for i in range(len(test_sentence)):\n",
"#    pprint.pprint(features(test_sentence, i))"
]
},
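{
"cell_type": "markdown",
"metadata": {},
"source": [
"A live usage sketch (added for illustration; the sentence is made up): it shows the shape of the dictionary that `features` returns for a single token."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch: inspect the feature dict for one token of a toy sentence\n",
"example_sentence = ['The', 'cake', 'is', 'a', 'lie', '!']\n",
"pprint.pprint(features(example_sentence, 1))"
]
},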
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* functions for creating training and test sets"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def untag(tagged_sentence):\n",
"    return [w for w, t in tagged_sentence]\n",
"\n",
"def transform_to_dataset(tagged_sentences):\n",
"    X, y = [], []\n",
"    \n",
"    for s in tagged_sentences:\n",
"        untagged = untag(s)  # untag once per sentence instead of once per word\n",
"        for i in range(len(s)):\n",
"            X.append(features(untagged, i))\n",
"            y.append(s[i][1])\n",
"    return X, y\n",
"\n",
"def create_training_and_test_set(annotated_sentences, relative_cutoff):\n",
"    cutoff = int(relative_cutoff * len(annotated_sentences))\n",
"    training_sentences = annotated_sentences[:cutoff]\n",
"    test_sentences = annotated_sentences[cutoff:]\n",
"    \n",
"    X, y = transform_to_dataset(training_sentences)\n",
"    tX, ty = transform_to_dataset(test_sentences)\n",
"    \n",
"    return X, y, tX, ty"
]
},
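{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small sketch (added for illustration; the tagged sentence is made up): `transform_to_dataset` flattens sentence/tag pairs into one feature dict and one label per word."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch on a made-up tagged sentence\n",
"toy_tagged = [[('The', 'DT'), ('cake', 'NN'), ('!', '.')]]\n",
"X_toy, y_toy = transform_to_dataset(toy_tagged)\n",
"print(len(X_toy), y_toy)  # -> 3 feature dicts and the labels ['DT', 'NN', '.']"
]
},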
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* generic training function (the classifier, e.g. a Decision Tree, is passed in)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def train_classifier(X, y, classifier, max_size=10000):\n",
"    clf = Pipeline([\n",
"        ('vectorizer', DictVectorizer(sparse=False)),\n",
"        ('classifier', classifier)\n",
"    ])\n",
"    \n",
"    print(\"start training…\")\n",
"    \n",
"    # cap the training set at max_size samples (slicing is a no-op for smaller sets)\n",
"    clf.fit(X[:max_size], y[:max_size])\n",
"    \n",
"    print(\"training done\")\n",
"    \n",
"    return clf"
]
},
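{
"cell_type": "markdown",
"metadata": {},
"source": [
"A usage sketch (illustrative only, reusing the toy data from above rather than the real corpora): training the pipeline with the already-imported `DecisionTreeClassifier` and predicting a tag for one token."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch: train on the toy data from the previous example\n",
"toy_clf = train_classifier(X_toy, y_toy, DecisionTreeClassifier(criterion='entropy'))\n",
"print(toy_clf.predict([features(['The', 'cake', '!'], 1)]))"
]
},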
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* classifier evaluator"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def test_classifier(clf, tX, ty):\n",
"    accuracy = clf.score(tX, ty)\n",
"    print(\"Accuracy: \", accuracy)\n",
"    # TODO: more analytics\n",
"    return accuracy"
]
},
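{
"cell_type": "markdown",
"metadata": {},
"source": [
"The TODO above hints at richer analytics; a hedged sketch (not used in the runs below) of a per-tag report via scikit-learn:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch: per-tag precision/recall/F1 instead of plain accuracy\n",
"from sklearn.metrics import classification_report\n",
"\n",
"def detailed_report(clf, tX, ty):\n",
"    predicted = clf.predict(tX)\n",
"    print(classification_report(ty, predicted))"
]
},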
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Task 01:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Performance 1\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 01\n",
"* training and testing a custom English POS tagger model:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def model_01(X, y, tX, ty, max_size=1000):\n",
"    #classifier = DecisionTreeClassifier(criterion='entropy')\n",
"    from sklearn.neural_network import MLPClassifier\n",
"    model01_clf = train_classifier(X, y, MLPClassifier(), max_size=max_size)\n",
"    return test_classifier(clf=model01_clf, tX=tX, ty=ty)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 02"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def model_02(tX, ty):\n",
"    # tag the whole test set with nltk's built-in tagger (fed as one token list)\n",
"    m2_y = nltk.pos_tag([w['word'] for w in tX])\n",
"    # compare results\n",
"    n_correct = sum(1 for i in range(len(ty)) if m2_y[i][1] == ty[i])\n",
"    return n_correct / len(ty)"
]
},
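{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that `nltk.pos_tag` expects one tokenized sentence, so feeding it the whole test set as a single token list ignores sentence boundaries. A hedged, sentence-aware variant (illustrative; it assumes the caller still has the tagged sentences rather than the flattened feature dicts):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch: tag sentence by sentence instead of one flat token list\n",
"def model_02_sentencewise(tagged_test_sentences):\n",
"    correct = total = 0\n",
"    for sent in tagged_test_sentences:\n",
"        predicted = nltk.pos_tag(untag(sent))\n",
"        correct += sum(1 for (w, gold), (_, pred) in zip(sent, predicted) if gold == pred)\n",
"        total += len(sent)\n",
"    return correct / total"
]
},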
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 03"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def model_03(corpus_tagged, corpus_sents, cut=0.8):\n",
"    # corpus_sents is unused; kept so all models share a uniform call signature\n",
"    patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'),\n",
"                (r'.*s$', 'NNS'), (r'^-?[0-9]+(\\.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
"    \n",
"    s = int(len(corpus_tagged) * cut)\n",
"    train_sents = corpus_tagged[:s]\n",
"    test_sents = corpus_tagged[s:]\n",
"    \n",
"    models = {\n",
"        'def_model': nltk.DefaultTagger('NN'),\n",
"        'regexp_model': nltk.RegexpTagger(patterns),\n",
"        'uni_model': nltk.UnigramTagger(train_sents),\n",
"        'bi_model': nltk.BigramTagger(train_sents),\n",
"        'tri_model': nltk.TrigramTagger(train_sents)\n",
"    }\n",
"    \n",
"    performance = {}\n",
"    for name, model in models.items():\n",
"        performance[name] = model.evaluate(test_sents)\n",
"    \n",
"    return performance\n"
]
},
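{
"cell_type": "markdown",
"metadata": {},
"source": [
"The n-gram taggers above return `None` for unseen contexts, which is why `bi_model` and `tri_model` score so poorly on their own. NLTK taggers can be chained through their `backoff` parameter; a hedged sketch (not one of the evaluated models):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch: chain taggers so unseen contexts fall back to simpler models\n",
"def backoff_tagger(train_sents):\n",
"    t0 = nltk.DefaultTagger('NN')\n",
"    t1 = nltk.UnigramTagger(train_sents, backoff=t0)\n",
"    t2 = nltk.BigramTagger(train_sents, backoff=t1)\n",
"    return t2"
]
},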
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Applying the models to the datasets"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('Pierre', 'NNP'),\n",
" ('Vinken', 'NNP'),\n",
" (',', ','),\n",
" ('61', 'CD'),\n",
" ('years', 'NNS'),\n",
" ('old', 'JJ'),\n",
" (',', ','),\n",
" ('will', 'MD'),\n",
" ('join', 'VB'),\n",
" ('the', 'DT'),\n",
" ('board', 'NN'),\n",
" ('as', 'IN'),\n",
" ('a', 'DT'),\n",
" ('nonexecutive', 'JJ'),\n",
" ('director', 'NN'),\n",
" ('Nov.', 'NNP'),\n",
" ('29', 'CD'),\n",
" ('.', '.')]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"P1.1\n",
"start training…\n",
"training done\n",
"Accuracy: 0.7755377014821099\n",
"P1.2\n",
"P1.3\n",
"P1.4\n",
"start training…\n",
"training done\n",
"Accuracy: 0.63253390325317\n",
"P1.5\n",
"P1.6\n",
"{'P1.1': 0.7755377014821099,\n",
" 'P1.2': 0.8936074654423873,\n",
" 'P1.3 -- bi_model': 0.1132791057437996,\n",
" 'P1.3 -- def_model': 0.1447677029791906,\n",
" 'P1.3 -- regexp_model': 0.24232746145017217,\n",
" 'P1.3 -- tri_model': 0.06736863116922003,\n",
" 'P1.3 -- uni_model': 0.8608213982733669,\n",
" 'P1.4': 0.63253390325317,\n",
" 'P1.5': 0.6044583741861567,\n",
" 'P1.6 -- bi_model': 0.1132791057437996,\n",
" 'P1.6 -- def_model': 0.1447677029791906,\n",
" 'P1.6 -- regexp_model': 0.24232746145017217,\n",
" 'P1.6 -- tri_model': 0.06736863116922003,\n",
" 'P1.6 -- uni_model': 0.8608213982733669}\n"
]
}
],
"source": [
"performances = {}\n",
"\n",
"treebank_tagged = nltk.corpus.treebank.tagged_sents()\n",
"treebank_sents = nltk.corpus.treebank.sents()\n",
"\n",
"brown_tagged = nltk.corpus.brown.tagged_sents()#(categories='news')\n",
"brown_sents = nltk.corpus.brown.sents()#(categories='news')\n",
"\n",
"display(treebank_tagged[0])\n",
"\n",
"X1,y1,tX1,ty1 = create_training_and_test_set(annotated_sentences=treebank_tagged,\n",
"                                             relative_cutoff=0.8)\n",
"\n",
"X2,y2,tX2,ty2 = create_training_and_test_set(annotated_sentences=brown_tagged,\n",
"                                             relative_cutoff=0.8)\n",
"\n",
"print(\"P1.1\")\n",
"performances['P1.1'] = model_01(X1,y1,tX1,ty1)\n",
"\n",
"print(\"P1.2\")\n",
"performances['P1.2'] = model_02(tX1,ty1)\n",
"\n",
"print(\"P1.3\")\n",
"p3 = model_03(treebank_tagged, treebank_sents)\n",
"for k,v in p3.items():\n",
"    performances[\"P1.3 -- \" + k] = v\n",
"\n",
"print(\"P1.4\")\n",
"performances['P1.4'] = model_01(X2,y2,tX2,ty2)\n",
"\n",
"print(\"P1.5\")\n",
"performances['P1.5'] = model_02(tX2,ty2)\n",
"\n",
"print(\"P1.6\")\n",
"p6 = model_03(brown_tagged, brown_sents)\n",
"for k,v in p6.items():\n",
"    performances[\"P1.6 -- \" + k] = v\n",
"\n",
"pprint.pprint(performances)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Plotting Data"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "c6d8e1186c1f44dcb77b146346b1dedb",
"version_major": 2,
"version_minor": 0
},
"text/html": [
"<p>Failed to display Jupyter Widget of type <code>FigureCanvasNbAgg</code>.</p>\n",
"<p>\n",
" If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
" that the widgets JavaScript is still loading. If this message persists, it\n",
" likely means that the widgets JavaScript library is either not installed or\n",
" not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
" Widgets Documentation</a> for setup instructions.\n",
"</p>\n",
"<p>\n",
" If you're reading this message in another frontend (for example, a static\n",
" rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
" it may mean that your frontend doesn't currently support widgets.\n",
"</p>\n"
],
"text/plain": [
"FigureCanvasNbAgg()"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"\n",
"#weights = clf.named_steps['classifier'].feature_importances_\n",
"#labels = clf.named_steps['vectorizer'].get_feature_names()\n",
"\n",
"#sort\n",
"#weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n",
"\n",
"fig_1, ax_1 = plt.subplots()\n",
"plt.bar(np.arange(len(performances)), performances.values())\n",
"plt.xticks(np.arange(len(performances)), performances.keys(), rotation=30, ha='right')\n",
"plt.tight_layout()\n",
"plt.show()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## Task 2"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"WORDS = 'words' #: column type for words\n",
"POS = 'pos' #: column type for part-of-speech tags\n",
"TREE = 'tree' #: column type for parse trees\n",
"CHUNK = 'chunk' #: column type for chunk structures\n",
"NE = 'ne' #: column type for named entities\n",
"SRL = 'srl' #: column type for semantic role labels\n",
"IGNORE = 'ignore' #: column type for a column that should be ignored\n",
"\n",
"train_path = \"./ru_syntagrus-ud-train.conllu\"\n",
"ru_corp = nltk.corpus.ConllCorpusReader(root=\"./\",\n",
"                                        fileids=[\"ru_syntagrus-ud-train-uncommented.conllu\"],\n",
"                                        columntypes=[IGNORE, WORDS, IGNORE, POS],\n",
"                                        encoding='utf-8')\n"
]
},
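{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check (illustrative; it requires the `.conllu` file to be present locally): the reader should yield `(word, tag)` pairs taken from the configured columns."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sanity check: peek at the first few (word, tag) pairs\n",
"print(ru_corp.tagged_sents()[0][:5])"
]
},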
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"ru_tagged = ru_corp.tagged_sents()\n",
"\n",
"X3,y3,tX3,ty3 = create_training_and_test_set(annotated_sentences=ru_tagged,\n",
"                                             relative_cutoff=0.8)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 04, Performance 2.1"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"P2.1\n",
"start training…\n",
"training done\n",
"Accuracy: 0.7079014288483687\n",
"0.7079014288483687\n"
]
}
],
"source": [
"print(\"P2.1\")\n",
"performances2 = {}\n",
"performances2['P2.1'] = model_01(X3,y3,tX3,ty3, max_size=1000)\n",
"print(performances2['P2.1'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 05, Performance 2.2"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"# write the russian text out to a file, one sentence per line:\n",
"with open(\"ru_text.txt\", 'w') as f:\n",
"    for sentence in ru_tagged:\n",
"        for word, tag in sentence:\n",
"            f.write(word + \" \")\n",
"        f.write(\"\\n\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* download the Python 3 fork of RDRPOSTagger: https://github.com/jacopofar/RDRPOSTagger-python-3\n",
"* adjust `RDRPOS_TAGGER_PATH` to match the download location"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/jonas/Dokumente/gitRepos/NLP-LAB/Jonas_Solutions\n"
]
}
],
"source": [
"import sys, os\n",
"\n",
"dir_path = os.getcwd()\n",
"print(dir_path)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Node', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'tabStr']\n",
"('\\nOutput file:', 'ru_text.txt.TAGGED')\n"
]
}
],
"source": [
"RDRPOS_TAGGER_PATH = r\"/home/jonas/src/RDRPOSTagger-python-3/pSCRDRtagger/\"\n",
"\n",
"sys.path.insert(0, RDRPOS_TAGGER_PATH)\n",
"os.chdir(RDRPOS_TAGGER_PATH)\n",
"\n",
"import RDRPOSTagger as model05_tagger\n",
"\n",
"r = model05_tagger.RDRPOSTagger()\n",
"r.constructSCRDRtreeFromRDRfile(\"../Models/UniPOS/UD_Russian-SynTagRus/train.UniPOS.RDR\")\n",
"DICT = model05_tagger.readDictionary(\"../Models/UniPOS/UD_Russian-SynTagRus/train.UniPOS.DICT\")\n",
"\n",
"os.chdir(dir_path)\n",
"\n",
"r.tagRawCorpus(DICT, \"ru_text.txt\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"tagged_words = []\n",
"with open(\"ru_text.txt.TAGGED\", 'r') as f:\n",
"    for line in f:\n",
"        for token in line.split():\n",
"            parts = token.rsplit('/', 1)\n",
"            if len(parts) != 2:\n",
"                print(\"error parsing: \", parts)\n",
"            else:\n",
"                w, t = parts\n",
"                tagged_words.append((w, t))"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"score_2_2 = 0\n",
"i = 0\n",
"for sent in ru_tagged:\n",
"    for tagged_w in sent:\n",
"        if tagged_w[1] == tagged_words[i][1]:\n",
"            score_2_2 += 1\n",
"        i += 1\n",
"performances2['P2.2'] = score_2_2 / len(tagged_words)\n"
]
},
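{
"cell_type": "markdown",
"metadata": {},
"source": [
"The loop above assumes the tagger emitted exactly one output token per input token. A hedged, equivalent comparison (illustrative) that fails loudly if the two token streams drift out of alignment:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# illustrative sketch: zip-based comparison that checks token alignment first\n",
"gold = [pair for sent in ru_tagged for pair in sent]\n",
"assert len(gold) == len(tagged_words), \"token streams are misaligned\"\n",
"matches = sum(1 for (gw, gt), (pw, pt) in zip(gold, tagged_words) if gt == pt)\n",
"print(matches / len(gold))"
]
},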
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Results of performance 2.2"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'P2.1': 0.7079014288483687, 'P2.2': 0.8899716702179293}\n"
]
}
],
"source": [
"pprint.pprint(performances2)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "aeb29243e58d49b8942122ceec03fab5",
"version_major": 2,
"version_minor": 0
},
"text/html": [
"<p>Failed to display Jupyter Widget of type <code>FigureCanvasNbAgg</code>.</p>\n",
"<p>\n",
" If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
" that the widgets JavaScript is still loading. If this message persists, it\n",
" likely means that the widgets JavaScript library is either not installed or\n",
" not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
" Widgets Documentation</a> for setup instructions.\n",
"</p>\n",
"<p>\n",
" If you're reading this message in another frontend (for example, a static\n",
" rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
" it may mean that your frontend doesn't currently support widgets.\n",
"</p>\n"
],
"text/plain": [
"FigureCanvasNbAgg()"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"fig_2, ax_2 = plt.subplots()\n",
"plt.bar(np.arange(len(performances2)), performances2.values())\n",
"plt.xticks(np.arange(len(performances2)), performances2.keys(), rotation=30, ha='right')\n",
"plt.tight_layout()\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}