{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NLP-LAB Exercise 01 by jonas weinz\n",
"----"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib ipympl\n",
"import nltk\n",
"import pprint\n",
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"from sklearn.pipeline import Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## implementing own classifiers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* writing an own feature funtion"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'\\n return {\\n \\'word\\': word,\\n \\'is_capitalized\\': word[0].upper() == word[0],\\n \\'prefix-1\\': word[0],\\n \\'suffix-1\\': word[-1],\\n \\'prev_word\\': \\'\\' if index == 0 else sentence[index - 1],\\n \\'next_word\\': \\'\\' if index == len(sentence) - 1 else sentence[index + 1],\\n \\'length\\': len(word),\\n \\'index\\' : index,\\n \\'rev_index\\': len(sentence) - index,\\n \\'sentence_length\\': len(sentence)#,\\n \\'relative_third\\': relative_third,\\n \\'is_punctuation_mark\\': is_punctuation_mark,\\n \\',\\': word == \",\",\\n \\'.\\': word == \".\",\\n \\'!\\': word == \"!\",\\n \\'?\\': word == \"?\"\\n }\\n'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"def features(sentence, index):\n",
" word = sentence[index]\n",
" is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n",
" sentence_length = len(sentence)\n",
2018-04-29 19:52:07 +02:00
" relative_third = (index * 3) // sentence_length \n",
" vowels = word.count('a') + word.count('e') + word.count('i') + word.count('o') + word.count('u')\n",
2018-04-28 21:15:48 +02:00
" return {\n",
2018-04-29 19:52:07 +02:00
" 'word': word,\n",
2018-04-28 21:15:48 +02:00
" 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n",
" 'prefix-1': sentence[index][0],\n",
" 'suffix-1': sentence[index][-1],\n",
2018-04-29 19:52:07 +02:00
" 'prefix-2': sentence[index][1] if len(word) > 1 else '',\n",
" 'suffix-2': sentence[index][-2] if len(word) > 1 else '',\n",
2018-04-28 21:15:48 +02:00
" 'prev_word': '' if index == 0 else sentence[index - 1],\n",
" 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
2018-04-29 19:52:07 +02:00
" 'length': len(word),\n",
" 'index' : index,\n",
" 'rev_index': len(sentence) - index,\n",
" 'sentence_length_': len(sentence),\n",
" 'relative_third': relative_third,\n",
2018-04-28 21:15:48 +02:00
" 'numerical': word.isnumeric(),\n",
" 'is_punctuation_mark': is_punctuation_mark,\n",
" ',': word == \",\",\n",
" '.': word == \".\",\n",
" '!': word == \"!\",\n",
2018-04-29 19:52:07 +02:00
" '?': word == \"?\",\n",
" 'vowels' : vowels\n",
2018-04-28 21:15:48 +02:00
" }\n",
"'''\n",
" return {\n",
" 'word': word,\n",
" 'is_capitalized': word[0].upper() == word[0],\n",
" 'prefix-1': word[0],\n",
" 'suffix-1': word[-1],\n",
" 'prev_word': '' if index == 0 else sentence[index - 1],\n",
" 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
" 'length': len(word),\n",
" 'index' : index,\n",
" 'rev_index': len(sentence) - index,\n",
" 'sentence_length': len(sentence)#,\n",
" 'relative_third': relative_third,\n",
" 'is_punctuation_mark': is_punctuation_mark,\n",
" ',': word == \",\",\n",
" '.': word == \".\",\n",
" '!': word == \"!\",\n",
" '?': word == \"?\"\n",
" }\n",
"'''"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"test_sentence = ['The','cake','is','a','lie','!']\n",
"#for i in range(len(test_sentence)):\n",
"# pprint.pprint(features(test_sentence, i))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* function for creating training sets"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def untag(tagged_sentence):\n",
" return [w for w,t in tagged_sentence]\n",
"\n",
"def transform_to_dataset(tagged_sentences):\n",
" X,y = [], []\n",
" \n",
" for s in tagged_sentences:\n",
" for i in range(len(s)):\n",
" X.append(features(untag(s),i))\n",
" y.append(s[i][1])\n",
" return X,y\n",
"\n",
"def create_training_and_test_set(annotated_sentences, relative_cutoff):\n",
" cutoff = int(relative_cutoff * len(annotated_sentences))\n",
" training_sentences = annotated_sentences[:cutoff]\n",
" test_sentences = annotated_sentences[cutoff:]\n",
" \n",
" X,y = transform_to_dataset(training_sentences)\n",
" tX, ty = transform_to_dataset(test_sentences)\n",
" \n",
" return X,y,tX,ty"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* Decision Tree classifier"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def train_classifier(X,y,classifier,max_size=10000):\n",
" clf = Pipeline([\n",
" ('vectorizer', DictVectorizer(sparse=False)),\n",
" ('classifier', classifier)\n",
" ])\n",
" \n",
" print(\"start training…\")\n",
" \n",
" clf.fit(\n",
" X if len(X) < max_size else X[:max_size],\n",
" y if len(y) < max_size else y[:max_size]\n",
" )\n",
" \n",
" print(\"training done\")\n",
" \n",
" return clf"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* classifier evaluater"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def test_classifier(clf, tX, ty):\n",
" accuracy = clf.score(tX, ty)\n",
" print(\"Accuracy: \", accuracy)\n",
" # TODO: more analytics\n",
" return accuracy"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Exercise 01:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Performance 1\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 01\n",
"* training and testing a custom English POS tagger model:"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"start training…\n",
2018-05-02 17:59:50 +02:00
"training done\n",
"Accuracy: 0.768551324916413\n"
2018-04-28 21:15:48 +02:00
]
}
],
"source": [
"def model_01(X, y, tX, ty, max_size=1000):\n",
"    #classifier = DecisionTreeClassifier(criterion='entropy')\n",
"    from sklearn.neural_network import MLPClassifier\n",
"    model01_clf = train_classifier(X, y, MLPClassifier(), max_size=max_size)\n",
"    return test_classifier(clf=model01_clf, tX=tX, ty=ty)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 02"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {},
"outputs": [
{
2018-05-02 17:59:50 +02:00
"name": "stdout",
"output_type": "stream",
"text": [
"0.8936074654423873\n"
]
2018-04-28 21:15:48 +02:00
}
],
"source": [
"def model_02(tX, ty):\n",
"    # tag all test words with NLTK's built-in tagger\n",
"    # (note: the word list is flattened here, so sentence boundaries are lost)\n",
"    m2_y = nltk.pos_tag([w['word'] for w in tX])\n",
"    # compare results\n",
"    n_correct = sum(1 for i in range(len(ty)) if m2_y[i][1] == ty[i])\n",
"    return n_correct / len(ty)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 03"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [],
"source": [
"def model_03(corpus_tagged, corpus_sents, cut=0.8):\n",
" \n",
" patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n",
" (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
" \n",
" s = int(len(corpus_sents) * cut)\n",
" train_sents = corpus_sents[:size]\n",
" test_sents = corpus_sents[size:]\n",
" \n",
" models = {\n",
" 'def_model': nltk.DefaultTagger('NN'),\n",
" 'regexp_model': nltk.RegexpTagger(patterns),\n",
" 'uni_model': nltk.UnigramTagger(train_sents),\n",
" 'bi_model': nltk.BigramTagger(train_sents),\n",
" 'tri_model': nltk.TrigramTagger(train_sents)\n",
" }\n",
" \n",
" performance = {}\n",
" for name,model in models.items():\n",
" performance[name] = model.evaluate(test_sents)\n",
" \n",
" return performance\n"
2018-04-29 19:52:07 +02:00
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Applying the models to the datasets"
]
},
{
"cell_type": "code",
"execution_count": 41,
"metadata": {},
"outputs": [],
"source": [
"accs_p1 = [0] * 3\n",
"names_p1 = [\"P1.1\", \"P1.2\", \"P1.3\"]\n",
"\n",
"treebank_tagged = nltk.corpus.treebank.tagged_sents()\n",
"treebank_sents = nltk.corpus.treebank.sents()\n",
"\n",
"brown_tagged = nltk.corpus.brown.tagged_sents()\n",
"brown_sents = nltk.corpus.brown.sents()\n",
"\n",
"X1,y1,tX1,ty1 = create_training_and_test_set(annotated_sentences=treebank_tagged, \n",
" relative_cutoff=0.8)\n",
"\n",
"X2,y2,tX2,ty2 = create_training_and_test_set(annotated_sentences=brown_tagged, \n",
" relative_cutoff=0.8)\n"
]
},
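{
"cell_type": "markdown",
"metadata": {},
"source": [
"* a possible way to fill `accs_p1` with the three Performance-1 scores on the treebank data (a sketch, not yet executed: taking the best `model_03` tagger as P1.3 and the helper name `m3_perf` are assumptions, not from the original):"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: evaluate the three models on the treebank split from above\n",
"accs_p1[0] = model_01(X1, y1, tX1, ty1)\n",
"accs_p1[1] = model_02(tX1, ty1)\n",
"# assumption: P1.3 is taken as the best-performing model_03 tagger\n",
"m3_perf = model_03(treebank_tagged)\n",
"accs_p1[2] = max(m3_perf.values())\n",
"\n",
"# the brown data (X2, y2, tX2, ty2, brown_tagged) can be evaluated analogously\n",
"for name, acc in zip(names_p1, accs_p1):\n",
"    print(name, acc)"
]
},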
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}