{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# NLP-LAB Exercise 01 by jonas weinz\n",
    "----"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import nltk\n",
    "import pprint\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.feature_extraction import DictVectorizer\n",
    "from sklearn.pipeline import Pipeline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Implementing our own classifier"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* writing our own feature function"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'\\n    return {\\n        \\'word\\': word,\\n        \\'is_capitalized\\': word[0].upper() == word[0],\\n        \\'prefix-1\\': word[0],\\n        \\'suffix-1\\': word[-1],\\n        \\'prev_word\\': \\'\\' if index == 0 else sentence[index - 1],\\n        \\'next_word\\': \\'\\' if index == len(sentence) - 1 else sentence[index + 1],\\n        \\'length\\': len(word),\\n        \\'index\\' : index,\\n        \\'rev_index\\': len(sentence) - index,\\n        \\'sentence_length\\': len(sentence)#,\\n        \\'relative_third\\': relative_third,\\n        \\'is_punctuation_mark\\': is_punctuation_mark,\\n        \\',\\': word == \",\",\\n        \\'.\\': word == \".\",\\n        \\'!\\': word == \"!\",\\n        \\'?\\': word == \"?\"\\n    }\\n'"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def features(sentence, index):\n",
    "    word = sentence[index]\n",
    "    is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n",
    "    # only used by the alternative feature set kept as a string below:\n",
    "    sentence_length = len(sentence)\n",
    "    relative_third = (index * 2) // sentence_length\n",
    "    return {\n",
    "        'word': word,\n",
    "        'is_capitalized': word[0].upper() == word[0],\n",
    "        'prefix-1': word[0],\n",
    "        'suffix-1': word[-1],\n",
    "        'prev_word': '' if index == 0 else sentence[index - 1],\n",
    "        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
    "        'numerical': word.isnumeric(),\n",
    "        'is_punctuation_mark': is_punctuation_mark,\n",
    "        ',': word == \",\",\n",
    "        '.': word == \".\",\n",
    "        '!': word == \"!\",\n",
    "        '?': word == \"?\"\n",
    "    }\n",
    "'''\n",
    "    return {\n",
    "        'word': word,\n",
    "        'is_capitalized': word[0].upper() == word[0],\n",
    "        'prefix-1': word[0],\n",
    "        'suffix-1': word[-1],\n",
    "        'prev_word': '' if index == 0 else sentence[index - 1],\n",
    "        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
    "        'length': len(word),\n",
    "        'index' : index,\n",
    "        'rev_index': len(sentence) - index,\n",
    "        'sentence_length': len(sentence)#,\n",
    "        'relative_third': relative_third,\n",
    "        'is_punctuation_mark': is_punctuation_mark,\n",
    "        ',': word == \",\",\n",
    "        '.': word == \".\",\n",
    "        '!': word == \"!\",\n",
    "        '?': word == \"?\"\n",
    "    }\n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "test_sentence = ['The','cake','is','a','lie','!']\n",
    "#for i in range(len(test_sentence)):\n",
    "#    pprint.pprint(features(test_sentence, i))"
   ]
  },
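  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* added sketch (not part of the original run): one uncommented call on the test sentence, just to show the shape of a single feature dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# feature dict for 'cake' (index 1); relies only on features() defined above\n",
    "pprint.pprint(features(test_sentence, 1))"
   ]
  },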
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* functions for creating training and test sets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def untag(tagged_sentence):\n",
    "    return [w for w, t in tagged_sentence]\n",
    "\n",
    "def transform_to_dataset(tagged_sentences):\n",
    "    X, y = [], []\n",
    "\n",
    "    for s in tagged_sentences:\n",
    "        for i in range(len(s)):\n",
    "            X.append(features(untag(s), i))\n",
    "            y.append(s[i][1])\n",
    "    return X, y\n",
    "\n",
    "def create_training_and_test_set(annotated_sentences, relative_cutoff):\n",
    "    cutoff = int(relative_cutoff * len(annotated_sentences))\n",
    "    training_sentences = annotated_sentences[:cutoff]\n",
    "    test_sentences = annotated_sentences[cutoff:]\n",
    "\n",
    "    X, y = transform_to_dataset(training_sentences)\n",
    "    tX, ty = transform_to_dataset(test_sentences)\n",
    "\n",
    "    return X, y, tX, ty"
   ]
  },
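  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* added sanity check (not part of the original run): `transform_to_dataset` should emit one feature dict and one tag per token; the tiny tagged sentence below is made up for illustration"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# hypothetical mini corpus: a single sentence of (word, tag) pairs\n",
    "demo = [[('The', 'DT'), ('cake', 'NN')]]\n",
    "dX, dy = transform_to_dataset(demo)\n",
    "print(dy)             # expected: ['DT', 'NN']\n",
    "pprint.pprint(dX[0])  # feature dict for 'The'"
   ]
  },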
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* training function: wraps a classifier (here a Decision Tree) in a pipeline with a DictVectorizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_classifier(X, y, classifier, max_size=10000):\n",
    "    clf = Pipeline([\n",
    "        ('vectorizer', DictVectorizer(sparse=False)),\n",
    "        ('classifier', classifier)\n",
    "    ])\n",
    "\n",
    "    print(\"start training…\")\n",
    "\n",
    "    # cap the training set at max_size samples (slicing past the end is a no-op)\n",
    "    clf.fit(X[:max_size], y[:max_size])\n",
    "\n",
    "    print(\"training done\")\n",
    "\n",
    "    return clf"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* classifier evaluator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def test_classifier(clf, tX, ty):\n",
    "    accuracy = clf.score(tX, ty)\n",
    "    print(\"Accuracy: \", accuracy)\n",
    "    # TODO: more analytics (see the sketch below)"
   ]
  },
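  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* added sketch for the TODO above (not part of the original run): scikit-learn's `classification_report` gives per-tag precision, recall and F1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics import classification_report\n",
    "\n",
    "def detailed_report(clf, tX, ty):\n",
    "    # per-tag precision/recall/F1 over the test set\n",
    "    print(classification_report(ty, clf.predict(tX)))"
   ]
  },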
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Exercises:"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Exercise 01\n",
    "* training and testing a custom English POS tagger model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "start training…\n",
      "training done\n",
      "Accuracy:  0.8842756624582065\n"
     ]
    }
   ],
   "source": [
    "annotated_sent = nltk.corpus.treebank.tagged_sents()\n",
    "\n",
    "X, y, tX, ty = create_training_and_test_set(annotated_sentences=annotated_sent,\n",
    "                                            relative_cutoff=0.8)\n",
    "\n",
    "classifier = DecisionTreeClassifier(criterion='entropy', splitter='random')\n",
    "clf = train_classifier(X, y, classifier)\n",
    "test_classifier(clf=clf, tX=tX, ty=ty)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "8257\n"
     ]
    }
   ],
   "source": [
    "# number of one-hot features the DictVectorizer produced from the feature dicts\n",
    "print(len(classifier.feature_importances_))"
   ]
  },
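  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* added sketch (not part of the original run): map the tree's feature importances back to the DictVectorizer's feature names to see which features the tree relies on most; `get_feature_names` is the older scikit-learn spelling, newer releases call it `get_feature_names_out`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "vec = clf.named_steps['vectorizer']\n",
    "tree = clf.named_steps['classifier']\n",
    "# indices of the ten most important one-hot features\n",
    "top = np.argsort(tree.feature_importances_)[::-1][:10]\n",
    "for i in top:\n",
    "    print(vec.get_feature_names()[i], tree.feature_importances_[i])"
   ]
  },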
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "80637"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}