Merge branch 'master' of ssh://the-cake-is-a-lie.net:20022/jonas/NLP-LAB
commit 93726f4dc6
@@ -14,6 +14,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
+"%matplotlib ipympl\n",
 "import nltk\n",
 "import pprint\n",
 "from sklearn.tree import DecisionTreeClassifier\n",
@@ -56,20 +57,29 @@
 " word = sentence[index]\n",
 " is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n",
 " sentence_length = len(sentence)\n",
-" relative_third = (index * 2) // sentence_length \n",
+" relative_third = (index * 3) // sentence_length \n",
+" vowels = word.count('a') + word.count('e') + word.count('i') + word.count('o') + word.count('u')\n",
 " return {\n",
-" 'word': sentence[index],\n",
+" 'word': word,\n",
 " 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n",
 " 'prefix-1': sentence[index][0],\n",
 " 'suffix-1': sentence[index][-1],\n",
+" 'prefix-2': sentence[index][1] if len(word) > 1 else '',\n",
+" 'suffix-2': sentence[index][-2] if len(word) > 1 else '',\n",
 " 'prev_word': '' if index == 0 else sentence[index - 1],\n",
 " 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
+" 'length': len(word),\n",
+" 'index' : index,\n",
+" 'rev_index': len(sentence) - index,\n",
+" 'sentence_length_': len(sentence),\n",
+" 'relative_third': relative_third,\n",
 " 'numerical': word.isnumeric(),\n",
 " 'is_punctuation_mark': is_punctuation_mark,\n",
 " ',': word == \",\",\n",
 " '.': word == \".\",\n",
 " '!': word == \"!\",\n",
-" '?': word == \"?\"\n",
+" '?': word == \"?\",\n",
+" 'vowels' : vowels\n",
 " }\n",
 "'''\n",
 " return {\n",
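For readability, here is the feature extractor as it reads after this hunk, assembled into one plain-Python function. The enclosing `def` line is not visible in the diff, so the name and signature `features(sentence, index)` are assumed for illustration; everything else is taken directly from the lines above.

```
def features(sentence, index):
    # token under consideration and a few derived quantities
    word = sentence[index]
    is_punctuation_mark = word == "!" or word == "." or word == "," or word == "?"
    sentence_length = len(sentence)
    relative_third = (index * 3) // sentence_length  # 0, 1 or 2: which third of the sentence
    vowels = word.count('a') + word.count('e') + word.count('i') + word.count('o') + word.count('u')
    return {
        'word': word,
        'is_capitalized': sentence[index][0].upper() == sentence[index][0],
        'prefix-1': sentence[index][0],
        'suffix-1': sentence[index][-1],
        'prefix-2': sentence[index][1] if len(word) > 1 else '',
        'suffix-2': sentence[index][-2] if len(word) > 1 else '',
        'prev_word': '' if index == 0 else sentence[index - 1],
        'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],
        'length': len(word),
        'index': index,
        'rev_index': len(sentence) - index,
        'sentence_length_': len(sentence),
        'relative_third': relative_third,
        'numerical': word.isnumeric(),
        'is_punctuation_mark': is_punctuation_mark,
        ',': word == ",",
        '.': word == ".",
        '!': word == "!",
        '?': word == "?",
        'vowels': vowels
    }
```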
@@ -187,7 +197,8 @@
 "def test_classifier(clf, tX, ty):\n",
 " accuracy = clf.score(tX, ty)\n",
 " print(\"Accuracy: \", accuracy)\n",
-" # TODO: more analytics"
+" # TODO: more analytics\n",
+" return accuracy"
 ]
 },
 {
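Assembled, the updated test helper from this hunk reads as below; the only behavioural change is that the accuracy is now returned, so it can be collected per model (the Model 01 cell further down stores it with `accs[0] = test_classifier(clf=clf, tX=tX, ty=ty)`).

```
def test_classifier(clf, tX, ty):
    # score the fitted classifier on the held-out test set
    accuracy = clf.score(tX, ty)
    print("Accuracy: ", accuracy)
    # TODO: more analytics
    return accuracy
```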
@@ -201,13 +212,30 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Exercise 01\n",
+"### Exercise 01\n"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 10,
+"metadata": {},
+"outputs": [],
+"source": [
+"accs = [0] * 5\n",
+"names = [\"M1\", \"M2\", \"M3\", \"M4\", \"M5\"]\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"#### Model 01\n",
 "* train and testing english custom POS tagger model:"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 25,
+"execution_count": 9,
 "metadata": {},
 "outputs": [
 {
@@ -215,8 +243,7 @@
 "output_type": "stream",
 "text": [
 "start training…\n",
-"training done\n",
-"Accuracy: 0.8842756624582065\n"
+"training done\n"
 ]
 }
 ],
@@ -226,46 +253,92 @@
 "X,y,tX,ty = create_training_and_test_set(annotated_sentences=annotated_sent, \n",
 " relative_cutoff=0.8)\n",
 "\n",
-"classifier = DecisionTreeClassifier(criterion='entropy', splitter='random')\n",
-"clf = train_classifier(X,y,classifier)\n",
-"test_classifier(clf=clf, tX=tX, ty=ty)"
+"#classifier = DecisionTreeClassifier(criterion='entropy')\n",
+"from sklearn.neural_network import MLPClassifier\n",
+"model01_clf = train_classifier(X,y,MLPClassifier(),max_size=10000)\n",
+"accs[0] = test_classifier(clf=clf, tX=tX, ty=ty)"
 ]
 },
 {
-"cell_type": "code",
-"execution_count": 24,
+"cell_type": "markdown",
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"8257\n"
-]
-}
-],
 "source": [
-"print(len(classifier.feature_importances_))"
+"#### Model 02"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": 19,
+"execution_count": 13,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"80637"
+"0"
 ]
 },
-"execution_count": 19,
+"execution_count": 13,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
-"len(y)"
+"accs[1]"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 8,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"'\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\nweights = clf.named_steps[\\'classifier\\'].feature_importances_\\nlabels = clf.named_steps[\\'vectorizer\\'].get_feature_names()\\n\\n#sort\\nweights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\\n\\n#fig_1, ax_1 = plt.subplots()\\n#plt.bar(np.arange(len(weights)), weights)\\n#plt.xticks(np.arange(len(weights)), labels, rotation=90)\\n#plt.show()\\n\\nprint(\"Most important features:\")\\npprint.pprint(list(reversed(labels[-20:])))\\nprint(\"with weights: \")\\npprint.pprint(list(reversed(weights[-20:])))\\n'"
+]
+},
+"execution_count": 8,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"'''\n",
+"import matplotlib.pyplot as plt\n",
+"import numpy as np\n",
+"\n",
+"weights = clf.named_steps['classifier'].feature_importances_\n",
+"labels = clf.named_steps['vectorizer'].get_feature_names()\n",
+"\n",
+"#sort\n",
+"weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n",
+"\n",
+"#fig_1, ax_1 = plt.subplots()\n",
+"#plt.bar(np.arange(len(weights)), weights)\n",
+"#plt.xticks(np.arange(len(weights)), labels, rotation=90)\n",
+"#plt.show()\n",
+"\n",
+"print(\"Most important features:\")\n",
+"pprint.pprint(list(reversed(labels[-20:])))\n",
+"print(\"with weights: \")\n",
+"pprint.pprint(list(reversed(weights[-20:])))\n",
+"'''"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"```\n",
+"from sklearn import tree\n",
+"import graphviz\n",
+"dot_data = tree.export_graphviz(clf.named_steps['classifier'], out_file='test',\n",
+" feature_names=labels,\n",
+" filled=True, rounded=True, \n",
+" special_characters=True)\n",
+"#graph = graphviz.Source(dot_data)\n",
+"#graph\n",
+"```"
 ]
 },
 {
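The commented-out analysis cell above indexes `clf.named_steps['classifier']` and `clf.named_steps['vectorizer']`, which suggests that `train_classifier` wraps the estimator in a scikit-learn `Pipeline` with a `DictVectorizer` in front. The notebook's actual helper is not part of this diff, so the following is only a hypothetical sketch of that shape, including a guess at what the `max_size=10000` argument does (capping the number of training samples).

```
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline


def train_classifier(X, y, classifier, max_size=None):
    # assumption: max_size limits how many feature dicts are used for fitting
    if max_size is not None:
        X, y = X[:max_size], y[:max_size]
    clf = Pipeline([
        ('vectorizer', DictVectorizer(sparse=True)),  # dict features -> sparse matrix
        ('classifier', classifier),                   # e.g. MLPClassifier() or DecisionTreeClassifier()
    ])
    print("start training...")
    clf.fit(X, y)
    print("training done")
    return clf
```

Under that assumption, `feature_importances_` is only available when the classifier step is a tree-based model, which is presumably why the importance-plotting block was left commented out after Model 01 switched to an MLPClassifier.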