I'm Blue, dabedi dabedei...

Jonas Weinz 2018-05-02 17:59:50 +02:00
parent d5c17c09a8
commit 2c625f7f92


@@ -205,24 +205,14 @@
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"## Exercises:"
+"## Exercise 01:"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"### Exercise 01\n"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 10,
-"metadata": {},
-"outputs": [],
-"source": [
-"accs = [0] * 5\n",
-"names = [\"M1\", \"M2\", \"M3\", \"M4\", \"M5\"]\n"
+"### Performance 1\n"
 ]
 },
 {
@@ -235,7 +225,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 17,
 "metadata": {},
 "outputs": [
 {
@@ -243,20 +233,17 @@
 "output_type": "stream",
 "text": [
 "start training…\n",
-"training done\n"
+"training done\n",
+"Accuracy: 0.768551324916413\n"
 ]
 }
 ],
 "source": [
-"annotated_sent = nltk.corpus.treebank.tagged_sents()\n",
-"\n",
-"X,y,tX,ty = create_training_and_test_set(annotated_sentences=annotated_sent, \n",
-" relative_cutoff=0.8)\n",
-"\n",
-"#classifier = DecisionTreeClassifier(criterion='entropy')\n",
-"from sklearn.neural_network import MLPClassifier\n",
-"model01_clf = train_classifier(X,y,MLPClassifier(),max_size=10000)\n",
-"accs[0] = test_classifier(clf=clf, tX=tX, ty=ty)"
+"def model_01(X, y, tX, ty, max_size=1000):\n",
+"    # classifier = DecisionTreeClassifier(criterion='entropy')\n",
+"    from sklearn.neural_network import MLPClassifier\n",
+"    model01_clf = train_classifier(X, y, MLPClassifier(), max_size=max_size)\n",
+"    return test_classifier(clf=model01_clf, tX=tX, ty=ty)"
 ]
 },
 {
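For readers of this diff: `model_01` delegates to `train_classifier` and `test_classifier`, which are defined earlier in the notebook and do not appear in these hunks. Below is a minimal sketch of what those helpers plausibly look like, assuming a DictVectorizer-plus-classifier Pipeline (the `named_steps['vectorizer']` / `named_steps['classifier']` accesses in the removed plotting cell further down suggest that layout) and a plain accuracy score; the exact signatures are assumptions, not part of this commit.

```python
# Hypothetical reconstruction of the notebook's helpers (not shown in this diff).
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline

def train_classifier(X, y, classifier, max_size=10000):
    # Vectorize the per-token feature dicts and fit the given classifier
    # on at most max_size training samples.
    clf = Pipeline([
        ('vectorizer', DictVectorizer(sparse=False)),
        ('classifier', classifier),
    ])
    print("start training…")
    clf.fit(X[:max_size], y[:max_size])
    print("training done")
    return clf

def test_classifier(clf, tX, ty):
    # Accuracy of the fitted pipeline on the held-out feature dicts tX
    # against the gold tags ty.
    acc = accuracy_score(ty, clf.predict(tX))
    print("Accuracy:", acc)
    return acc
```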
@@ -268,77 +255,89 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": 30,
 "metadata": {},
 "outputs": [
 {
-"data": {
-"text/plain": [
-"0"
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"0.8936074654423873\n"
 ]
-},
-"execution_count": 13,
-"metadata": {},
-"output_type": "execute_result"
 }
 ],
 "source": [
-"accs[1]"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 8,
-"metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"'\\nimport matplotlib.pyplot as plt\\nimport numpy as np\\n\\nweights = clf.named_steps[\\'classifier\\'].feature_importances_\\nlabels = clf.named_steps[\\'vectorizer\\'].get_feature_names()\\n\\n#sort\\nweights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\\n\\n#fig_1, ax_1 = plt.subplots()\\n#plt.bar(np.arange(len(weights)), weights)\\n#plt.xticks(np.arange(len(weights)), labels, rotation=90)\\n#plt.show()\\n\\nprint(\"Most important features:\")\\npprint.pprint(list(reversed(labels[-20:])))\\nprint(\"with weights: \")\\npprint.pprint(list(reversed(weights[-20:])))\\n'"
-]
-},
-"execution_count": 8,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"'''\n",
-"import matplotlib.pyplot as plt\n",
-"import numpy as np\n",
-"\n",
-"weights = clf.named_steps['classifier'].feature_importances_\n",
-"labels = clf.named_steps['vectorizer'].get_feature_names()\n",
-"\n",
-"#sort\n",
-"weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n",
-"\n",
-"#fig_1, ax_1 = plt.subplots()\n",
-"#plt.bar(np.arange(len(weights)), weights)\n",
-"#plt.xticks(np.arange(len(weights)), labels, rotation=90)\n",
-"#plt.show()\n",
-"\n",
-"print(\"Most important features:\")\n",
-"pprint.pprint(list(reversed(labels[-20:])))\n",
-"print(\"with weights: \")\n",
-"pprint.pprint(list(reversed(weights[-20:])))\n",
-"'''"
+"def model_02(tX, ty):\n",
+"    m2_y = nltk.pos_tag([w['word'] for w in tX])\n",
+"    # compare results\n",
+"    n_correct = sum((1 if m2_y[i][1] == ty[i] else 0) for i in range(len(ty)))\n",
+"    return n_correct / len(ty)"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
-"```\n",
-"from sklearn import tree\n",
-"import graphviz\n",
-"dot_data = tree.export_graphviz(clf.named_steps['classifier'], out_file='test',\n",
-" feature_names=labels,\n",
-" filled=True, rounded=True, \n",
-" special_characters=True)\n",
-"#graph = graphviz.Source(dot_data)\n",
-"#graph\n",
-"```"
+"#### Model 03"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 38,
+"metadata": {},
+"outputs": [],
+"source": [
+"def model_03(corpus_tagged, corpus_sents, cut=0.8):\n",
+"\n",
+"    patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n",
+"                (r'.*s$', 'NNS'), (r'^-?[0-9]+(\\.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
+"\n",
+"    s = int(len(corpus_tagged) * cut)\n",
+"    train_sents = corpus_tagged[:s]\n",
+"    test_sents = corpus_tagged[s:]\n",
+"\n",
+"    models = {\n",
+"        'def_model': nltk.DefaultTagger('NN'),\n",
+"        'regexp_model': nltk.RegexpTagger(patterns),\n",
+"        'uni_model': nltk.UnigramTagger(train_sents),\n",
+"        'bi_model': nltk.BigramTagger(train_sents),\n",
+"        'tri_model': nltk.TrigramTagger(train_sents)\n",
+"    }\n",
+"\n",
+"    performance = {}\n",
+"    for name, model in models.items():\n",
+"        performance[name] = model.evaluate(test_sents)\n",
+"\n",
+"    return performance\n"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Applying models to datasets"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 41,
+"metadata": {},
+"outputs": [],
+"source": [
+"accs_p1 = [0] * 3\n",
+"names_p1 = [\"P1.1\", \"P1.2\", \"P1.3\"]\n",
+"\n",
+"treebank_tagged = nltk.corpus.treebank.tagged_sents()\n",
+"treebank_sents = nltk.corpus.treebank.sents()\n",
+"\n",
+"brown_tagged = nltk.corpus.brown.tagged_sents()\n",
+"brown_sents = nltk.corpus.brown.sents()\n",
+"\n",
+"X1,y1,tX1,ty1 = create_training_and_test_set(annotated_sentences=treebank_tagged, \n",
+"                                             relative_cutoff=0.8)\n",
+"\n",
+"X2,y2,tX2,ty2 = create_training_and_test_set(annotated_sentences=brown_tagged, \n",
+"                                             relative_cutoff=0.8)\n"
 ]
 },
 {
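The new cells define the three models and prepare the treebank and brown splits, but they stop short of calling anything. A possible follow-up cell, assuming the `P1.x` slots map to `model_01`/`model_02`/`model_03` on the treebank data (that mapping is a guess, not stated in the commit), could look like this:

```python
# Hypothetical usage of the functions introduced in this commit; the
# assignment of models to the P1.x slots is an assumption.
accs_p1[0] = model_01(X1, y1, tX1, ty1, max_size=1000)  # MLP on the hand-built features
accs_p1[1] = model_02(tX1, ty1)                         # nltk.pos_tag as an off-the-shelf reference
# model_03 returns one score per rule/n-gram tagger; keep the best one here
accs_p1[2] = max(model_03(treebank_tagged, treebank_sents).values())

for name, acc in zip(names_p1, accs_p1):
    print(name, acc)
```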
@@ -365,7 +364,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.6.3"
+"version": "3.6.5"
 }
 },
 "nbformat": 4,