task 1 extended
		| @ -9,7 +9,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 6, | ||||
|    "execution_count": 39, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -29,7 +29,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 7, | ||||
|    "execution_count": 40, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -49,7 +49,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 8, | ||||
|    "execution_count": 41, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -156,7 +156,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 9, | ||||
|    "execution_count": 42, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -183,7 +183,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 10, | ||||
|    "execution_count": 43, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -209,7 +209,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 11, | ||||
|    "execution_count": 44, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -225,14 +225,19 @@ | ||||
|    "source": [ | ||||
|     "### Task 1\n", | ||||
|     "* get results for english (plot a graph with all classifiers x results)\n", | ||||
|     "    * performance 1.1 = model1 in X1" | ||||
|     "    * performance 1.1 = model1 in X1\n", | ||||
|     "    * performance 1.2 = model2 in X1\n", | ||||
|     "    * performance 1.3.x = model3.x in X1\n", | ||||
|     "    * performance 1.4 = model1 in X2\n", | ||||
|     "    * performance 1.5 = model2 in X2\n", | ||||
|     "    * performance 1.6.x = model3.x in X2" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Generate Training and Testdata\n", | ||||
|     "##### Generate Training and Testdata for X1\n", | ||||
|     "1. split annotaed sentences into training and testdata\n", | ||||
|     "2. split trainingdata into input data and teacherdata\n", | ||||
|     "    *input is the feature vector of each word\n", | ||||
| @ -241,7 +246,20 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 12, | ||||
|    "execution_count": 45, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#to generate trainingsdata, delete the assigned tags as a function\n", | ||||
|     "def untag(tagged_sentence):\n", | ||||
|     "    return [w for w, t in tagged_sentence]" | ||||
|    ] | ||||
|   }, | ||||
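For readers of the diff, a quick illustration of what the untag helper returns (a sketch, not part of the commit; it assumes nltk and the treebank corpus used further down are available):

    import nltk

    # first tagged sentence of the Penn Treebank sample shipped with NLTK
    sample = nltk.corpus.treebank.tagged_sents()[0]

    # untag() from the cell above strips the tags and keeps only the words
    print(untag(sample)[:5])   # e.g. ['Pierre', 'Vinken', ',', '61', 'years']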
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 46, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -253,21 +271,26 @@ | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#to generate trainingsdata, delete the assigned tags as a function\n", | ||||
|     "def untag(tagged_sentence):\n", | ||||
|     "    return [w for w, t in tagged_sentence]\n", | ||||
|     "\n", | ||||
|     "#object including the annotated sentences\n", | ||||
|     "annotated_sent = nltk.corpus.treebank.tagged_sents()\n", | ||||
|     "\n", | ||||
|     "#to split the data, calculate the borders for ratio\n", | ||||
|     "cutoff = int(.8 * len(annotated_sent))\n", | ||||
|     "training_sentences = annotated_sent[:cutoff]\n", | ||||
|     "test_sentences = annotated_sent[cutoff:]\n", | ||||
|     "training_sentences_X1 = annotated_sent[:cutoff]\n", | ||||
|     "test_sentences_X1 = annotated_sent[cutoff:]\n", | ||||
|     "\n", | ||||
|     "#show the amount of sentences\n", | ||||
|     "print(\"got \",len(training_sentences),\" training sentences and \", len(test_sentences), \" test sentences\")\n", | ||||
|     "\n", | ||||
|     "print(\"got \",len(training_sentences_X1),\" training sentences and \", len(test_sentences_X1), \" test sentences\")" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 47, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#for training split sentences with its tags into y (for a sentences its resulting tags for each word) and transform sentences and x as a list of the features extracet for echt word in the sentences\n", | ||||
|     "def transform_to_dataset(tagged_sentences):\n", | ||||
|     "    X, y = [], []\n", | ||||
| @ -275,10 +298,66 @@ | ||||
|     "        for index in range(len(tagged_sentence)):\n", | ||||
|     "            X.append(features(untag(tagged_sentence), index))\n", | ||||
|     "            y.append(tagged_sentence[index][1]) \n", | ||||
|     "    return X, y\n", | ||||
|     "\n", | ||||
|     "    return X, y" | ||||
|    ] | ||||
|   }, | ||||
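transform_to_dataset calls a features(sentence, index) helper that is defined in an earlier, unchanged cell and therefore does not appear in this diff. As an assumption about its general shape (not the author's exact implementation), a minimal word-level feature extractor compatible with the call above could look like this:

    # hypothetical minimal feature extractor; the notebook's real features()
    # lives in an earlier cell and may use a richer feature set
    def features(sentence, index):
        word = sentence[index]
        return {
            'word': word,
            'is_first': index == 0,
            'is_last': index == len(sentence) - 1,
            'is_capitalized': word[0].isupper(),
            'prefix-1': word[:1],
            'suffix-2': word[-2:],
            'prev_word': '' if index == 0 else sentence[index - 1],
            'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],
        }

Each word is mapped to a plain dict of features, which the classifier defined elsewhere in the notebook (not shown in this diff) is expected to vectorize.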
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 48, | ||||
|    "metadata": {}, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#trainings inputset X and training teacher set y\n", | ||||
|     "X, y = transform_to_dataset(training_sentences)" | ||||
|     "X1, y1 = transform_to_dataset(training_sentences_X1)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "##### Generate Training and Testdata for X2\n", | ||||
|     "1. split annotaed sentences into training and testdata\n", | ||||
|     "2. split trainingdata into input data and teacherdata\n", | ||||
|     "    *input is the feature vector of each word\n", | ||||
|     "    *output is a list of POS tags for each word and sentences" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 49, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "got  45872  training sentences and  11468  test sentences\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "#object including the annotated sentences\n", | ||||
|     "annotated_sent = nltk.corpus.brown.tagged_sents()\n", | ||||
|     "\n", | ||||
|     "#to split the data, calculate the borders for ratio\n", | ||||
|     "cutoff = int(.8 * len(annotated_sent))\n", | ||||
|     "training_sentences_X2 = annotated_sent[:cutoff]\n", | ||||
|     "test_sentences_X2 = annotated_sent[cutoff:]\n", | ||||
|     "\n", | ||||
|     "#show the amount of sentences\n", | ||||
|     "print(\"got \",len(training_sentences_X2),\" training sentences and \", len(test_sentences_X2), \" test sentences\")" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 50, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "source": [ | ||||
|     "#trainings inputset X and training teacher set y\n", | ||||
|     "X2, y2 = transform_to_dataset(training_sentences_X2)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
| @ -295,7 +374,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 13, | ||||
|    "execution_count": 51, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -315,7 +394,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 14, | ||||
|    "execution_count": 52, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
| @ -338,7 +417,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 15, | ||||
|    "execution_count": 53, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -346,7 +425,7 @@ | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "training OK\n", | ||||
|       "Accuracy: 0.880832376865\n" | ||||
|       "Accuracy: 0.87983432307\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
| @ -356,9 +435,9 @@ | ||||
|     " \n", | ||||
|     "print('training OK')\n", | ||||
|     " \n", | ||||
|     "X_test, y_test = transform_to_dataset(test_sentences)\n", | ||||
|     "X1_test, y1_test = transform_to_dataset(test_sentences_X1)\n", | ||||
|     "\n", | ||||
|     "performance1_1 = clf.score(X_test, y_test)\n", | ||||
|     "performance1_1 = clf.score(X1_test, y1_test)\n", | ||||
|     "\n", | ||||
|     "print(\"Accuracy:\", performance1_1)" | ||||
|    ] | ||||
| @ -372,16 +451,31 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 16, | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "outputs": [], | ||||
|    "execution_count": 58, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "calculated perfomance 1.4=  0.756485959481\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "performance1_2 = 0\n", | ||||
|     "\n", | ||||
|     "performance1_3 = 0\n", | ||||
|     "performance1_4 = 0\n", | ||||
|     "\n", | ||||
|     "# performance1_4\n", | ||||
|     "size=10000\n", | ||||
|     "clf.fit(X2[:size], y2[:size])\n", | ||||
|     "X2_test, y2_test = transform_to_dataset(test_sentences_X2)\n", | ||||
|     "performance1_4 = clf.score(X2_test, y2_test)\n", | ||||
|     "print(\"calculated perfomance 1.4= \",performance1_4)\n", | ||||
|     "\n", | ||||
|     "performance1_5 = 0\n", | ||||
|     "\n", | ||||
|     "performance1_6 = 0" | ||||
|    ] | ||||
|   }, | ||||
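Only performance 1.1 and 1.4 are computed so far; 1.2, 1.3, 1.5 and 1.6 remain zero placeholders. A hedged sketch of how performance 1.2 (a second model on X1) might be filled in, assuming "model2" means another scikit-learn pipeline over the same dict features (clf2 and the MultinomialNB choice are illustrative assumptions, not the commit's code):

    # hypothetical second model on X1; the pipeline composition is assumed
    from sklearn.pipeline import Pipeline
    from sklearn.feature_extraction import DictVectorizer
    from sklearn.naive_bayes import MultinomialNB

    clf2 = Pipeline([
        ('vectorizer', DictVectorizer(sparse=True)),
        ('classifier', MultinomialNB()),
    ])

    # X1, y1 and size come from the earlier cells; X1_test, y1_test from the
    # Task 1 evaluation cell above
    clf2.fit(X1[:size], y1[:size])
    performance1_2 = clf2.score(X1_test, y1_test)
    print("calculated performance 1.2 = ", performance1_2)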
| @ -396,7 +490,7 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 17, | ||||
|    "execution_count": 56, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -405,7 +499,7 @@ | ||||
|      "text": [ | ||||
|       "3.6.3\n", | ||||
|       "checking...\n", | ||||
|       "[('Hello', 'NNP'), ('world', 'VBD'), (',', ','), ('lets', 'NNS'), ('do', 'VB'), ('something', 'VBG'), ('awesome', 'NN'), ('today', 'NN'), ('!', 'CD')]\n" | ||||
|       "[('Hello', 'CS'), ('world', 'NN'), (',', ','), ('lets', 'NNS'), ('do', 'DO'), ('something', 'PN'), ('awesome', 'NN'), ('today', 'NR'), ('!', 'CD')]\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
| @ -438,7 +532,62 @@ | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 22, | ||||
|    "execution_count": 57, | ||||
|    "metadata": { | ||||
|     "scrolled": true | ||||
|    }, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n" | ||||
|      ] | ||||
|     }, | ||||
|     { | ||||
|      "data": { | ||||
|       "text/html": [ | ||||
|        "<iframe id=\"igraph\" scrolling=\"no\" style=\"border:none;\" seamless=\"seamless\" src=\"https://plot.ly/~carsten95/0.embed\" height=\"525px\" width=\"100%\"></iframe>" | ||||
|       ], | ||||
|       "text/plain": [ | ||||
|        "<plotly.tools.PlotlyDisplay object>" | ||||
|       ] | ||||
|      }, | ||||
|      "execution_count": 57, | ||||
|      "metadata": {}, | ||||
|      "output_type": "execute_result" | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "import plotly\n", | ||||
|     "plotly.tools.set_credentials_file(username='carsten95', api_key='vElf5IOxiFheQdjTxjXW')\n", | ||||
|     "plotly.__version__\n", | ||||
|     "import plotly.plotly as py\n", | ||||
|     "import plotly.graph_objs as go\n", | ||||
|     "\n", | ||||
|     "data = [go.Bar(\n", | ||||
|     "            x=['performance 1.1', 'performance 1.2', 'performance 1.3', 'performance 1.4', 'performance 1.5' , 'performance 1.6'],\n", | ||||
|     "            y=[performance1_1, performance1_2, performance1_3, performance1_4, performance1_5, performance1_6]\n", | ||||
|     "    )]\n", | ||||
|     "\n", | ||||
|     "py.iplot(data, filename='basic-bar')" | ||||
|    ] | ||||
|   }, | ||||
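As a local alternative to uploading the chart to plot.ly with account credentials, the same bar chart can be rendered offline inside the notebook; a minimal sketch, assuming the plotly 2.x/3.x API that the cell above already uses:

    # render the Task 1 bar chart in the notebook without a plot.ly account
    from plotly.offline import init_notebook_mode, iplot
    import plotly.graph_objs as go

    init_notebook_mode(connected=True)
    data = [go.Bar(
        x=['performance 1.1', 'performance 1.2', 'performance 1.3',
           'performance 1.4', 'performance 1.5', 'performance 1.6'],
        y=[performance1_1, performance1_2, performance1_3,
           performance1_4, performance1_5, performance1_6],
    )]
    iplot(data)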
|   { | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": { | ||||
|     "collapsed": true | ||||
|    }, | ||||
|    "source": [ | ||||
|     "### Results for Task 2\n", | ||||
|     "* train your model with standard features (plot a graph with all classifiers x results)\n", | ||||
|     "    * performance 2.1 = model4 in X3\n", | ||||
|     "    * performance 2.2 = model5 in X3" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 60, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
| @ -457,21 +606,18 @@ | ||||
|        "<plotly.tools.PlotlyDisplay object>" | ||||
|       ] | ||||
|      }, | ||||
|      "execution_count": 22, | ||||
|      "execution_count": 60, | ||||
|      "metadata": {}, | ||||
|      "output_type": "execute_result" | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "import plotly\n", | ||||
|     "plotly.tools.set_credentials_file(username='carsten95', api_key='vElf5IOxiFheQdjTxjXW')\n", | ||||
|     "plotly.__version__\n", | ||||
|     "import plotly.plotly as py\n", | ||||
|     "import plotly.graph_objs as go\n", | ||||
|     "performance2_1 = 0\n", | ||||
|     "performance2_2 = 0\n", | ||||
|     "\n", | ||||
|     "data = [go.Bar(\n", | ||||
|     "            x=['performance 1.1', 'performance 1.2', 'performance 1.3', 'performance 1.4', 'performance 1.5' , 'performance 1.6'],\n", | ||||
|     "            y=[performance1_1, performance1_2, performance1_3, performance1_4, performance1_5, performance1_6]\n", | ||||
|     "            x=['performance 2.1', 'performance 2.2'],\n", | ||||
|     "            y=[performance2_1, performance2_2]\n", | ||||
|     "    )]\n", | ||||
|     "\n", | ||||
|     "py.iplot(data, filename='basic-bar')" | ||||
|  | ||||