created Task 04
Jonas_Solutions/Task03_Instructions.py (new file, +84 lines)
@@ -0,0 +1,84 @@
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re

'''
Task 3: playing with a NN framework (Keras) and basic sentiment analysis
- use the following model as a baseline and improve it!
- export your metadata (just basic hyperparameters and outcomes for the test data!); a sketch follows the script
- test data = 0.3 (not in this example, change it!)
- random_state = 4222
- no need for cross-validation!
'''

# parameters
max_features = 500
embed_dim = 128
lstm_out = 196
dropout = 0.1
dropout_1d = 0.4
recurrent_dropout = 0.1
random_state = 1324
validation_size = 1000
batch_size = 16
epochs = 2
verbose = 2

# load the dataset and keep only the text and sentiment columns
df = pd.read_csv('dataset_sentiment.csv')
df = df[['text', 'sentiment']]
print(df[0:10])

# drop neutral tweets, lowercase, strip the 'rt' retweet marker and punctuation
df = df[df.sentiment != "Neutral"]
df['text'] = df['text'].apply(lambda x: x.lower())
df['text'] = df['text'].apply(lambda x: x.replace('rt', ' '))
df['text'] = df['text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\s]', '', x))

# tokenize the texts and pad all sequences to a common length
tok = Tokenizer(num_words=max_features, split=' ')
tok.fit_on_texts(df['text'].values)
X = tok.texts_to_sequences(df['text'].values)
X = pad_sequences(X)

# baseline model: embedding -> spatial dropout -> LSTM -> softmax over the two classes
nn = Sequential()
nn.add(Embedding(max_features, embed_dim, input_length=X.shape[1]))
nn.add(SpatialDropout1D(dropout_1d))
nn.add(LSTM(lstm_out, dropout=dropout, recurrent_dropout=recurrent_dropout))
nn.add(Dense(2, activation='softmax'))
nn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(nn.summary())

# one-hot encode the labels and train on a 70/30 train/test split
Y = pd.get_dummies(df['sentiment']).values
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.30, random_state=random_state)
nn.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=verbose)

# hold out the last validation_size samples for a per-class check, evaluate on the rest
X_validate = X_test[-validation_size:]
Y_validate = Y_test[-validation_size:]
X_test = X_test[:-validation_size]
Y_test = Y_test[:-validation_size]
score, accuracy = nn.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)
print("score: %.2f" % score)
print("acc: %.2f" % accuracy)

# per-class accuracy on the held-out validation samples
pos_cnt, neg_cnt, pos_ok, neg_ok = 0, 0, 0, 0
for x in range(len(X_validate)):
    result = nn.predict(X_validate[x].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]
    if np.argmax(result) == np.argmax(Y_validate[x]):
        if np.argmax(Y_validate[x]) == 0: neg_ok += 1
        else: pos_ok += 1
    if np.argmax(Y_validate[x]) == 0: neg_cnt += 1
    else: pos_cnt += 1

print("pos_acc", pos_ok / pos_cnt * 100, "%")
print("neg_acc", neg_ok / neg_cnt * 100, "%")

# sanity check on a single unseen example, padded to the model's input length (26 here)
X2 = ['what are u going to say about that? the truth, wassock?!']
X2 = tok.texts_to_sequences(X2)
X2 = pad_sequences(X2, maxlen=26, dtype='int32', value=0)
print(X2)
print(nn.predict(X2, batch_size=1, verbose=2)[0])
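
The task instructions above ask for the run's metadata to be exported (basic hyperparameters plus the outcomes on the test data). A minimal sketch of one way to do that from the end of this script, assuming JSON output; the metadata.json filename and the key names are illustrative choices, not prescribed by the assignment:

import json

# hedged sketch: dump hyperparameters and test outcomes; filename and keys are illustrative
metadata = {
    'max_features': max_features,
    'embed_dim': embed_dim,
    'lstm_out': lstm_out,
    'dropout': dropout,
    'dropout_1d': dropout_1d,
    'recurrent_dropout': recurrent_dropout,
    'batch_size': batch_size,
    'epochs': epochs,
    'test_size': 0.30,
    'random_state': random_state,
    'test_score': float(score),
    'test_accuracy': float(accuracy),
}
with open('metadata.json', 'w') as f:
    json.dump(metadata, f, indent=2)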

Jonas_Solutions/Task_03.ipynb (new file, +293 lines)
@@ -0,0 +1,293 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.feature_extraction.text import CountVectorizer\n",
    "from keras.preprocessing.text import Tokenizer\n",
    "from keras.preprocessing.sequence import pad_sequences\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\n",
    "from sklearn.model_selection import train_test_split\n",
    "from keras.utils.np_utils import to_categorical\n",
    "import re\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "dataset already downloaded\n"
     ]
    }
   ],
   "source": [
    "%%bash\n",
    "\n",
    "if [ ! -e 'dataset_sentiment.csv' ]\n",
    "then\n",
    "    echo \"downloading dataset\"\n",
    "    wget https://raw.githubusercontent.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab/master/2018_SoSe/exercises/dataset_sentiment.csv\n",
    "else\n",
    "    echo \"dataset already downloaded\"\n",
    "fi"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# parameters\n",
    "max_features = 500\n",
    "embed_dim = 128\n",
    "lstm_out = 196\n",
    "dropout = 0.1\n",
    "dropout_1d = 0.4\n",
    "recurrent_dropout = 0.1\n",
    "random_state = 1324\n",
    "validation_size = 1000\n",
    "batch_size = 16\n",
    "epochs = 2\n",
    "verbose = 2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                                text sentiment\n",
      "0  RT @NancyLeeGrahn: How did everyone feel about...   Neutral\n",
      "1  RT @ScottWalker: Didn't catch the full #GOPdeb...  Positive\n",
      "2  RT @TJMShow: No mention of Tamir Rice and the ...   Neutral\n",
      "3  RT @RobGeorge: That Carly Fiorina is trending ...  Positive\n",
      "4  RT @DanScavino: #GOPDebate w/ @realDonaldTrump...  Positive\n",
      "5  RT @GregAbbott_TX: @TedCruz: \"On my first day ...  Positive\n",
      "6  RT @warriorwoman91: I liked her and was happy ...  Negative\n",
      "7  Going on #MSNBC Live with @ThomasARoberts arou...   Neutral\n",
      "8  Deer in the headlights RT @lizzwinstead: Ben C...  Negative\n",
      "9  RT @NancyOsborne180: Last night's debate prove...  Negative\n"
     ]
    }
   ],
   "source": [
    "df = pd.read_csv('dataset_sentiment.csv')\n",
    "df = df[['text', 'sentiment']]\n",
    "print(df[0:10])\n",
    "\n",
    "df = df[df.sentiment != \"Neutral\"]\n",
    "df['text'] = df['text'].apply(lambda x: x.lower())\n",
    "df['text'] = df['text'].apply(lambda x: x.replace('rt', ' '))\n",
    "df['text'] = df['text'].apply(lambda x: re.sub(r'[^a-zA-Z0-9\\\\s]', '', x))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "tok = Tokenizer(num_words=max_features, split=' ')\n",
    "tok.fit_on_texts(df['text'].values)\n",
    "X = tok.texts_to_sequences(df['text'].values)\n",
    "X = pad_sequences(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "embedding_1 (Embedding)      (None, 26, 128)           64000     \n",
      "_________________________________________________________________\n",
      "spatial_dropout1d_1 (Spatial (None, 26, 128)           0         \n",
      "_________________________________________________________________\n",
      "lstm_1 (LSTM)                (None, 196)               254800    \n",
      "_________________________________________________________________\n",
      "dense_1 (Dense)              (None, 2)                 394       \n",
      "=================================================================\n",
      "Total params: 319,194\n",
      "Trainable params: 319,194\n",
      "Non-trainable params: 0\n",
      "_________________________________________________________________\n",
      "None\n"
     ]
    }
   ],
   "source": [
    "nn = Sequential()\n",
    "nn.add(Embedding(max_features, embed_dim, input_length=X.shape[1]))\n",
    "nn.add(SpatialDropout1D(dropout_1d))\n",
    "nn.add(LSTM(lstm_out, dropout=dropout, recurrent_dropout=recurrent_dropout))\n",
    "nn.add(Dense(2, activation='softmax'))\n",
    "nn.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n",
    "print(nn.summary())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/2\n",
      " - 30s - loss: 0.4374 - acc: 0.8161\n",
      "Epoch 2/2\n",
      " - 30s - loss: 0.3614 - acc: 0.8487\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<keras.callbacks.History at 0x7fe38072e978>"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Y = pd.get_dummies(df['sentiment']).values\n",
    "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.30, random_state=random_state)\n",
    "nn.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size, verbose=verbose)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "score: 0.37\n",
      "acc: 0.85\n"
     ]
    }
   ],
   "source": [
    "X_validate = X_test[-validation_size:]\n",
    "Y_validate = Y_test[-validation_size:]\n",
    "X_test = X_test[:-validation_size]\n",
    "Y_test = Y_test[:-validation_size]\n",
    "score, accuracy = nn.evaluate(X_test, Y_test, verbose=2, batch_size=batch_size)\n",
    "print(\"score: %.2f\" % score)\n",
    "print(\"acc: %.2f\" % accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "pos_cnt, neg_cnt, pos_ok, neg_ok = 0, 0, 0, 0\n",
    "for x in range(len(X_validate)):\n",
    "    result = nn.predict(X_validate[x].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]\n",
    "    if np.argmax(result) == np.argmax(Y_validate[x]):\n",
    "        if np.argmax(Y_validate[x]) == 0: neg_ok += 1\n",
    "        else: pos_ok += 1\n",
    "    if np.argmax(Y_validate[x]) == 0: neg_cnt += 1\n",
    "    else: pos_cnt += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pos_acc 31.770833333333332 %\n",
      "neg_acc 97.27722772277228 %\n"
     ]
    }
   ],
   "source": [
    "print(\"pos_acc\", pos_ok/pos_cnt*100, \"%\")\n",
    "print(\"neg_acc\", neg_ok/neg_cnt*100, \"%\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0  48  37\n",
      "  311 189   4 144  22  16   1 281]]\n",
      "[0.93431044 0.06568963]\n"
     ]
    }
   ],
   "source": [
    "X2 = ['what are u going to say about that? the truth, wassock?!']\n",
    "X2 = tok.texts_to_sequences(X2)\n",
    "X2 = pad_sequences(X2, maxlen=26, dtype='int32', value=0)\n",
    "print(X2)\n",
    "print(nn.predict(X2, batch_size=1, verbose=2)[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
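
The per-class numbers above (pos_acc ≈ 31.8 %, neg_acc ≈ 97.3 %) show the baseline predicts the majority negative class far better than the positive one. Since the assignment asks to improve the model, one hedged starting point is to weight the loss by inverse class frequency during training. A minimal sketch reusing X_train and Y_train from the notebook; the weighting scheme is an illustrative choice, not the prescribed improvement:

# hedged sketch: counter the positive/negative imbalance with Keras's class_weight
counts = Y_train.sum(axis=0)  # samples per class (columns of the one-hot labels)
class_weight = {i: len(Y_train) / (2.0 * c) for i, c in enumerate(counts)}
nn.fit(X_train, Y_train, epochs=epochs, batch_size=batch_size,
       verbose=verbose, class_weight=class_weight)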