{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/jonas/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", " from ._conv import register_converters as _register_converters\n", "Using TensorFlow backend.\n" ] } ], "source": [ "import numpy as np \n", "import pandas as pd \n", "from sklearn.feature_extraction.text import CountVectorizer\n", "from keras.preprocessing.text import Tokenizer\n", "from keras.preprocessing.sequence import pad_sequences\n", "from keras.models import Sequential\n", "from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\n", "from sklearn.model_selection import train_test_split\n", "from keras.utils.np_utils import to_categorical\n", "import re\n" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ">**Task 3**: playing with NN framwork/keras and basic sentiment analysis\n", ">- use the following model as a baseline and improve it!\n", ">- export your metadata (just basic hyperparameters and outcomes for test data!)\n", ">- test data = 0.3 (not in this example, change it!)\n", ">- random_state = 4222\n", ">- no need to cross-validation!\n", "\n" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "dataset already downloaded\n" ] } ], "source": [ "%%bash\n", "\n", "if [ ! -e 'dataset_sentiment.csv' ]\n", "then\n", " echo \"downloading dataset\"\n", " wget https://raw.githubusercontent.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab/master/2018_SoSe/exercises/dataset_sentiment.csv\n", "else\n", " echo \"dataset already downloaded\"\n", "fi" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "# parameters\n", "max_fatures = 500\n", "embed_dim = 128\n", "lstm_out = 196\n", "dropout = 0.1\n", "dropout_1d = 0.4\n", "recurrent_dropout = 0.1\n", "random_state = 1324\n", "validation_size = 1000\n", "batch_size = 16\n", "epochs=2\n", "verbose= 2" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ " text sentiment\n", "0 RT @NancyLeeGrahn: How did everyone feel about... Neutral\n", "1 RT @ScottWalker: Didn't catch the full #GOPdeb... Positive\n", "2 RT @TJMShow: No mention of Tamir Rice and the ... Neutral\n", "3 RT @RobGeorge: That Carly Fiorina is trending ... Positive\n", "4 RT @DanScavino: #GOPDebate w/ @realDonaldTrump... Positive\n", "5 RT @GregAbbott_TX: @TedCruz: \"On my first day ... Positive\n", "6 RT @warriorwoman91: I liked her and was happy ... Negative\n", "7 Going on #MSNBC Live with @ThomasARoberts arou... Neutral\n", "8 Deer in the headlights RT @lizzwinstead: Ben C... Negative\n", "9 RT @NancyOsborne180: Last night's debate prove... 
Negative\n" ] } ], "source": [ "df = pd.read_csv('dataset_sentiment.csv')\n", "df = df[['text','sentiment']]\n", "print(df[0:10])\n", "\n", "df = df[df.sentiment != \"Neutral\"]\n", "df['text'] = df['text'].apply(lambda x: x.lower())\n", "df['text'] = df['text'].apply(lambda x: x.replace('rt',' '))\n", "df['text'] = df['text'].apply((lambda x: re.sub('[^a-zA-Z0-9\\s]','',x)))" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [], "source": [ "tok = Tokenizer(num_words=max_fatures, split=' ')\n", "tok.fit_on_texts(df['text'].values)\n", "X = tok.texts_to_sequences(df['text'].values)\n", "X = pad_sequences(X)" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "_________________________________________________________________\n", "Layer (type) Output Shape Param # \n", "=================================================================\n", "embedding_2 (Embedding) (None, 26, 128) 64000 \n", "_________________________________________________________________\n", "spatial_dropout1d_2 (Spatial (None, 26, 128) 0 \n", "_________________________________________________________________\n", "lstm_2 (LSTM) (None, 196) 254800 \n", "_________________________________________________________________\n", "dense_2 (Dense) (None, 2) 394 \n", "=================================================================\n", "Total params: 319,194\n", "Trainable params: 319,194\n", "Non-trainable params: 0\n", "_________________________________________________________________\n", "None\n" ] } ], "source": [ "nn = Sequential()\n", "nn.add(Embedding(max_fatures, embed_dim, input_length = X.shape[1]))\n", "nn.add(SpatialDropout1D(dropout_1d))\n", "nn.add(LSTM(lstm_out, dropout=dropout, recurrent_dropout=recurrent_dropout))\n", "nn.add(Dense(2, activation='softmax'))\n", "nn.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])\n", "print(nn.summary())" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/2\n", " - 21s - loss: 0.4322 - acc: 0.8196\n", "Epoch 2/2\n", " - 25s - loss: 0.3612 - acc: 0.8509\n" ] }, { "data": { "text/plain": [ "" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "Y = pd.get_dummies(df['sentiment']).values\n", "X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.30, random_state = random_state)\n", "nn.fit(X_train, Y_train, epochs = epochs, batch_size=batch_size, verbose=verbose)" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "score: 0.37\n", "acc: 0.84\n" ] } ], "source": [ "X_validate = X_test[-validation_size:]\n", "Y_validate = Y_test[-validation_size:]\n", "X_test = X_test[:-validation_size]\n", "Y_test = Y_test[:-validation_size]\n", "score, accuracy = nn.evaluate(X_test, Y_test, verbose = 2, batch_size = batch_size)\n", "print(\"score: %.2f\" % (score))\n", "print(\"acc: %.2f\" % (accuracy))" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "pos_cnt, neg_cnt, pos_ok, neg_ok = 0, 0, 0, 0\n", "for x in range(len(X_validate)):\n", " result = nn.predict(X_validate[x].reshape(1,X_test.shape[1]),batch_size=1,verbose = 2)[0]\n", " if np.argmax(result) == np.argmax(Y_validate[x]):\n", " if np.argmax(Y_validate[x]) == 0: neg_ok += 1\n", " else: pos_ok += 1\n", " if 
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# per-class accuracy on the validation slice\n",
    "pos_cnt, neg_cnt, pos_ok, neg_ok = 0, 0, 0, 0\n",
    "for x in range(len(X_validate)):\n",
    "    result = nn.predict(X_validate[x].reshape(1, X_test.shape[1]), batch_size=1, verbose=2)[0]\n",
    "    if np.argmax(result) == np.argmax(Y_validate[x]):\n",
    "        if np.argmax(Y_validate[x]) == 0:\n",
    "            neg_ok += 1\n",
    "        else:\n",
    "            pos_ok += 1\n",
    "    if np.argmax(Y_validate[x]) == 0:\n",
    "        neg_cnt += 1\n",
    "    else:\n",
    "        pos_cnt += 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pos_acc 39.58333333333333 %\n",
      "neg_acc 95.29702970297029 %\n"
     ]
    }
   ],
   "source": [
    "print(\"pos_acc\", pos_ok/pos_cnt*100, \"%\")\n",
    "print(\"neg_acc\", neg_ok/neg_cnt*100, \"%\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[  0   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0  48  37\n",
      "  311 189   4 144  22  16   1 281]]\n",
      "[0.8364928  0.16350722]\n"
     ]
    }
   ],
   "source": [
    "X2 = ['what are u going to say about that? the truth, wassock?!']\n",
    "X2 = tok.texts_to_sequences(X2)\n",
    "X2 = pad_sequences(X2, maxlen=X.shape[1], dtype='int32', value=0)  # pad to the training length (26 here)\n",
    "print(X2)\n",
    "print(nn.predict(X2, batch_size=1, verbose=2)[0])\n",
    "# output order is [Negative, Positive], so this tweet is classified as negative"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}