task 2 and 3 and a naive approach test image

This commit is contained in:
Carsten 2018-05-31 11:49:28 +02:00
parent 05f432fd41
commit 5fba30ef0b
3 changed files with 983 additions and 123 deletions

File diff suppressed because one or more lines are too long


@@ -0,0 +1,518 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 51,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" text sentiment\n",
"0 RT @NancyLeeGrahn: How did everyone feel about... Neutral\n",
"1 RT @ScottWalker: Didn't catch the full #GOPdeb... Positive\n",
"2 RT @TJMShow: No mention of Tamir Rice and the ... Neutral\n",
"3 RT @RobGeorge: That Carly Fiorina is trending ... Positive\n",
"4 RT @DanScavino: #GOPDebate w/ @realDonaldTrump... Positive\n",
"5 RT @GregAbbott_TX: @TedCruz: \"On my first day ... Positive\n",
"6 RT @warriorwoman91: I liked her and was happy ... Negative\n",
"7 Going on #MSNBC Live with @ThomasARoberts arou... Neutral\n",
"8 Deer in the headlights RT @lizzwinstead: Ben C... Negative\n",
"9 RT @NancyOsborne180: Last night's debate prove... Negative\n"
]
}
],
"source": [
"import numpy as np \n",
"import pandas as pd \n",
"from sklearn.feature_extraction.text import CountVectorizer\n",
"from keras.preprocessing.text import Tokenizer\n",
"from keras.preprocessing.sequence import pad_sequences\n",
"from keras.models import Sequential\n",
"from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D\n",
"from sklearn.model_selection import train_test_split\n",
"from keras.utils.np_utils import to_categorical\n",
"import re\n",
"\n",
"'''\n",
"Task 3: playing with NN framwork/keras and basic sentiment analysis\n",
"- use the following model as a baseline and improve it!\n",
"- export your metadata (just basic hyperparameters and outcomes for test data!)\n",
"- test data = 0.3 (not in this example, change it!)\n",
"- random_state = 4222\n",
"- no need to cross-validation!\n",
"'''\n",
"\n",
"# parameters\n",
"max_fatures = 1000\n",
"embed_dim = 128\n",
"lstm_out = 196\n",
"dropout = 0.1\n",
"dropout_1d = 0.4\n",
"recurrent_dropout = 0.1\n",
"random_state = 4222\n",
"validation_size = 1000\n",
"batch_size = 16\n",
"epochs=2\n",
"verbose= 2\n",
"\n",
"df = pd.read_csv('/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/sentiment-analysis/dataset_sentiment.csv')\n",
"df = df[['text','sentiment']]\n",
"print(df[0:10])"
]
},
{
"cell_type": "code",
"execution_count": 52,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"df = df[df.sentiment != \"Neutral\"]\n",
"#replace all capital letters with its small character\n",
"df['text'] = df['text'].apply(lambda x: x.lower())\n",
"#removes all rt in messages, often occuring in front of twitter raw data\n",
"df['text'] = df['text'].apply(lambda x: x.replace('rt',' '))\n",
"#only accepts alphanumerical characters, erease all other characters\n",
"df['text'] = df['text'].apply((lambda x: re.sub('[^a-zA-Z0-9\\s]','',x)))"
]
},
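{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added sanity check (not part of the original pipeline): the `rt` removal above is a plain substring replace, so it also strips `rt` inside ordinary words. The sketch below uses a hypothetical sample tweet."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# illustration only: str.replace is substring-based, not token-based,\n",
"# so 'rt' also disappears inside words ('shirt' -> 'shi ')\n",
"sample = 'rt @user my shirt is great'  # hypothetical example tweet\n",
"print(sample.replace('rt', ' '))\n",
"# a word-boundary regex would remove only the standalone retweet marker\n",
"print(re.sub(r'\\brt\\b', ' ', sample))"
]
},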
{
"cell_type": "code",
"execution_count": 53,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"2236\n",
"8493\n"
]
}
],
"source": [
"#evaluate distribution of positive and negative examples\n",
"print(len(df[df.sentiment == \"Positive\"]))\n",
"print(len(df[df.sentiment == \"Negative\"]))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 54,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" text sentiment\n",
"1 scottwalker didnt catch the full gopdebate l... Positive\n",
"3 robgeorge that carly fiorina is trending ho... Positive\n",
"4 danscavino gopdebate w realdonaldtrump deliv... Positive\n",
"5 gregabbotttx tedcruz on my first day i will ... Positive\n",
"6 warriorwoman91 i liked her and was happy whe... Negative\n",
"8 deer in the headlights lizzwinstead ben cars... Negative\n",
"9 nancyosborne180 last nights debate proved it... Negative\n",
"10 jgreendc realdonaldtrump in all fairness billc... Negative\n",
"11 waynedupreeshow just woke up to tweet this o... Positive\n",
"12 me reading my familys comments about how great... Negative\n"
]
}
],
"source": [
"print(df[0:10])"
]
},
{
"cell_type": "code",
"execution_count": 55,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 363 122 1 722\n",
" 2 39 58 237 36 210 6 174 12 742]\n",
" [ 0 0 0 0 0 0 0 0 0 0 0 16 284 252 5 818 102 167\n",
" 26 135 6 1 172 12 2 233 723 17]\n",
" [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 309 23\n",
" 1 216 12 1 702 6 185 207 371 670]\n",
" [ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 127 17 53\n",
" 262 410 9 82 303 441 62 194 2 51]\n",
" [ 0 0 0 0 0 0 0 0 0 9 167 8 21 63 9 612 188 21\n",
" 189 4 34 1 562 19 819 2 44 743]]\n"
]
}
],
"source": [
"#generates a tokenizer with fixed lenght\n",
"tok = Tokenizer(num_words=max_fatures, split=' ')\n",
"#train tokenizer\n",
"tok.fit_on_texts(df['text'].values)\n",
"#transforms each sentence to a sequence of intgers\n",
"X = tok.texts_to_sequences(df['text'].values)\n",
"\n",
"#print(X[:20])\n",
"\n",
"#for each of these sequences it transforms it to an array of same length by inserting 0 in front by standard configuration of parameters \n",
"X = pad_sequences(X)\n",
"print(X[:5])\n",
"\n",
"\n",
"#print(type(X))\n"
]
},
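{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added toy illustration: `pad_sequences` left-pads with zeros by default (`padding='pre'`), which is why the matrices above have their zeros in front."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# toy example: default pre-padding vs. explicit post-padding\n",
"toy = [[1, 2, 3], [4, 5]]\n",
"print(pad_sequences(toy))                  # [[1 2 3] [0 4 5]]\n",
"print(pad_sequences(toy, padding='post'))  # [[1 2 3] [4 5 0]]"
]
},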
{
"cell_type": "code",
"execution_count": 56,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"_________________________________________________________________\n",
"Layer (type) Output Shape Param # \n",
"=================================================================\n",
"embedding_5 (Embedding) (None, 28, 128) 128000 \n",
"_________________________________________________________________\n",
"spatial_dropout1d_5 (Spatial (None, 28, 128) 0 \n",
"_________________________________________________________________\n",
"lstm_5 (LSTM) (None, 196) 254800 \n",
"_________________________________________________________________\n",
"dense_5 (Dense) (None, 2) 394 \n",
"=================================================================\n",
"Total params: 383,194\n",
"Trainable params: 383,194\n",
"Non-trainable params: 0\n",
"_________________________________________________________________\n",
"None\n"
]
}
],
"source": [
"#configuration of nn\n",
"nn = Sequential()\n",
"nn.add(Embedding(max_fatures, embed_dim, input_length = X.shape[1]))\n",
"nn.add(SpatialDropout1D(dropout_1d))\n",
"nn.add(LSTM(lstm_out, dropout=dropout, recurrent_dropout=recurrent_dropout))\n",
"nn.add(Dense(2, activation='softmax'))\n",
"nn.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics = ['accuracy'])\n",
"print(nn.summary())"
]
},
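{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added note: the parameter counts in the summary can be checked by hand. The embedding stores `max_features * embed_dim = 1000 * 128 = 128,000` weights; the LSTM has four gates, each with input, recurrent, and bias weights, giving `4 * (196 * (128 + 196) + 196) = 254,800`; the dense softmax layer adds `196 * 2 + 2 = 394`, for 383,194 parameters in total."
]
},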
{
"cell_type": "code",
"execution_count": 57,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch 1/2\n",
" - 55s - loss: 0.4244 - acc: 0.8173\n",
"Epoch 2/2\n",
" - 53s - loss: 0.3386 - acc: 0.8565\n",
"score: 0.37\n",
"acc: 0.86\n"
]
}
],
"source": [
"#get the teacher values\n",
"Y = pd.get_dummies(df['sentiment']).values\n",
"#gnerates X and y train & test data\n",
"X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.30, random_state = random_state)\n",
"#trains the machine learning configuration with the declared parameters in the top\n",
"nn.fit(X_train, Y_train, epochs = epochs, batch_size=batch_size, verbose=verbose)\n",
"\n",
"#also generate validation set by cutting the last validation_size elements from test data\n",
"X_validate = X_test[-validation_size:]\n",
"Y_validate = Y_test[-validation_size:]\n",
"X_test = X_test[:-validation_size]\n",
"Y_test = Y_test[:-validation_size]\n",
"\n",
"#evaluates the score and the accuracy\n",
"score, accuracy = nn.evaluate(X_test, Y_test, verbose = 2, batch_size = batch_size)\n",
"print(\"score: %.2f\" % (score))\n",
"print(\"acc: %.2f\" % (accuracy))"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"pos_acc 48.78048780487805 %\n",
"neg_acc 94.46540880503144 %\n"
]
}
],
"source": [
"#initialize counter for evaluating prediction\n",
"pos_cnt, neg_cnt, pos_ok, neg_ok, tp, fp, tn ,fn = 0, 0, 0, 0, 0, 0, 0, 0 \n",
"for x in range(len(X_validate)):\n",
" #predict for each element in validation set its true false probability\n",
" result = nn.predict(X_validate[x].reshape(1,X_test.shape[1]),batch_size=1,verbose = 2)[0]\n",
" #print(result)\n",
" #check if highest prob for same class\n",
" if np.argmax(result) == np.argmax(Y_validate[x]):\n",
" #if high prob in first array element ---> classification as neg ---> count neg \n",
" if np.argmax(Y_validate[x]) == 0: neg_ok += 1\n",
" #else count as pos\n",
" else: pos_ok += 1\n",
" #count of teacher labels\n",
" if np.argmax(Y_validate[x]) == 0: neg_cnt += 1\n",
" else: pos_cnt += 1\n",
"\n",
"#print results\n",
"print(\"pos_acc\", pos_ok/pos_cnt*100, \"%\")\n",
"print(\"neg_acc\", neg_ok/neg_cnt*100, \"%\")\n",
"\n",
"pos_acc = pos_ok/pos_cnt\n",
"neg_acc = neg_ok/neg_cnt"
]
},
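{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added cross-check (a sketch, not part of the original notebook): the per-class accuracies from the loop above can also be computed in one vectorized batch."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# predict the whole validation set at once and compare argmax labels\n",
"preds = np.argmax(nn.predict(X_validate, batch_size=batch_size), axis=1)\n",
"truth = np.argmax(Y_validate, axis=1)\n",
"# index 0 = Negative, index 1 = Positive (alphabetical order from get_dummies)\n",
"print('neg_acc', np.mean(preds[truth == 0] == 0) * 100, '%')\n",
"print('pos_acc', np.mean(preds[truth == 1] == 1) * 100, '%')"
]
},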
{
"cell_type": "code",
"execution_count": 70,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[[ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n",
" 5 700 8 8 12 218 8 5 290 14]]\n",
"[ 0.0937252 0.9062748]\n"
]
}
],
"source": [
"#evaluate on different dataset\n",
"X2 = ['Jonas is nice and happy and in love and is looking for freedom']\n",
"X2 = tok.texts_to_sequences(X2)\n",
"X2 = pad_sequences(X2, maxlen=len(X[0]), dtype='int32', value=0)\n",
"print(X2)\n",
"print(nn.predict(X2, batch_size=1, verbose = 2)[0])"
]
},
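{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added convenience wrapper (a hypothetical helper, not in the original): bundle tokenizing, padding, and prediction for ad-hoc sentences."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def predict_sentiment(text):\n",
"    # tokenize, pad to the training sequence length, return (neg, pos) probabilities\n",
"    seq = tok.texts_to_sequences([text])\n",
"    seq = pad_sequences(seq, maxlen=X.shape[1])\n",
"    return nn.predict(seq, batch_size=1, verbose=2)[0]\n",
"\n",
"print(predict_sentiment('Jonas is nice and happy and in love'))"
]
},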
{
"cell_type": "code",
"execution_count": 78,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"configuration_1 = {\n",
" \"name\":\"experiment1\",\n",
" \n",
" \"model\":\"generic_model_1_name\",\n",
" \"model-name\":\"deepNN\",\n",
"\n",
"\n",
" \"dataset\":\"X1\",\n",
" \"dataset-name\":\"dataset_sentiment.csv\",\n",
" \"dataset-link\":\"see_github\",\n",
" \n",
" \"measurement-name\":\"generic_measurement_hub\",\n",
" \n",
" \"execution-name\":\"genereric_execution_name\",\n",
" \n",
" \"precision\":-1,\n",
" \"recall\":-1, \n",
" \"pos_accn\":pos_acc,\n",
" \"neg_acc\":neg_acc,\n",
" \"accuracy\":score\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Export Data"
]
},
{
"cell_type": "code",
"execution_count": 79,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"from rdflib import Namespace, Graph, Literal\n",
"from rdflib.namespace import FOAF, OWL, XSD, RDFS, DCTERMS, DOAP, DC, RDF"
]
},
{
"cell_type": "code",
"execution_count": 80,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"prov = Namespace('http://www.w3.org/ns/prov#')\n",
"dcat = Namespace('http://www.w3.org/ns/dcat#')\n",
"mexalgo = Namespace('http://mex.aksw.org/mex-algo#')\n",
"mexperf = Namespace('http://mex.aksw.org/mex-perf#')\n",
"mexcore = Namespace('http://mex.aksw.org/mex-core#')\n",
"this = Namespace('http://mex.aksw.org/examples/')"
]
},
{
"cell_type": "code",
"execution_count": 81,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"def experiment_root_graph(root_node_name):\n",
" g = Graph()\n",
" g.add((this[root_node_name],RDF.type, mexcore.Experiment))\n",
" g.add((this[root_node_name],RDF.type, mexcore.ApplicationContext))\n",
" g.add((this[root_node_name],RDFS.label, Literal('2719095')))\n",
" g.add((this[root_node_name],DCTERMS.date, Literal('2018-05-28',datatype=XSD.date)))\n",
" g.add((this[root_node_name],FOAF.givenName, Literal('Carsten')))\n",
" g.add((this[root_node_name],FOAF.mbox, Literal('carsten.draschner@gmail.com')))\n",
" return g ,this[root_node_name]\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 82,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"<class 'rdflib.term.URIRef'>\n"
]
}
],
"source": [
"g, r = experiment_root_graph(\"first_test\")\n",
"print(type(r))"
]
},
{
"cell_type": "code",
"execution_count": 83,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#extend a rdf graph with a configuration\n",
"def experiment_to_rdf(graph, experiment, root_node):\n",
" \n",
" g.add((this[experiment[\"name\"]],RDF.type,mexcore.ExperimentConfiguration))\n",
" g.add((this[experiment[\"name\"]],prov.used, this[experiment[\"model\"]]))\n",
" g.add((this[experiment[\"name\"]],prov.wasStartedBy, root_node))\n",
" \n",
" #dataset information\n",
" g.add((this[experiment[\"dataset\"]],RDF.type,mexcore.Dataset))\n",
" g.add((this.dataset2,RDFS.label,Literal(experiment[\"dataset-name\"])))\n",
" g.add((this.dataset2,DCTERMS.landingPage,Literal(experiment[\"dataset-link\"])))\n",
" \n",
" #model description\n",
" g.add((this[experiment[\"model\"]],RDF.type,mexalgo.Algorithm))\n",
" g.add((this[experiment[\"model\"]],RDFS.label,Literal(experiment[\"model-name\"])))\n",
" g.add((this[experiment[\"model\"]],DCTERMS.identifier,Literal(experiment[\"model-name\"])))\n",
" ###g.add((this.model1,mexalgo.hasHyperParameter,this.hyerparameter1))\n",
" \n",
" #execution\n",
" g.add((this[experiment[\"execution-name\"]],RDF.type,mexcore.ExecutionOverall))\n",
" g.add((this[experiment[\"execution-name\"]],prov.generated,this[experiment[\"measurement-name\"]]))\n",
" g.add((this[experiment[\"execution-name\"]],prov.used,this.test))\n",
" g.add((this[experiment[\"execution-name\"]],prov.used,this[experiment[\"model\"]]))\n",
" \n",
" #test\n",
" g.add((this.test,RDF.type,mexcore.Test))\n",
" g.add((this.test,RDFS.label,Literal('Test')))\n",
" \n",
" #evaluation information\n",
" g.add((this[experiment[\"measurement-name\"]],RDF.type,mexcore.PerformanceMeasure))\n",
" g.add((this[experiment[\"measurement-name\"]],mexperf.precision,Literal(experiment[\"precision\"],datatype=XSD.float)))\n",
" g.add((this[experiment[\"measurement-name\"]],mexperf.recall,Literal(experiment[\"recall\"],datatype=XSD.float)))\n",
" g.add((this[experiment[\"measurement-name\"]],mexperf.accuracy,Literal(experiment[\"accuracy\"],datatype=XSD.float)))\n",
" g.add((this[experiment[\"measurement-name\"]],prov.wasGeneratedBy,this[experiment[\"execution-name\"]]))"
]
},
{
"cell_type": "code",
"execution_count": 84,
"metadata": {},
"outputs": [],
"source": [
"experiment_to_rdf(g,configuration_1,r)"
]
},
{
"cell_type": "code",
"execution_count": 85,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"with open('task3_metadata.ttl','wb') as f:\n",
" f.write(g.serialize(format='turtle'))"
]
},
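{
"cell_type": "markdown",
"metadata": {},
"source": [
"Added verification step (a sketch): read the exported Turtle file back and print it to confirm the metadata was serialized."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# read the serialized graph back for a quick visual check\n",
"with open('task3_metadata.ttl', 'rb') as f:\n",
"    print(f.read().decode('utf-8'))"
]
},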
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

Binary file not shown (naive-approach test image, 247 KiB).