nlp-lab/Jonas_Solutions/Task_02_JonasWeinz.ipynb

{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NLP-LAB Exercise 02 by Jonas Weinz (2571421)\n",
"## links:\n",
"\n",
"* Article: https://miguelmalvarez.com/2017/03/23/how-can-machine-learning-and-ai-help-solving-the-fake-news-problem/\n",
" * corresponding code: https://github.com/kjam/random_hackery/blob/master/Attempting%20to%20detect%20fake%20news.ipynb\n",
"\n",
"* Tutorial on Datacamp: https://www.datacamp.com/community/tutorials/scikit-learn-fake-news\n",
"\n",
"* liar dataset paper: https://www.cs.ucsb.edu/~william/papers/acl2017.pdf\n",
" * dataset: https://www.cs.ucsb.edu/~william/data/liar_dataset.zip\n",
"\n",
"* Mex Vocabulary: http://jens-lehmann.org/files/2015/semantics_mex.pdf"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Dependencies for this Notebook:\n",
"* library [rdflib](https://github.com/RDFLib/rdflib)\n",
" * install: `pip3 install rdflib`\n"
]
},
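{
"cell_type": "markdown",
"metadata": {},
"source": [
"* quick sanity check (a minimal sketch, assuming `rdflib` was installed as described above): import the library and print its version"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: verify that rdflib is importable and show which version is installed\n",
"import rdflib\n",
"print(rdflib.__version__)"
]
},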
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Populating the interactive namespace from numpy and matplotlib\n"
]
}
],
"source": [
"%pylab inline"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import itertools\n",
"import sklearn.utils as sku\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.linear_model import PassiveAggressiveClassifier\n",
"from sklearn.naive_bayes import MultinomialNB\n",
"from sklearn import metrics\n",
"import matplotlib.pyplot as plt\n",
"from pprint import pprint as pp\n",
"from IPython.display import display, Markdown, Latex\n",
"import collections\n",
"import traceback\n",
"import os\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Tools used later"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def plot_confusion_matrix(cm, classes,\n",
" title,\n",
" normalize=False,\n",
" cmap=plt.cm.Blues):\n",
" fig_1, ax_1 = plt.subplots()\n",
" \"\"\"\n",
" See full source and example: \n",
" http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n",
" \n",
" This function prints and plots the confusion matrix.\n",
" Normalization can be applied by setting `normalize=True`.\n",
" \"\"\"\n",
" plt.imshow(cm, interpolation='nearest', cmap=cmap)\n",
" plt.title('Confusion Matrix for:\\n' + title)\n",
" plt.colorbar()\n",
" tick_marks = np.arange(len(classes))\n",
" plt.xticks(tick_marks, classes, rotation=45)\n",
" plt.yticks(tick_marks, classes)\n",
"\n",
" if normalize:\n",
" cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n",
" print(\"Normalized confusion matrix\")\n",
" else:\n",
" print('Confusion matrix, without normalization')\n",
"\n",
" thresh = cm.max() / 2.\n",
" \n",
" pp(cm)\n",
" \n",
" for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n",
" plt.text(j, i, cm[i, j],\n",
" horizontalalignment=\"center\",\n",
" color=\"white\" if cm[i, j] > thresh else \"black\")\n",
"\n",
" plt.tight_layout()\n",
" plt.ylabel('True label')\n",
" plt.xlabel('Predicted label')"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def test_classifier(labels, title, Xt, yt, clf):\n",
" pred = clf.predict(Xt)\n",
" score = metrics.accuracy_score(yt, pred)\n",
" pp(\"score: \" + str(score))\n",
" cm = metrics.confusion_matrix(yt, pred, labels=labels)\n",
" plot_confusion_matrix(cm, classes=labels, title=title)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"\\nfrom rdflib import Graph, Literal, BNode, RDF, Namespace\\nfrom rdflib.namespace import FOAF, DC, XSD\\n\\nmexcore = Namespace('http://mex.aksw.org/mex-core#')\\nmexperf = Namespace('http://mex.aksw.org/mex-perf#')\\nmexalgo = Namespace('http://mex.aksw.org/mex-algo#')\\nprov = Namespace('http://www.w3.org/ns/prov#')\\n\\ndef create_mex_graph():\\n graph = Graph()\\n graph.bind(mexcore)\\n graph.bind(mexperf)\\n graph.bind(mexalgo)\\n graph.bind(prov)\\n graph.bind(FOAF)\\n graph.bind(DC)\\n graph.bind(XSD)\\n \\n return graph\\n\\ndef mex_performance(experiment, model, dataset, performance, phase='Train', graph=create_mex_graph()):\\n \\n p = BNode()\\n \\n\""
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"'''\n",
"from rdflib import Graph, Literal, BNode, RDF, Namespace\n",
"from rdflib.namespace import FOAF, DC, XSD\n",
"\n",
"mexcore = Namespace('http://mex.aksw.org/mex-core#')\n",
"mexperf = Namespace('http://mex.aksw.org/mex-perf#')\n",
"mexalgo = Namespace('http://mex.aksw.org/mex-algo#')\n",
"prov = Namespace('http://www.w3.org/ns/prov#')\n",
"\n",
"def create_mex_graph():\n",
" graph = Graph()\n",
" graph.bind(mexcore)\n",
" graph.bind(mexperf)\n",
" graph.bind(mexalgo)\n",
" graph.bind(prov)\n",
" graph.bind(FOAF)\n",
" graph.bind(DC)\n",
" graph.bind(XSD)\n",
" \n",
" return graph\n",
"\n",
"def mex_performance(experiment, model, dataset, performance, phase='Train', graph=create_mex_graph()):\n",
" \n",
" p = BNode()\n",
" \n",
"'''"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Namespace('http://xmlns.com/foaf/0.1/')"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"FOAF\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate/Download Datasets we are working on\n",
"\n",
"* running bash script to download all needed data and store it into the `data` subfolder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"./Task_2_gen_data.sh"
]
},
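{
"cell_type": "markdown",
"metadata": {},
"source": [
"* sanity check (illustrative sketch): verify that the files used in the cells below are actually present in the `data` subfolder"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: check that the downloaded files referenced later in this notebook exist\n",
"for f in ['data/fake_or_real_news.csv', 'data/train.tsv', 'data/test.tsv', 'data/valid.tsv']:\n",
"    print(f, '->', 'found' if os.path.isfile(f) else 'MISSING')"
]
},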
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## configuration 1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"df_1 = pd.read_csv('data/fake_or_real_news.csv').set_index('Unnamed: 0')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* display first 10 entries"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"display(df_1.shape)\n",
"display(df_1[:10])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* create test dataset"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X1, Xt1, y1, yt1 = train_test_split(df_1.drop('label', axis=1)['text'], df_1.label, test_size=0.25, random_state=4222)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vectorizer_1 = TfidfVectorizer(stop_words='english', max_df=0.7)\n",
"vec_train_1 = vectorizer_1.fit_transform(X1)\n",
"vec_test_1 = vectorizer_1.transform(Xt1)"
]
},
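{
"cell_type": "markdown",
"metadata": {},
"source": [
"* a quick look at what the tf-idf vectorizer produced (illustrative sketch, nothing below depends on it): vocabulary size and matrix shapes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: inspect the fitted tf-idf vectorizer and the resulting sparse matrices\n",
"print('vocabulary size:', len(vectorizer_1.vocabulary_))\n",
"print('train matrix shape:', vec_train_1.shape)\n",
"print('test matrix shape:', vec_test_1.shape)"
]
},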
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* trying a Random Forest classifier "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.ensemble import RandomForestClassifier as RFC\n",
"clf_a = RFC(criterion='entropy', random_state=4222)\n",
"max_size=10000\n",
"clf_a.fit(vec_train_1[:max_size], y1[:max_size])\n",
"test_classifier(labels=[\"FAKE\",\"REAL\"], title=\"Configuration 1, model a -- train\", Xt=vec_train_1,yt=y1, clf=clf_a)\n",
"test_classifier(labels=[\"FAKE\",\"REAL\"], title=\"Configuration 1, model a -- test\", Xt=vec_test_1,yt=yt1, clf=clf_a)"
]
},
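{
"cell_type": "markdown",
"metadata": {},
"source": [
"* which terms drive the random forest? (illustrative sketch; depending on the scikit-learn version the vectorizer exposes `get_feature_names_out()` or `get_feature_names()`)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: show the 15 terms with the highest feature importance in clf_a\n",
"get_names = getattr(vectorizer_1, 'get_feature_names_out', None) or vectorizer_1.get_feature_names\n",
"terms = np.array(get_names())\n",
"top = np.argsort(clf_a.feature_importances_)[::-1][:15]\n",
"list(zip(terms[top], clf_a.feature_importances_[top]))"
]
},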
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## configuration 2\n",
"\n",
"* read data"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"names = [\n",
" \"id\",\n",
" \"label\",\n",
" \"statement\",\n",
" \"subjects\",\n",
" \"speaker\",\n",
" \"job\",\n",
" \"state\",\n",
" \"party\",\n",
" \"#barely_true\",\n",
" \"#false\",\n",
" \"#half_true\",\n",
" \"#mostly_true\",\n",
" \"#pants_on_fire\",\n",
" \"context\"\n",
"]\n",
"\n",
"df_2_train = pd.read_csv(\"data/train.tsv\", delimiter='\\t', names=names)\n",
"df_2_test = pd.read_csv(\"data/test.tsv\", delimiter='\\t', names=names)\n",
"df_2_valid= pd.read_csv(\"data/valid.tsv\", delimiter='\\t', names=names)\n",
"\n",
"# use only 'False' and 'True' statements\n",
"df_2_train = df_2_train[df_2_train['label'].isin([\"false\",\"true\"])]\n",
"df_2_test = df_2_test[df_2_test['label'].isin([\"false\",\"true\"])]\n",
"df_2_valid = df_2_valid[df_2_valid['label'].isin([\"false\",\"true\"])]\n",
"\n",
"display(Markdown(\"----\\n#### Train Data:\"))\n",
"display(df_2_train.head())\n",
"display(Markdown(\"----\\n#### Test Data:\"))\n",
"display(df_2_test.head())\n",
"display(Markdown(\"----\\n#### Valid Data:\"))\n",
"display(df_2_valid.head())"
]
},
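{
"cell_type": "markdown",
"metadata": {},
"source": [
"* label distribution after restricting to `true`/`false` (illustrative sketch, just to see how balanced the splits are)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: class balance of the filtered liar dataset splits\n",
"display(df_2_train['label'].value_counts())\n",
"display(df_2_test['label'].value_counts())\n",
"display(df_2_valid['label'].value_counts())"
]
},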
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### tdidf vectorizer on new dataset\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X2 = df_2_train['statement']\n",
"y2 = df_2_train['label']\n",
"Xt2 = df_2_test['statement']\n",
"yt2 = df_2_test['label']\n",
"Xv2 = df_2_valid['statement']\n",
"yv2 = df_2_valid['label']\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vectorizer_2 = TfidfVectorizer(stop_words='english', max_df=0.7)\n",
"vec_train_2 = vectorizer_2.fit_transform(X2)\n",
"vec_test_2 = vectorizer_2.transform(Xt2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"?MLPClassifier"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* trying a MLP as classifier "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn.neural_network import MLPClassifier\n",
"clf_b = MLPClassifier(hidden_layer_sizes=(100,), random_state=4222)\n",
"clf_b.fit(vec_train_2, y2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_classifier(labels=[\"true\", \"false\"], title=\"configuration 2 -- train\", Xt=vec_train_2, yt=y2, clf=clf_b)\n",
"test_classifier(labels=[\"true\", \"false\"], title=\"configuration 2 -- test\", Xt=vec_test_2, yt=yt2, clf=clf_b)\n",
"test_classifier(labels=[\"true\", \"false\"], title=\"configuration 2 -- valid\", Xt=vectorizer_2.transform(Xv2), yt=yv2, clf=clf_b)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## configuration 3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"yt2_c3 = yt2.copy()\n",
"yt2_c3[yt2_c3 == \"true\"] = \"REAL\"\n",
"yt2_c3[yt2_c3 == \"false\"] = \"FAKE\"\n",
"\n",
"test_classifier(labels=[\"REAL\", \"FAKE\"], \n",
" title=\"configuration 3: model a) → dataset 2\",\n",
" Xt=vectorizer_1.transform(Xt2),\n",
" yt=yt2_c3, clf=clf_a)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"yt1_c3 = yt1.copy()\n",
"yt1_c3[yt1_c3 == \"REAL\"] = \"true\"\n",
"yt1_c3[yt1_c3 == \"FAKE\"] = \"false\"\n",
"\n",
"test_classifier(labels=[\"true\", \"false\"], \n",
" title=\"configuration 3: model b) → dataset 1\",\n",
" Xt=vectorizer_2.transform(Xt1),\n",
" yt=yt1_c3, clf=clf_b)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## configuration 4)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def get_dataset3_split(dataset1_in, dataset2_in):\n",
" try:\n",
" print('processing datasets')\n",
" print('ds1=', dataset1_in)\n",
" print('ds2=', dataset2_in)\n",
"\n",
" print('-- fake news')\n",
" df1 = pd.read_csv(dataset1_in, sep=',', usecols=['title','text','label'])\n",
" df1['claim'] = df1[['title', 'text']].apply(lambda x: '. '.join(x), axis=1)\n",
" del df1['title']\n",
" del df1['text']\n",
" df1.rename(index=str, columns={'label': 'y'}, inplace=True)\n",
" print(df1.keys())\n",
" print(len(df1[df1['y']=='REAL']))\n",
" print(len(df1[df1['y']=='FAKE']))\n",
" df1['y'] = np.where(df1['y'] == 'FAKE', 'false', 'true')\n",
" print(len(df1))\n",
"\n",
" print('-- liar liar')\n",
" df2 = pd.read_csv(dataset2_in, sep='\\t', header=None, usecols=[1,2], names=['y', 'claim'])\n",
" print(df2.keys())\n",
" print(set(df2.y), len(df2))\n",
" print(len(df2[df2['y'] == 'true']))\n",
" print(len(df2[df2['y'] == 'false']))\n",
" df2=df2[(df2['y'] == 'true') | (df2['y'] == 'false')]\n",
" print(set(df2.y), len(df2))\n",
"\n",
" df3=pd.concat([df1, df2], ignore_index=True)\n",
"\n",
" print(df3['y'].value_counts())\n",
" print('done')\n",
" return train_test_split(df3['claim'], df3['y'], test_size=0.3, random_state=4222)\n",
" except Exception as e:\n",
" print(e)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"X3, Xt3, y3, yt3 = get_dataset3_split('data/fake_or_real_news.csv', 'data/train.tsv')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vectorizer_3 = TfidfVectorizer(stop_words='english', max_df=0.7)\n",
"vec_train_3 = vectorizer_3.fit_transform(X3)\n",
"vec_test_3 = vectorizer_3.transform(Xt3)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* using MLP again"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"clf_3 = MLPClassifier(hidden_layer_sizes=(16,16), random_state=4222)\n",
"clf_3.fit(vec_train_3, y3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"test_classifier(labels=[\"true\",\"false\"], title=\"Configuration 4 -- train\", Xt=vec_train_3, yt=y3, clf=clf_3)\n",
"test_classifier(labels=[\"true\",\"false\"], title=\"Configuration 4 -- test\", Xt=vec_test_3, yt=yt3, clf=clf_3)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}