Jonas: first stuff for task 2

Jonas Weinz 2018-05-09 16:50:59 +02:00
parent 12081cb5df
commit 42ac5d8d81
3 changed files with 470 additions and 3 deletions

View File

@@ -6,13 +6,25 @@
"source": [
"# NLP-LAB Exercise 01 by jonas weinz\n",
"----\n",
"## links:\n",
"\n",
"* Article: https://miguelmalvarez.com/2017/03/23/how-can-machine-learning-and-ai-help-solving-the-fake-news-problem/\n",
" * corresponding code: https://github.com/kjam/random_hackery/blob/master/Attempting%20to%20detect%20fake%20news.ipynb\n",
"\n",
"* Tutorial on Datacamp: https://www.datacamp.com/community/tutorials/scikit-learn-fake-news\n",
"\n",
"* liar dataset paper: https://www.cs.ucsb.edu/~william/papers/acl2017.pdf\n",
" * dataset: https://www.cs.ucsb.edu/~william/data/liar_dataset.zip\n",
"\n",
"## Dependencies for this Notebook:\n",
"* library [rdflib](https://github.com/RDFLib/rdflib)\n", "* library [rdflib](https://github.com/RDFLib/rdflib)\n",
" * install: `pip3 install rdflib`\n",
"* " "* "
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 4,
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
@@ -24,7 +36,287 @@
}
],
"source": [
"%pylab ipympl"
]
},
{
"cell_type": "code",
"execution_count": 31,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"import numpy as np\n",
"import itertools\n",
"import sklearn.utils as sku\n",
"import os"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Generate/Download Datasets we are working on\n",
"\n",
"* running bash script to download all needed data and store it into the `data` subfolder"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"================================================================================\n",
"downloading and unpacking https://www.cs.ucsb.edu/~william/data/liar_dataset.zip if not already existing\n",
"================================================================================\n",
"UnZip 6.00 of 20 April 2009, by Debian. Original by Info-ZIP.\n",
"\n",
"Latest sources and executables are at ftp://ftp.info-zip.org/pub/infozip/ ;\n",
"see ftp://ftp.info-zip.org/pub/infozip/UnZip.html for other sites.\n",
"\n",
"Compiled with gcc 6.3.0 20170415 for Unix (Linux ELF).\n",
"\n",
"UnZip special compilation options:\n",
" ACORN_FTYPE_NFS\n",
" COPYRIGHT_CLEAN (PKZIP 0.9x unreducing method not supported)\n",
" SET_DIR_ATTRIB\n",
" SYMLINKS (symbolic links supported, if RTL and file system permit)\n",
" TIMESTAMP\n",
" UNIXBACKUP\n",
" USE_EF_UT_TIME\n",
" USE_UNSHRINK (PKZIP/Zip 1.x unshrinking method supported)\n",
" USE_DEFLATE64 (PKZIP 4.x Deflate64(tm) supported)\n",
" UNICODE_SUPPORT [wide-chars, char coding: UTF-8] (handle UTF-8 paths)\n",
" LARGE_FILE_SUPPORT (large files over 2 GiB supported)\n",
" ZIP64_SUPPORT (archives using Zip64 for large files supported)\n",
" USE_BZIP2 (PKZIP 4.6+, using bzip2 lib version 1.0.6, 6-Sept-2010)\n",
" VMS_TEXT_CONV\n",
" WILD_STOP_AT_DIR\n",
" [decryption, version 2.11 of 05 Jan 2007]\n",
"\n",
"UnZip and ZipInfo environment options:\n",
" UNZIP: [none]\n",
" UNZIPOPT: [none]\n",
" ZIPINFO: [none]\n",
" ZIPINFOOPT: [none]\n",
"Archive: liar_dataset.zip\n",
" inflating: README \n",
" inflating: test.tsv \n",
" inflating: train.tsv \n",
" inflating: valid.tsv \n",
"================================================================================\n",
"successfully finished action: downloading and unpacking https://www.cs.ucsb.edu/~william/data/liar_dataset.zip if not already existing\n",
"================================================================================\n",
"================================================================================\n",
"downloading and unpacking https://raw.githubusercontent.com/GeorgeMcIntire/fake_real_news_dataset/master/fake_or_real_news.csv.zip if not already existing\n",
"================================================================================\n",
"Archive: fake_or_real_news.csv.zip\n",
" inflating: fake_or_real_news.csv \n",
" creating: __MACOSX/\n",
" inflating: __MACOSX/._fake_or_real_news.csv \n",
"================================================================================\n",
"successfully finished action: downloading and unpacking https://raw.githubusercontent.com/GeorgeMcIntire/fake_real_news_dataset/master/fake_or_real_news.csv.zip if not already existing\n",
"================================================================================\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 989k 100 989k 0 0 366k 0 0:00:02 0:00:02 --:--:-- 366k\n",
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 11.3M 100 11.3M 0 0 2590k 0 0:00:04 0:00:04 --:--:-- 2751k\n"
]
}
],
"source": [
"%%bash\n",
"./Task_2_gen_data.sh"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Read in fake news table"
]
},
{
"cell_type": "code",
"execution_count": 36,
"metadata": {},
"outputs": [],
"source": [
"df_1 = pd.read_csv('data/fake_or_real_news.csv').set_index('Unnamed: 0')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* display first 10 entries"
]
},
{
"cell_type": "code",
"execution_count": 37,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(6335, 3)"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>title</th>\n",
" <th>text</th>\n",
" <th>label</th>\n",
" </tr>\n",
" <tr>\n",
" <th>Unnamed: 0</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>8476</th>\n",
" <td>You Can Smell Hillarys Fear</td>\n",
" <td>Daniel Greenfield, a Shillman Journalism Fello...</td>\n",
" <td>FAKE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>10294</th>\n",
" <td>Watch The Exact Moment Paul Ryan Committed Pol...</td>\n",
" <td>Google Pinterest Digg Linkedin Reddit Stumbleu...</td>\n",
" <td>FAKE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3608</th>\n",
" <td>Kerry to go to Paris in gesture of sympathy</td>\n",
" <td>U.S. Secretary of State John F. Kerry said Mon...</td>\n",
" <td>REAL</td>\n",
" </tr>\n",
" <tr>\n",
" <th>10142</th>\n",
" <td>Bernie supporters on Twitter erupt in anger ag...</td>\n",
" <td>— Kaydee King (@KaydeeKing) November 9, 2016 T...</td>\n",
" <td>FAKE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>875</th>\n",
" <td>The Battle of New York: Why This Primary Matters</td>\n",
" <td>It's primary day in New York and front-runners...</td>\n",
" <td>REAL</td>\n",
" </tr>\n",
" <tr>\n",
" <th>6903</th>\n",
" <td>Tehran, USA</td>\n",
" <td>\\nIm not an immigrant, but my grandparents ...</td>\n",
" <td>FAKE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>7341</th>\n",
" <td>Girl Horrified At What She Watches Boyfriend D...</td>\n",
" <td>Share This Baylee Luciani (left), Screenshot o...</td>\n",
" <td>FAKE</td>\n",
" </tr>\n",
" <tr>\n",
" <th>95</th>\n",
" <td>Britains Schindler Dies at 106</td>\n",
" <td>A Czech stockbroker who saved more than 650 Je...</td>\n",
" <td>REAL</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4869</th>\n",
" <td>Fact check: Trump and Clinton at the 'commande...</td>\n",
" <td>Hillary Clinton and Donald Trump made some ina...</td>\n",
" <td>REAL</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2909</th>\n",
" <td>Iran reportedly makes new push for uranium con...</td>\n",
" <td>Iranian negotiators reportedly have made a las...</td>\n",
" <td>REAL</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" title \\\n",
"Unnamed: 0 \n",
"8476 You Can Smell Hillarys Fear \n",
"10294 Watch The Exact Moment Paul Ryan Committed Pol... \n",
"3608 Kerry to go to Paris in gesture of sympathy \n",
"10142 Bernie supporters on Twitter erupt in anger ag... \n",
"875 The Battle of New York: Why This Primary Matters \n",
"6903 Tehran, USA \n",
"7341 Girl Horrified At What She Watches Boyfriend D... \n",
"95 Britains Schindler Dies at 106 \n",
"4869 Fact check: Trump and Clinton at the 'commande... \n",
"2909 Iran reportedly makes new push for uranium con... \n",
"\n",
" text label \n",
"Unnamed: 0 \n",
"8476 Daniel Greenfield, a Shillman Journalism Fello... FAKE \n",
"10294 Google Pinterest Digg Linkedin Reddit Stumbleu... FAKE \n",
"3608 U.S. Secretary of State John F. Kerry said Mon... REAL \n",
"10142 — Kaydee King (@KaydeeKing) November 9, 2016 T... FAKE \n",
"875 It's primary day in New York and front-runners... REAL \n",
"6903 \\nIm not an immigrant, but my grandparents ... FAKE \n",
"7341 Share This Baylee Luciani (left), Screenshot o... FAKE \n",
"95 A Czech stockbroker who saved more than 650 Je... REAL \n",
"4869 Hillary Clinton and Donald Trump made some ina... REAL \n",
"2909 Iranian negotiators reportedly have made a las... REAL "
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(df_1.shape)\n",
"display(df_1[:10])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* create test dataset"
]
},
{
@@ -32,7 +324,25 @@
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def create_test_dset(dset, cutoff=0.7):\n",
" shuffled = sku.shuffle(dset)\n",
" y = shuffled.label\n",
" df_1 = shuffled.drop('label', axis=1)\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"cut = 0.7\n",
"\n",
"y = df_1.label\n",
"df_1 = df_1.drop('label', axis=1)\n"
]
},
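{
"cell_type": "markdown",
"metadata": {},
"source": [
"* possible continuation (sketch): shuffle the prepared `df_1`/`y` and split them at the `cut` fraction, mirroring what `create_test_dset` does in one call; `X_train`, `X_test`, `y_train` and `y_test` are placeholder names"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# sketch: shuffle features and labels together, then split them at the `cut`\n",
"# fraction into a training part and a held-out test part (placeholder names)\n",
"X_shuffled, y_shuffled = sku.shuffle(df_1, y)\n",
"n_train = int(len(X_shuffled) * cut)\n",
"X_train, X_test = X_shuffled.iloc[:n_train], X_shuffled.iloc[n_train:]\n",
"y_train, y_test = y_shuffled.iloc[:n_train], y_shuffled.iloc[n_train:]\n",
"display(X_train.shape, X_test.shape)"
]
}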
],
"metadata": {

View File

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
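# downloads and unpacks the liar_dataset and fake_or_real_news archives into ./data
# (each archive is only fetched and unzipped if its zip file does not exist yet)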
# helper functions:
function lineprint {
    printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
}
function message {
    lineprint
    printf "$1\n"
    lineprint
}
current_action="IDLE"
function confirm_action {
    message "successfully finished action: $current_action"
}
function set_action {
    current_action="$1"
    message "$1"
}
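# perform: run a command and print a failure message if it exits with a non-zero status
# perform_and_exit: like perform, but additionally abort the whole script on failure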
function perform {
    "$@"
    local status=$?
    if [ $status -ne 0 ]
    then
        message "$current_action failed!"
    fi
    return $status
}
function perform_and_exit {
    perform "$@" || exit
}
# Downloading and unzipping dataset
D1_URL=https://www.cs.ucsb.edu/~william/data/liar_dataset.zip
D1_ZIP=${D1_URL##*/}
D2_URL=https://raw.githubusercontent.com/GeorgeMcIntire/fake_real_news_dataset/master/fake_or_real_news.csv.zip
D2_ZIP=${D2_URL##*/}
set_action "downloading and unpacking $D1_URL if not already existing"
# testing for unzip:
perform_and_exit unzip -v
perform_and_exit mkdir -p ./data
perform_and_exit cd ./data/
if [ ! -e $D1_ZIP ];
then
    perform_and_exit curl $D1_URL --output ./$D1_ZIP
    perform_and_exit unzip $D1_ZIP
fi
confirm_action
set_action "downloading and unpacking $D2_URL if not already existing"
if [ ! -e $D2_ZIP ];
then
    perform_and_exit curl $D2_URL --output ./$D2_ZIP
    perform_and_exit unzip $D2_ZIP
fi
confirm_action

File diff suppressed because one or more lines are too long