renamed a lot of stuff

This commit is contained in:
Jonas Weinz
2018-07-27 12:11:38 +02:00
parent 6525dbc120
commit a221d9f3b5
12 changed files with 4119 additions and 4108 deletions


@ -0,0 +1,847 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Learner for Emoji classifier 🤓\n",
"**usage:**\n",
"run all cells, then go to the [user interface](#User-Interface)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib inline"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Using TensorFlow backend.\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n",
"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
"[nltk_data] /home/jonas/nltk_data...\n",
"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
"[nltk_data] date!\n",
"[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n"
]
}
],
"source": [
"import twitter_learning as twl\n",
"import glob\n",
"import sys\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"import pickle\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib\n",
"import numpy as np"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## user interface area:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* UI helper functions and global states"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"from IPython.display import clear_output, Markdown, Math\n",
"import ipywidgets as widgets\n",
"\n",
"out_areas = {}\n",
"shown_widgets = {}\n",
"tab_manager = widgets.Tab()\n",
"\n",
"def mp(obj):\n",
" display(Markdown(obj))\n",
"\n",
"def set_widget_visibility(widget_names, visible=True):\n",
" for w in widget_names:\n",
" shown_widgets[w].disabled = not visible\n",
"\n",
"def create_area(area_name:str, list_widgets:list, out_name:str, tab=tab_manager):\n",
" \"\"\"\n",
" creates a table of widgets with corresponding output area below\n",
" \n",
" @param area_name: title of the area\n",
" @param list_widgets: list of tuples: (widget, name:str)\n",
" @param out_name: name for the output area\n",
" \"\"\"\n",
" if out_name is not None:\n",
" out = widgets.Output()\n",
" out_areas[out_name] = out\n",
" h_box_widgets = []\n",
" v_box_widgets = []\n",
" for v in list_widgets:\n",
" for h in v:\n",
" if 'description' in h[0].__dir__() and h[1] is not None:\n",
" h[0].description = h[1]\n",
" if h[1] is not None:\n",
" shown_widgets[h[1]] = h[0]\n",
" h_box_widgets.append(h[0])\n",
" v_box_widgets.append(widgets.HBox(h_box_widgets))\n",
" h_box_widgets = []\n",
" \n",
" if out_name is not None:\n",
" v_box_widgets += [out]\n",
" tab.children = list(tab.children) + [widgets.VBox(v_box_widgets)]\n",
" tab.set_title(len(tab.children) - 1, area_name)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* build UI"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"----"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/markdown": [
"## User Interface"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "e94b33b8493a48798d3adda091986a78",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Tab(children=(VBox(children=(HBox(children=(HTML(value='<b> Data Root Folder: </b> <br> setup the folder conta…"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"mp(\"----\")\n",
"mp(\"## User Interface\")\n",
"# create widgets\n",
"create_area(\"load dataset 💾\",\n",
" [\n",
" [\n",
" (widgets.HTML(\"<b> Data Root Folder: </b> <br> setup the folder containing *.json train data \"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(value=\"./data_en/\"), \"root_path\"),\n",
" (widgets.Button(), \"set_path\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Loading and preprocessing options: </b> <br> setup the range of files to load. Only_emoticons will filter out 'non-smiley' emojis, min_words is the minimum amount of words for one document. Also you can setup top-emoji filtering or only load samples containing a custom emoji set\"), None)\n",
" ],\n",
" [\n",
" (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n",
" (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\"),\n",
" (widgets.Checkbox(value=False,disabled=True), \"apply_lemmatization_and_stemming\"),\n",
" (widgets.BoundedIntText(value=5,min=0, max=10), \"min_words\")\n",
" ],\n",
" [\n",
" #(widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\")\n",
" (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n",
" (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\"),\n",
" (widgets.Text(value=\"\"), \"custom_emojis\")\n",
" ],\n",
" [\n",
" (widgets.Button(disabled=True),\"load_data\")\n",
" ]\n",
" ],\n",
" \"load\")\n",
"\n",
"classifier_tab = widgets.Tab()\n",
"\n",
"create_area(\"keras\",\n",
" [\n",
" [\n",
" (widgets.IntSlider(min=0, max=10), \"n_keras_layer\")\n",
" ],\n",
" [\n",
" (widgets.HBox([]), \"n_keras_neurons\")\n",
" ],\n",
" [\n",
" (widgets.HBox([]), \"keras_funcs\")\n",
" ]\n",
" ],\n",
" None,\n",
" classifier_tab)\n",
"\n",
"create_area(\"create/save/load classifier\",\n",
" [\n",
" [\n",
" (classifier_tab, \"classifier_tab\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Create new Classifier: </b> <br> create a new keras classifier with layer options from above. Also a vectorizer will be trained on loaded sample data. If doc2vec is disabled, TFIDF is used\"), None)\n",
" ],\n",
" [\n",
" (widgets.Checkbox(value=True),\"use_doc2vec\"),\n",
" (widgets.Checkbox(value=True),\"d2v_use_pretrained\"),\n",
" (widgets.IntText(value=100),\"d2v_size\"),\n",
" (widgets.IntText(value=8), \"d2v_window\"),\n",
" (widgets.IntSlider(value=5, min=0, max=32), \"d2v_min_count\")\n",
" ],\n",
" [\n",
" (widgets.Button(), \"create_classifier\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Save Classifier: </b>\"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(), \"classifier_name\"),\n",
" (widgets.Button(), \"save_classifier\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Load Classifier: </b>\"), None)\n",
" ],\n",
" [\n",
" (widgets.Select(options=sorted(glob.glob(\"./*.pipeline\"))), \"clf_file_selector\"),\n",
" (widgets.Text(), \"clf_file\"),\n",
" (widgets.Button(), \"load_classifier\")\n",
" ]\n",
" ],\n",
" \"create\")\n",
"\n",
"create_area(\"train classifier 🎓\", \n",
" [\n",
" [\n",
" (widgets.HTML(\"<b> Custom Batch Settings: </b> <br> (Ignored if batch_size is 0)\"), None)\n",
" ],\n",
" [\n",
" (widgets.IntSlider(value=0,min=0,max=0), \"batch_size\"),\n",
" (widgets.FloatSlider(value=0.15, min=0, max=1), \"val_split\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Train: </b>\"), None)\n",
" ],\n",
" [\n",
" (widgets.IntText(value=1), \"n_epochs\"),\n",
" (widgets.Button(),\"train\")\n",
" ]\n",
" ], \n",
" \"train\" )\n",
"create_area(\"playground 😎\",\n",
" [\n",
" [\n",
" (widgets.HTML(\"<b> predict single sentence </b> <br> (uses min distance to given emojis in prediction_ground_set)\"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(),\"test_input\"),\n",
" (widgets.Text(value=\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\"),\"prediction_ground_set\"),\n",
" (widgets.HTML(\"<h1>∅</h1>\"),\"prediction\"),\n",
" ],\n",
" [\n",
" (widgets.Checkbox(),\"show_sorted_list\"),\n",
" (widgets.Button(),\"show_plot\")\n",
" ],\n",
" [\n",
" (widgets.HTML(\"<b> Test on loaded validation set: </b> <br> (performs prediction plot on all validation samples that are labeled with given emojis)\"), None)\n",
" ],\n",
" [\n",
" (widgets.Text(value=\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\"), \"validation_emojis\"),\n",
" (widgets.Button(),\"show_validation_plot\")\n",
" ]\n",
" ],\n",
" \"playground\")\n",
"\n",
"tab_manager"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## global variables:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"sdm = None\n",
"pm = None\n",
"tr = None"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## pretty jupyter print"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"import collections\n",
"import traceback\n",
"from pprint import pprint as pp\n",
"\n",
"def jupyter_print(obj, cell_w = 10, headers=None, p_type=True, ret_mdown=False, index_offset=0, list_horizontal=False):\n",
" \"\"\"\n",
" pretty hacky function to convert arrays, lists and matrices into\n",
" nice readable markdown code and render that in jupyter. if that is not possible\n",
" it will use pretty print instead\n",
" \"\"\"\n",
" try:\n",
" ts = \"**Type:** \" + str(type(obj)).strip(\"<>\") + \"\\n\\n\"\n",
" if type(obj) == str:\n",
" display(Markdown(obj))\n",
" elif isinstance(obj, collections.Iterable):\n",
" if isinstance(obj[0], collections.Iterable) and type(obj[0]) is not str:\n",
" # we have a table\n",
" \n",
" if headers is None:\n",
" headers = [str(i) for i in range(len(obj[0]))]\n",
" \n",
" if len(headers) < len(obj[0]):\n",
" headers += [\" \" for i in range(len(obj[0]) - len(headers))]\n",
" \n",
" s = \"|\" + \" \" * cell_w + \"|\"\n",
" \n",
" for h in headers:\n",
" s += str(h) + \" \" * (cell_w - len(h)) + \"|\"\n",
" s += \"\\n|\" + \"-\" * (len(headers) + (len(headers) + 1) * cell_w) + \"|\\n\"\n",
" \n",
" #s = (\"|\" + (\" \" * (cell_w))) * len(obj[0]) + \"|\\n\" + \"|\" + (\"-\" * (cell_w + 1)) * len(obj[0])\n",
" #s += '|\\n'\n",
" \n",
" row = index_offset\n",
" \n",
" for o in obj:\n",
" s += \"|**\" + str(row) + \"**\" + \" \" * (cell_w - (len(str(row))+4))\n",
" row += 1\n",
" for i in o:\n",
" s += \"|\" + str(i) + \" \" * (cell_w - len(str(i)))\n",
" s+=\"|\" + '\\n'\n",
" s += ts\n",
" display(Markdown(s))\n",
" return s if ret_mdown else None\n",
" else:\n",
" # we have a list\n",
" \n",
" \n",
" if headers is None:\n",
" headers = [\"index\",\"value\"]\n",
" \n",
" index_title = headers[0]\n",
" value_title = headers[1]\n",
" \n",
" s = \"|\" + index_title + \" \" * (cell_w - len(value_title)) + \"|\" + value_title + \" \" * (cell_w - len(value_title)) + \"|\" + '\\n'\n",
" s += \"|\" + \"-\" * (1 + 2 * cell_w) + '|\\n'\n",
" i = index_offset\n",
" for o in obj:\n",
" s_i = str(i)\n",
" s_o = str(o)\n",
" s += \"|\" + s_i + \" \" * (cell_w - len(s_i)) + \"|\" + s_o + \" \" * (cell_w - len(s_o)) + \"|\" + '\\n'\n",
" i+=1\n",
" s += ts\n",
" #print(s)\n",
" display(Markdown(s))\n",
" return s if ret_mdown else None\n",
" else:\n",
" jupyter_print([obj])\n",
" except Exception as e:\n",
" print(ts)\n",
" pp(obj) \n",
"\n",
"jp = jupyter_print"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## output progress printing:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"class progress_indicator(object):\n",
" \n",
" def __init__(self, description=\"progress\"):\n",
" self.w = widgets.FloatProgress(value=0, min=0,max=1, description = description)\n",
" display(self.w)\n",
" def update(self, val):\n",
" self.w.value = val\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## load datasets"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def set_path(b):\n",
" with out_areas[\"load\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" files = sorted(glob.glob(shown_widgets[\"root_path\"].value + \"/*.json\"))\n",
" \n",
" if len(files) == 0:\n",
" sys.stderr.write(\"ERROR: no json files available in \" + shown_widgets[\"root_path\"].value + \"\\n\")\n",
" set_widget_visibility([\"file_range\",\n",
" \"only_emoticons\",\n",
" \"n_top_emojis\",\n",
" \"apply_lemmatization_and_stemming\",\n",
" \"load_data\"], False)\n",
" return\n",
" \n",
" mp(\"**available files:**\")\n",
" jp(files, headers=[\"fileindex\",\"filepath\"])\n",
" set_widget_visibility([\"file_range\",\n",
" \"only_emoticons\",\n",
" \"n_top_emojis\",\n",
" \"apply_lemmatization_and_stemming\",\n",
" \"load_data\"], True)\n",
" shown_widgets[\"file_range\"].min=0\n",
" shown_widgets[\"file_range\"].max=len(files) -1\n",
"\n",
"def load_data(b):\n",
" global sdm\n",
" with out_areas[\"load\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" \n",
" r = shown_widgets[\"file_range\"].value\n",
" r = (r[0], r[1] + 1) # range has to be exclusive according to the last element!\n",
" \n",
" p_r = progress_indicator(\"reading progress\")\n",
" \n",
" lemm_and_stemm = shown_widgets[\"apply_lemmatization_and_stemming\"].value\n",
" \n",
" if lemm_and_stemm:\n",
" p_s = progress_indicator(\"stemming progress\")\n",
" \n",
" emoji_mean = shown_widgets[\"label_criteria\"].value == \"mean\"\n",
" \n",
" custom_emojis = list(shown_widgets[\"custom_emojis\"].value)\n",
" \n",
" min_words = shown_widgets[\"min_words\"].value\n",
" \n",
" sdm = twl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
" n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
" file_range=range(r[0], r[1]),\n",
" n_kmeans_cluster=-1,\n",
" read_progress_callback=p_r.update,\n",
" stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
" apply_stemming = lemm_and_stemm,\n",
" emoji_mean=emoji_mean,\n",
" custom_target_emojis=custom_emojis if len(custom_emojis) > 0 else None,\n",
" min_words=min_words)\n",
" shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
" \n",
" \n",
"# linking functions with buttons:\n",
"shown_widgets[\"set_path\"].on_click(set_path)\n",
"shown_widgets[\"load_data\"].on_click(load_data)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## train"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def train(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"train\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if sdm is None or pm is None:\n",
" sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n",
" return\n",
" \n",
" batch_size = shown_widgets[\"batch_size\"].value\n",
" val_split = shown_widgets[\"val_split\"].value\n",
" n_epochs = shown_widgets[\"n_epochs\"].value\n",
" \n",
" print(\"update train test split:\")\n",
" sdm.create_train_test_split(split=val_split)\n",
" \n",
" print(\"fit\")\n",
" \n",
" p = progress_indicator()\n",
" \n",
" tr = twl.trainer(sdm=sdm, pm=pm)\n",
" tr.fit(progress_callback=p.update, batch_size=batch_size if batch_size > 0 else None, n_epochs=n_epochs)\n",
" \n",
"\n",
"# linking:\n",
"shown_widgets[\"train\"].on_click(train)\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## create classifier"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"keras_acivations = [\n",
" \"softmax\",\n",
" \"elu\",\n",
" \"selu\",\n",
" \"softplus\",\n",
" \"softsign\",\n",
" \"relu\",\n",
" \"tanh\",\n",
" \"sigmoid\",\n",
" \"hard_sigmoid\",\n",
" \"linear\",\n",
" \"None\"\n",
"]\n",
"\n",
"def populate_keras_options(b):\n",
" n_layers = shown_widgets[\"n_keras_layer\"].value\n",
" hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n",
" hbox_funcs = shown_widgets[\"keras_funcs\"]\n",
" \n",
" hbox_neurons.children = [widgets.IntText(description = str(i)) for i in range(n_layers)]\n",
" hbox_funcs.children = [widgets.Dropdown(options=keras_acivations,description = str(i)) for i in range(n_layers)]\n",
" \n",
" #hbox_neurons.children[-1].disabled = True\n",
"\n",
"def create_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if sdm is None:\n",
" sys.stderr.write(\"load a dataset first!\\n\")\n",
" return\n",
" \n",
" chosen_classifier = classifier_tab.get_title(classifier_tab.selected_index)\n",
" \n",
" mp(\"**chosen classifier**: `\" + chosen_classifier + \"`\")\n",
" \n",
" # creating the vectorizer\n",
" vectorizer = None\n",
" if shown_widgets[\"use_doc2vec\"].value:\n",
" if shown_widgets[\"d2v_use_pretrained\"].value:\n",
" vectorizer = pickle.load( open( \"doc2VecModel.p\", \"rb\" ) )\n",
" else:\n",
" vectorizer = twl.skd2v.Doc2VecTransformer(size=shown_widgets[\"d2v_size\"].value,\n",
" window=shown_widgets[\"d2v_window\"].value,\n",
" min_count=shown_widgets[\"d2v_min_count\"].value)\n",
" else:\n",
" vectorizer=TfidfVectorizer(stop_words='english')\n",
" \n",
" # TODO: add more classifier options here:\n",
" if chosen_classifier is 'keras':\n",
" sdm.create_train_test_split()\n",
" \n",
" n_layers = shown_widgets[\"n_keras_layer\"].value\n",
" hbox_neurons = shown_widgets[\"n_keras_neurons\"]\n",
" hbox_funcs = shown_widgets[\"keras_funcs\"]\n",
"\n",
" layers = []\n",
" for i in range(n_layers):\n",
" func = hbox_funcs.children[i].value\n",
" if func == 'None':\n",
" func = None\n",
" layers.append((hbox_neurons.children[i].value, func))\n",
" \n",
" # modify last layer:\n",
" layers[-1] = (sdm.y.shape[1], layers[-1][1])\n",
" \n",
" mp(\"**layers:** \")\n",
" jp(layers, headers=['#neurons', 'activation_func'])\n",
"\n",
" pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer, layers=layers, sdm=sdm, fit_vectorizer=not shown_widgets[\"d2v_use_pretrained\"].value)\n",
"\n",
"def save_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if pm is None:\n",
" sys.stderr.write(\"ERROR: create classifier first\")\n",
" return\n",
" \n",
" pm.save(shown_widgets[\"classifier_name\"].value)\n",
"\n",
"def load_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
"\n",
"def update_file_selector(b):\n",
" shown_widgets[\"clf_file_selector\"].options = sorted(glob.glob(\"./*.pipeline\"))\n",
"\n",
"def clf_file_selector(b):\n",
" shown_widgets[\"clf_file\"].value = shown_widgets[\"clf_file_selector\"].value\n",
" update_file_selector(b)\n",
"\n",
"def load_classifier(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" with out_areas[\"create\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" clf_file = shown_widgets[\"clf_file\"].value\n",
" pm = stl.pipeline_manager.load_from_pipeline_file(clf_file)\n",
" \n",
"\n",
"# link\n",
"shown_widgets[\"n_keras_layer\"].observe(populate_keras_options)\n",
"shown_widgets[\"create_classifier\"].on_click(create_classifier)\n",
"shown_widgets[\"save_classifier\"].on_click(save_classifier)\n",
"shown_widgets[\"load_classifier\"].on_click(load_classifier)\n",
"shown_widgets[\"clf_file_selector\"].observe(clf_file_selector)\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## plotting stuff for testing area"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"def sentiment_score(s):\n",
" #(pos, neg, neu)^T\n",
" return s[0] - s[1]\n",
"\n",
"def plot_sentiment_space(predicted_sentiment_vectors, top_sentiments, top_emojis):\n",
" # sentiment score axis\n",
" top_X = np.array([sentiment_score(x) for x in top_sentiments])\n",
" pred_X = np.array([sentiment_score(x) for x in predicted_sentiment_vectors])\n",
" \n",
" # neutral axis:\n",
" top_Y = np.array([x[2] for x in top_sentiments])\n",
" pred_Y = np.array([x[2] for x in predicted_sentiment_vectors])\n",
" \n",
" fig_1, ax_1 = plt.subplots()#figsize=(15,10))\n",
" plt.title(\"sentiment-score-plot\")\n",
" plt.xlabel(\"sentiment score\")\n",
" plt.ylabel(\"neutrality\")\n",
" plt.xlim([-1,1])\n",
" plt.ylim([0,1])\n",
" for i in range(len(top_X)):\n",
" plt.text(top_X[i], top_Y[i], top_emojis[i])\n",
" plt.plot(pred_X, pred_Y, 'bo')\n",
" #plt.savefig(title + \" -- sentiment-plot.png\", bbox_inches='tight')\n",
" \n",
" # sentiment score axis\n",
" top_X = np.array([x[0] for x in top_sentiments])\n",
" pred_X = np.array([x[0] for x in predicted_sentiment_vectors])\n",
" \n",
" # neutral axis:\n",
" top_Y = np.array([x[1] for x in top_sentiments])\n",
" pred_Y = np.array([x[1] for x in predicted_sentiment_vectors])\n",
" \n",
" fig_2, ax_2 = plt.subplots()#figsize=(15,10))\n",
" plt.title(\"positive-negative-plot\")\n",
" plt.xlabel(\"positive\")\n",
" plt.ylabel(\"negative\")\n",
" plt.xlim([0,1])\n",
" plt.ylim([0,1])\n",
" for i in range(len(top_X)):\n",
" plt.text(top_X[i], top_Y[i], top_emojis[i])\n",
" plt.plot(pred_X, pred_Y, 'bo')\n",
" #plt.savefig(title + \" -- positive-negative-plot.png\", bbox_inches='tight')\n",
" plt.show()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## testing area"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"top_20 = list(\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\")\n",
"top_20_sents = twl.emoji2sent(top_20)\n",
"\n",
"pred = None\n",
"\n",
"def test_input(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" global pred\n",
" with out_areas[\"playground\"]:\n",
" clear_output()\n",
" mp(\"----\")\n",
" if pm is None:\n",
" sys.stderr.write(\"ERROR: load or create classifier first\")\n",
" return\n",
" X = shown_widgets[\"test_input\"].value\n",
" pred = pm.predict([X])\n",
" target_list=list(shown_widgets[\"prediction_ground_set\"].value)\n",
" shown_widgets[\"prediction\"].value = \"<h1> \" + str(twl.sent2emoji(pred,custom_target_emojis=target_list)[0]) + \"</h1>\"\n",
" if shown_widgets[\"show_sorted_list\"].value:\n",
" mp(\"## \" + \"\".join(twl.edist.sentiment_vector_to_emoji(pred, only_emoticons=True, n_results=100, custom_target_emojis=target_list)))\n",
" \n",
"\n",
"\n",
"def plot_pred(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" global pred\n",
" with out_areas[\"playground\"]:\n",
" plot_sentiment_space(pred, top_20_sents, top_20)\n",
" \n",
" \n",
"def plot_subset_pred(b):\n",
" global sdm\n",
" global pm\n",
" global tr\n",
" global pred\n",
" with out_areas[\"playground\"]:\n",
" clear_output()\n",
" \n",
" if sdm is None or pm is None:\n",
" sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n",
" return\n",
" \n",
" if tr is None:\n",
" tr = twl.trainer(sdm=sdm, pm=pm)\n",
" \n",
" pred, y = tr.test(emoji_subset=list(shown_widgets[\"validation_emojis\"].value))\n",
" print(len(pred))\n",
" plot_sentiment_space(pred, top_20_sents, top_20)\n",
"\n",
"#link\n",
"shown_widgets[\"test_input\"].observe(test_input)\n",
"shown_widgets[\"show_plot\"].on_click(plot_pred)\n",
"shown_widgets[\"show_validation_plot\"].on_click(plot_subset_pred)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@ -0,0 +1,35 @@
# Advanced Approach
----
## Folder Overview
| Filename | short_description |
| -------------------------------------------- | ------------------------------------------------------------ |
| twitter_learning.py / twitter_learning.ipynb | module containing the main classes for the learning process |
| Learner.ipynb | notebook containing a user interface to control the learn process |
| Evaluation_sentiment_dataset.ipynb | notebook creating an evaluation on the sentiment dataset |
----
## twitter_learning.py
TODO
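Until the full documentation is written, here is a minimal usage sketch taken from the training cell at the end of twitter_learning.ipynb (the path, emoji count and layer sizes are just the example values used there):

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from twitter_learning import sample_data_manager, pipeline_manager, trainer

# read and preprocess the first json file, keep only samples with one of the 20 most frequent emojis:
sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1))
sdm.create_train_test_split()

# TF-IDF vectorizer followed by a small keras model mapping into the sentiment space:
pm = pipeline_manager.create_keras_pipeline_with_vectorizer(
    vectorizer=TfidfVectorizer(stop_words='english'),
    layers=[(2500, 'relu'), (sdm.y.shape[1], None)],
    sdm=sdm)

# fit on at most 100 samples and get predictions for the held-out test split:
tr = trainer(sdm=sdm, pm=pm)
tr.fit(100)
pred, teacher = tr.test()
```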
----
## Learner.ipynb
This file provides all controls for the training process and feeds the classifier with data. To use it, run all cells and jump to the user interface part. Some explanations of the options follow:
### load datasets
![1532531542185](README.assets/1532531542185.png)
----
## Evaluation_sentiment_dataset.ipynb
TODO

Binary file not shown.


@ -0,0 +1,848 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"from IPython.display import clear_output, Markdown, Math\n",
"import ipywidgets as widgets\n",
"import os\n",
"import glob\n",
"import json\n",
"import numpy as np\n",
"import itertools\n",
"import sklearn.utils as sku\n",
"from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer\n",
"from sklearn.cluster import KMeans\n",
"import nltk\n",
"from keras.models import load_model\n",
"from sklearn.externals import joblib\n",
"import pickle\n",
"import operator\n",
"from sklearn.pipeline import Pipeline\n",
"import json\n",
"nltk.download('punkt')\n",
"nltk.download('averaged_perceptron_tagger')\n",
"nltk.download('wordnet')"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"sys.path.append(\"..\")\n",
"\n",
"import Tools.Emoji_Distance as edist\n",
"\n",
"def emoji2sent(emoji_arr, only_emoticons=True):\n",
" return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])\n",
"\n",
"def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):\n",
" return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"SINGLE_LABEL = True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## classes and functions we are using later:\n",
"----"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* functions for selecting items from a set / list"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def latest(lst):\n",
" return lst[-1] if len(lst) > 0 else 'X' \n",
"def most_common(lst):\n",
" # trying to find the most common used emoji in the given lst\n",
" return max(set(lst), key=lst.count) if len(lst) > 0 else \"X\" # setting label to 'X' if there is an empty emoji list"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* our emoji blacklist (skin and sex modifiers)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# defining blacklist for modifier emojis:\n",
"emoji_blacklist = set([\n",
" chr(0x1F3FB),\n",
" chr(0x1F3FC),\n",
" chr(0x1F3FD),\n",
" chr(0x1F3FE),\n",
" chr(0x1F3FF),\n",
" chr(0x2642),\n",
" chr(0x2640)\n",
"])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* lemmatization helper functions"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from nltk.stem.snowball import SnowballStemmer\n",
"from nltk.stem import WordNetLemmatizer\n",
"from nltk import pos_tag\n",
"from nltk import word_tokenize\n",
"from nltk.corpus import wordnet\n",
"\n",
"def get_wordnet_pos(treebank_tag):\n",
"\n",
" if treebank_tag.startswith('J'):\n",
" return wordnet.ADJ\n",
" elif treebank_tag.startswith('V'):\n",
" return wordnet.VERB\n",
" elif treebank_tag.startswith('N'):\n",
" return wordnet.NOUN\n",
" elif treebank_tag.startswith('R'):\n",
" return wordnet.ADV\n",
" else:\n",
" return wordnet.NOUN"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### sample data manager\n",
"the sample data manager loads and preprocesses data\n",
"most common way to use:\n",
"\n",
"\n",
"* `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)`\n",
"\n",
" * Generates a sample_data_manager object and preprocess data in one step\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class sample_data_manager(object):\n",
" @staticmethod\n",
" def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):\n",
" \"\"\"\n",
" generate, read and process train data in one step.\n",
" \n",
" @param path: folder containing json files to process\n",
" @param only_emoticons: if True, only messages containing emoticons (provided by Tools.Emoji_Distance) are used\n",
" @param apply_stemming: apply stemming and lemmatization on dataset\n",
" @param n_top_emojis: only use messages containing one of <`n_top_emojis`>-top emojis. set to `-1` to prevent top emoji filtering\n",
" @param file_range: range of file's indices to read (eg `range(3)` to read the first three files). If `None`: all files are read\n",
" @param n_kmeans_cluster: generating multilabeled labels with kmeans with these number of clusters. Set to -1 to use the plain sentiment space as label\n",
" \n",
" @return: sample_data_manager object\n",
" \"\"\"\n",
" sdm = sample_data_manager(path)\n",
" sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)\n",
" if apply_stemming:\n",
" sdm.apply_stemming_and_lemmatization()\n",
" \n",
" sdm.generate_emoji_count_and_weights()\n",
" \n",
" if n_top_emojis > 0:\n",
" sdm.filter_by_top_emojis(n_top=n_top_emojis)\n",
" \n",
" if n_kmeans_cluster > 0:\n",
" sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)\n",
" \n",
" return sdm\n",
" \n",
" \n",
" def __init__(self, data_root_folder:str):\n",
" \"\"\"\n",
" constructor for manual initialization\n",
" \n",
" @param data_root_folder: folder containing json files to process\n",
" \"\"\"\n",
" self.data_root_folder = data_root_folder\n",
" self.json_files = sorted(glob.glob(self.data_root_folder + \"/*.json\"))\n",
" self.n_files = len(self.json_files)\n",
" self.raw_data = None\n",
" self.emojis = None\n",
" self.plain_text = None\n",
" self.labels = None\n",
" self.emoji_count = None\n",
" self.emoji_weights = None\n",
" self.X = None\n",
" self.y = None\n",
" self.Xt = None\n",
" self.yt = None\n",
" self.top_emojis = None\n",
" self.binary_labels = None\n",
" self.use_binary_labels = False\n",
" self.kmeans_cluster = None\n",
" self.label_binarizer = None\n",
" \n",
" def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):\n",
" \"\"\"\n",
" reading (multiple) files to one panda table.\n",
" \n",
" @param file_index_range: range of file's indices to read (eg `range(3)` to read the first three files)\n",
" @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance\n",
" \"\"\"\n",
" assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files\n",
" for i in file_index_range:\n",
" print(\"reading file: \" + self.json_files[i] + \"...\")\n",
" if self.raw_data is None:\n",
" self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n",
" else:\n",
" self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n",
" if progress_callback is not None:\n",
" progress_callback()\n",
" self.emojis = self.raw_data['EMOJI']\n",
" self.plain_text = self.raw_data['text']\n",
" \n",
" # replacing keywords. TODO: maybe these information can be extracted and used\n",
" self.plain_text = self.plain_text.str.replace(\"(<EMOJI>|<USER>|<HASHTAG>)\",\"\").str.replace(\"[\" + \"\".join(list(emoji_blacklist)) + \"]\",\"\")\n",
" \n",
" # so far filtering for the latest emoji. TODO: maybe there are also better approaches\n",
" self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons )\n",
" \n",
" # and filter out all samples we have no label for:\n",
" wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1)) \n",
"\n",
" self.labels = self.labels[np.invert(wrong_labels)]\n",
" self.plain_text = self.plain_text[np.invert(wrong_labels)]\n",
" self.emojis = self.emojis[np.invert(wrong_labels)]\n",
" \n",
" print(\"imported \" + str(len(self.labels)) + \" samples\")\n",
" \n",
" def apply_stemming_and_lemmatization(self):\n",
" \"\"\"\n",
" apply stemming and lemmatization to plain text samples\n",
" \"\"\"\n",
" stemmer = SnowballStemmer(\"english\")\n",
" for key in self.plain_text.keys():\n",
" stemmed_sent = []\n",
" for word in self.plain_text[key].split(\" \"):\n",
" word_stemmed = stemmer.stem(word)\n",
" stemmed_sent.append(word_stemmed)\n",
" stemmed_sent = (\" \").join(stemmed_sent)\n",
" self.plain_text[key] = stemmed_sent\n",
" \n",
" lemmatizer = WordNetLemmatizer()\n",
" for key in self.plain_text.keys():\n",
" lemmatized_sent = []\n",
" sent_pos = pos_tag(word_tokenize(self.plain_text[key]))\n",
" for word in sent_pos:\n",
" wordnet_pos = get_wordnet_pos(word[1].lower())\n",
" word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)\n",
" lemmatized_sent.append(word_lemmatized)\n",
" lemmatized_sent = (\" \").join(lemmatized_sent)\n",
" self.plain_text[key] = lemmatized_sent\n",
" \n",
" def generate_emoji_count_and_weights(self):\n",
" \"\"\"\n",
" counting occurences of emojis\n",
" \"\"\"\n",
" self.emoji_count = {}\n",
" for e_list in self.emojis:\n",
" for e in set(e_list):\n",
" if e not in self.emoji_count:\n",
" self.emoji_count[e] = 0\n",
" self.emoji_count[e] += 1\n",
" \n",
" emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])\n",
"\n",
" self.emoji_weights = {}\n",
" for e in self.emoji_count:\n",
" # tfidf for emojis\n",
" self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))\n",
"\n",
" weights_sum= sum([self.emoji_weights[x] for x in self.emoji_weights])\n",
"\n",
" # normalize:\n",
" for e in self.emoji_weights:\n",
" self.emoji_weights[e] = self.emoji_weights[e] / weights_sum\n",
"\n",
" self.emoji_weights['X'] = 0 # dummy values\n",
" self.emoji_count['X'] = 0\n",
" \n",
" def get_emoji_count(self):\n",
" \"\"\"\n",
" @return: descending list of tuples in form (<emoji as character>, <emoji count>) \n",
" \"\"\"\n",
" assert self.emoji_count is not None\n",
" \n",
" sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))\n",
" #display(sorted_emoji_count)\n",
" return sorted_emoji_count\n",
" \n",
" def filter_by_top_emojis(self,n_top = 20):\n",
" \"\"\"\n",
" filgter out messages not containing one of the `n_top` emojis\n",
" \n",
" @param n_top: number of top emojis used for filtering\n",
" \"\"\"\n",
" assert self.labels is not None # ← messages are already read in\n",
" \n",
" self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]\n",
" in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n",
" self.labels = self.labels[in_top]\n",
" self.plain_text = self.plain_text[in_top]\n",
" self.emojis = self.emojis[in_top]\n",
" print(\"remaining samples after top emoji filtering: \", len(self.labels))\n",
" \n",
" def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):\n",
" \"\"\"\n",
" generate binary labels using kmeans.\n",
" \n",
" @param only_emoticons: set whether we're using the full emoji set or only emoticons\n",
" @param n_clusters: number of cluster we're generating in emoji's sentiment space\n",
" \"\"\"\n",
" assert self.labels is not None\n",
" array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors\n",
" array_sentiment_vectors = np.array(array_sentiment_vectors)\n",
" \n",
" list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis\n",
" self.use_binary_labels = True\n",
" print(\"clustering following emojis: \" + \"\".join(list_emojis) + \"...\")\n",
" self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)\n",
" print(\"clustering done\")\n",
" self.label_binarizer = LabelBinarizer()\n",
" \n",
" multiclass_labels = self.kmeans_cluster.predict(self.labels)\n",
" \n",
" # FIXME: we have to guarantee that in every dataset all classes occur.\n",
" # otherwise batch fitting is not possible!\n",
" # (or we have to precompute the mlb fitting process somewhere...)\n",
" self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)\n",
" \n",
" \n",
" def create_train_test_split(self, split = 0.1, random_state = 4222):\n",
" assert self.plain_text is not None and self.labels is not None\n",
" if self.X is not None:\n",
" sys.stderr.write(\"WARNING: overwriting existing train/test split \\n\")\n",
" \n",
" labels = self.binary_labels if self.use_binary_labels else self.labels\n",
" assert labels is not None\n",
" self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* the pipeline manager saves and stores sklearn pipelines. Keras models are handled differently, so the have to be named explicitly during save and load operations"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class pipeline_manager(object):\n",
" @staticmethod\n",
" def load_from_pipeline_file(pipeline_file:str):\n",
" \"\"\"\n",
" loading a json configuration file and using it's paramters to call 'load_pipeline_from_files'\n",
" \"\"\"\n",
" with open(pipeline_file, 'r') as f:\n",
" d = json.load(f)\n",
" \n",
" keras_models = d['keras_models']\n",
" all_models = d['all_models']\n",
" \n",
" return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)\n",
"\n",
"\n",
" @staticmethod\n",
" def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n",
" \"\"\"\n",
" load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'\n",
" \n",
" @param file_prefix: basename of all files (without extension)\n",
" @param keras_models: list of keras models (keras model files, only extension name). Leave this list empty if this is not a keras pipeline\n",
" @param all_models: list of all models (including keras_models, only extension name).\n",
" \n",
" @return a pipeline manager object\n",
" \"\"\"\n",
" \n",
" pm = pipeline_manager(keras_models=keras_models)\n",
" pm.load(file_prefix, all_models)\n",
" return pm\n",
" \n",
" @staticmethod\n",
" def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None):\n",
" '''\n",
" creates pipeline with vectorizer and keras classifier\n",
" \n",
" @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n",
" @param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_func:str>)\n",
" @param sdm: sample data manager to get data for the vectorizer\n",
" @param loss: set keras loss function. Depending whether sdm use multiclass labels `categorical_crossentropy` or `mean_squared_error` is used as default\n",
" @param optimizer: set keras optimizer. Depending whether sdm use multiclass labels `sgd` or `adam` is used as default\n",
" \n",
" @return: a pipeline manager object\n",
" \n",
" '''\n",
" from keras.models import Sequential\n",
" from keras.layers import Dense\n",
" \n",
" if sdm.X is None:\n",
" sdm.create_train_test_split()\n",
" \n",
" vec_train = vectorizer.fit_transform(sdm.X)\n",
" vec_test = vectorizer.transform(sdm.Xt)\n",
" # creating keras model:\n",
" model=Sequential()\n",
" \n",
" keras_layers = []\n",
" first_layer = True\n",
" for layer in layers:\n",
" if first_layer:\n",
" model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([\" \"])[0]._shape[1]))\n",
" first_layer = False\n",
" else:\n",
" model.add(Dense(units=layer[0], activation=layer[1]))\n",
" \n",
" if sdm.use_binary_labels: \n",
" loss_function = loss if loss is not None else 'categorical_crossentropy'\n",
" optimizer_function = optimizer if optimizer is not None else 'sgd'\n",
" model.compile(loss=loss_function,\n",
" optimizer=optimizer_function,\n",
" metrics=['accuracy'])\n",
" else:\n",
" loss_function = loss if loss is not None else 'mean_squared_error'\n",
" optimizer_function = optimizer if optimizer is not None else 'adam'\n",
" model.compile(loss=loss_function,\n",
" optimizer=optimizer_function)\n",
" \n",
" pipeline = Pipeline([\n",
" ('vectorizer',vectorizer),\n",
" ('keras_model', model)\n",
" ])\n",
" \n",
" return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])\n",
" \n",
" @staticmethod\n",
" def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):\n",
" '''\n",
" creates pipeline with vectorizer and non-keras classifier\n",
" \n",
" @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n",
" @param classifier: unfitted classifier object (should be compatible with all sklearn classifiers)\n",
" @param sdm: sample data manager to get data for the vectorizer\n",
" \n",
" @return: a pipeline manager object\n",
" '''\n",
" if sdm is not None:\n",
" if sdm.X is None:\n",
" sdm.create_train_test_split()\n",
"\n",
" vec_train = vectorizer.fit_transform(sdm.X)\n",
" vec_test = vectorizer.transform(sdm.Xt)\n",
" \n",
" pipeline = Pipeline([\n",
" ('vectorizer',vectorizer),\n",
" ('classifier', classifier)\n",
" ])\n",
" \n",
" return pipeline_manager(pipeline=pipeline, keras_models=[])\n",
" \n",
" def __init__(self, pipeline = None, keras_models = []):\n",
" \"\"\"\n",
" constructor\n",
" \n",
" @param pipeline: a sklearn pipeline\n",
" @param keras_models: list of keras steps in pipeline. Neccessary because saving and loading from keras models differs from the scikit ones\n",
" \"\"\"\n",
" \n",
" self.pipeline = pipeline\n",
" self.additional_objects = {}\n",
" self.keras_models = keras_models\n",
" \n",
" def save(self, prefix:str):\n",
" \"\"\"\n",
" saving the pipeline. It generates one file per model in the form: '<prefix>.<model_name>'\n",
" \n",
" @param prefix: file prefix for all models\n",
" \"\"\"\n",
" \n",
"\n",
" print(self.keras_models)\n",
" # doing this like explained here: https://stackoverflow.com/a/43415459\n",
" for step in self.pipeline.named_steps:\n",
" if step in self.keras_models:\n",
" self.pipeline.named_steps[step].model.save(prefix + \".\" + step)\n",
" else:\n",
" joblib.dump(self.pipeline.named_steps[step], prefix + \".\" + str(step))\n",
" \n",
" load_command = \"pipeline_manager.load_pipeline_from_files( '\"\n",
" load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n",
" load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n",
"\n",
" with open(prefix + '.pipeline', 'w') as outfile:\n",
" json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)\n",
" \n",
" import __main__ as main\n",
" if not hasattr(main, '__file__'):\n",
" display(\"saved pipeline. It can be loaded the following way:\")\n",
" display(Markdown(\"> ```\\n\"+load_command+\"\\n```\")) # ← if we're in jupyter, print the fancy way :)\n",
" else:\n",
" print(\"saved pipeline. It can be loaded the following way:\")\n",
" print(load_command)\n",
" \n",
" \n",
" def load(self, prefix:str, models = []):\n",
" \"\"\"\n",
" load a pipeline. A pipeline should be represented by multiple model files in the form '<prefix>.<model_name>'\n",
" NOTE: keras model names (if there are some) have to be defined in self.keras_models first!\n",
" \n",
" @param prefix: the prefix for all model files\n",
" @param models: model_names to load\n",
" \"\"\"\n",
" self.pipeline = None\n",
" model_list = []\n",
" for model in models:\n",
" if model in self.keras_models:\n",
" model_list.append((model, load_model(prefix + \".\" + model)))\n",
" else:\n",
" model_list.append((model, joblib.load(prefix+\".\" + model)))\n",
" self.pipeline = Pipeline(model_list)\n",
" \n",
" def fit(self,X,y):\n",
" \"\"\"fitting the pipeline\"\"\"\n",
" self.pipeline.fit(X,y)\n",
" \n",
" def predict(self,X):\n",
" \"\"\"predict\"\"\"\n",
" return self.pipeline.predict(X)\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* the trainer class passes Data from the sample manager to the pipeline manager"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"class trainer(object):\n",
" def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):\n",
" \"\"\"constructor\"\"\"\n",
" self.sdm = sdm\n",
" self.pm = pm\n",
" \n",
" def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):\n",
" \"\"\"\n",
" fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline models containing the vectorizer have to be named explicitly\n",
" \n",
" @param max_size: don't train more examples than that number\n",
" @param disabled_fit_steps: list of pipeline steps that we want to prevent to refit. Normally all vectorizer steps\n",
" \"\"\"\n",
" # TODO: make batch fitting available here (eg: continous waiting for data and fitting them)\n",
" if self.sdm.X is None:\n",
" self.sdm.create_train_test_split()\n",
" disabled_fits = {}\n",
" disabled_fit_transforms = {}\n",
" \n",
" disabled_keras_fits = {}\n",
" \n",
" named_steps = self.pm.pipeline.named_steps\n",
" \n",
" for s in disabled_fit_steps:\n",
" # now it gets really dirty:\n",
" # replace fit functions we don't want to call again (e.g. for vectorizers)\n",
" disabled_fits[s] = named_steps[s].fit\n",
" disabled_fit_transforms[s] = named_steps[s].fit_transform\n",
" named_steps[s].fit = lambda self, X, y=None: self\n",
" named_steps[s].fit_transform = named_steps[s].transform\n",
" \n",
" for k in keras_batch_fitting_layer:\n",
" # forcing batch fitting on keras\n",
" disabled_keras_fits[k]=named_steps[k].fit\n",
" named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch progressing!?!?!\n",
" \n",
" if batch_size is None:\n",
" self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n",
" else:\n",
" n = len(self.sdm.X) // batch_size\n",
" for i in range(n_epochs):\n",
" for j in range(n):\n",
" self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))\n",
" if progress_callback is not None:\n",
" progress_callback()\n",
" pred, yt = self.test()\n",
" mean_squared_error = ((pred - yt)**2).mean(axis=0)\n",
" print(\"#\" + str(j) + \": loss: \", mean_squared_error)\n",
"\n",
" \n",
" # restore replaced fit functions:\n",
" for s in disabled_fit_steps:\n",
" named_steps[s].fit = disabled_fits[s]\n",
" named_steps[s].fit_transform = disabled_fit_transforms[s]\n",
" \n",
" for k in keras_batch_fitting_layer:\n",
" named_steps[k].fit = disabled_keras_fits[k]\n",
" \n",
" def test(self):\n",
" '''\n",
" @return: prediction:list, teacher:list\n",
" '''\n",
" if self.sdm.X is None:\n",
" self.sdm.create_train_test_split()\n",
" return self.pm.predict(self.sdm.Xt), self.sdm.yt\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## Train"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* when in notebook environment: run the stuff below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import __main__ as main\n",
"if not hasattr(main, '__file__'):\n",
" # we are in an interactive environment (probably in jupyter)\n",
" # load data:\n",
" \n",
" # setting n_kmeans_clusters to a value > 0 activates binarized labeling automatically! \n",
" # set to -1 to disable kmeans clustering and generating labels in plain sentiment space\n",
" \n",
" #n_kmeans_cluster = 5\n",
" n_kmeans_cluster = -1\n",
" sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1), n_kmeans_cluster=n_kmeans_cluster)\n",
" sdm.create_train_test_split()\n",
" #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\\n\",\n",
" # layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\\n\",\n",
" pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
" layers=[(2500, 'relu'),(sdm.y.shape[1],None)], sdm=sdm)\n",
" tr = trainer(sdm=sdm, pm=pm)\n",
" tr.fit(100)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## save classifier"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import __main__ as main\n",
"if not hasattr(main, '__file__'):\n",
" pm.save('custom_classifier')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## Prediction\n",
"\n",
"* predict and save to `test.csv`"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import __main__ as main\n",
"if not hasattr(main, '__file__'):\n",
" pred, teacher = tr.test()\n",
" \n",
" display(pred)\n",
" display(teacher)\n",
" \n",
" print('prediction variance: ', np.linalg.norm(np.var(pred, axis=0)))\n",
" print('teacher variance: ', np.linalg.norm(np.var(teacher, axis=0)))\n",
" \n",
" # build a dataframe to visualize test results:\n",
" testlist = pd.DataFrame({'text': sdm.Xt, \n",
" 'teacher': sent2emoji(sdm.yt),\n",
" 'teacher_sentiment': sdm.yt.tolist(),\n",
" 'predict': sent2emoji(pred, custom_target_emojis=sdm.top_emojis),\n",
" 'predicted_sentiment': pred.tolist()})\n",
" # display:\n",
" display(testlist.head())\n",
" \n",
" # mean squared error:\n",
" teacher_sentiments = np.array([sample[1]['teacher_sentiment'] for sample in testlist.iterrows()])\n",
" predicted_sentiments = np.array([sample[1]['predicted_sentiment'] for sample in testlist.iterrows()])\n",
"\n",
" mean_squared_error = ((teacher_sentiments - predicted_sentiments)**2).mean(axis=0)\n",
" print(\"Mean Squared Error: \", mean_squared_error)\n",
" print(\"Variance teacher: \", np.var(teacher_sentiments, axis=0))\n",
" print(\"Variance prediction: \", np.var(predicted_sentiments, axis=0))\n",
" \n",
" # save to csv:\n",
" testlist.to_csv('test.csv')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## Load classifier\n",
"\n",
"* loading classifier and show a test widget"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import __main__ as main\n",
"if not hasattr(main, '__file__'):\n",
" try:\n",
" pm\n",
" except NameError:\n",
" pass\n",
" else:\n",
" del pm # delete existing pipeline manager if ther is one\n",
"\n",
" pm = pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
" lookup_emojis = [#'😂',\n",
" '😭',\n",
" '😍',\n",
" '😩',\n",
" '😊',\n",
" '😘',\n",
" '🙏',\n",
" '🙌',\n",
" '😉',\n",
" '😁',\n",
" '😅',\n",
" '😎',\n",
" '😢',\n",
" '😒',\n",
" '😏',\n",
" '😌',\n",
" '😔',\n",
" '😋',\n",
" '😀',\n",
" '😤']\n",
" out = widgets.Output()\n",
"\n",
" t = widgets.Text()\n",
" b = widgets.Button(\n",
" description='get emoji',\n",
" disabled=False,\n",
" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
" tooltip='Click me',\n",
" icon='check'\n",
" )\n",
"\n",
"\n",
"\n",
" def handle_submit(sender):\n",
" with out:\n",
" clear_output()\n",
" with out:\n",
" pred = pm.predict([t.value])\n",
"\n",
" display(Markdown(\"# Predicted Emoji \" + str(sent2emoji(pred, lookup_emojis)[0])))\n",
" display(Markdown(\"# Sentiment Vector: $$ \\pmatrix{\" + str(pred[0,0]) +\n",
" \"\\\\\\\\\" + str(pred[0,1]) + \"\\\\\\\\\" + str(pred[0,2]) + \"}$$\"))\n",
"\n",
" b.on_click(handle_submit)\n",
"\n",
" display(t)\n",
" display(widgets.VBox([b, out])) "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -0,0 +1,832 @@
# coding: utf-8
# In[1]:
import pandas as pd
from IPython.display import clear_output, Markdown, Math
import ipywidgets as widgets
import os
import glob
import json
import numpy as np
import itertools
import sklearn.utils as sku
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from sklearn.cluster import KMeans
import nltk
from keras.models import load_model
from sklearn.externals import joblib
import pickle
import operator
from sklearn.pipeline import Pipeline
import json
import datetime
import matplotlib.pyplot as plt
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
from keras import losses
# check whether the display function exists:
try:
display
except NameError:
print("no fancy display function found... using print instead")
display = print
# In[2]:
import sys
sys.path.append("..")
import Tools.Emoji_Distance as edist
import Tools.sklearn_doc2vec as skd2v
def emoji2sent(emoji_arr, only_emoticons=True):
return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])
def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]
# In[3]:
SINGLE_LABEL = True
# top 20 emojis:
top_20 = list("😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂")
top_20_sents = emoji2sent(top_20)
# plotting function to evaluate stuff:
def sentiment_score(s):
#(pos, neg, neu)^T
return s[0] - s[1]
def plot_sentiment_space(predicted_sentiment_vectors, top_sentiments, top_emojis, style='bo', additional_patches = ()):  # empty tuple default so the patch loops below are safe without a None check
# sentiment score axis
top_X = np.array([sentiment_score(x) for x in top_sentiments])
pred_X = np.array([sentiment_score(x) for x in predicted_sentiment_vectors])
# neutral axis:
top_Y = np.array([x[2] for x in top_sentiments])
pred_Y = np.array([x[2] for x in predicted_sentiment_vectors])
fig_1, ax_1 = plt.subplots()#figsize=(15,10))
plt.title("sentiment-score-plot")
plt.xlabel("sentiment score")
plt.ylabel("neutrality")
plt.xlim([-1,1])
plt.ylim([0,1])
for i in range(len(top_X)):
plt.text(top_X[i], top_Y[i], top_emojis[i])
plt.plot(pred_X, pred_Y, style)
for p_tuple in additional_patches:
ax_1.add_artist(p_tuple[0])
p_tuple[0].set_alpha(0.4)
plt.savefig("val-error_sentiment-plot" + str(datetime.datetime.now()) + ".png", bbox_inches='tight')
# sentiment score axis
top_X = np.array([x[0] for x in top_sentiments])
pred_X = np.array([x[0] for x in predicted_sentiment_vectors])
# neutral axis:
top_Y = np.array([x[1] for x in top_sentiments])
pred_Y = np.array([x[1] for x in predicted_sentiment_vectors])
fig_2, ax_2 = plt.subplots()#figsize=(15,10))
plt.title("positive-negative-plot")
plt.xlabel("positive")
plt.ylabel("negative")
plt.xlim([0,1])
plt.ylim([0,1])
for i in range(len(top_X)):
plt.text(top_X[i], top_Y[i], top_emojis[i])
plt.plot(pred_X, pred_Y, style)
for p_tuple in additional_patches:
ax_2.add_artist(p_tuple[1])
p_tuple[1].set_alpha(0.4)
plt.savefig("val-error_positive-negative-plot" + str(datetime.datetime.now()) + ".png", bbox_inches='tight')
plt.show()
# ----
# ## classes and functions we are using later:
# ----
# * functions for selecting items from a set / list
# In[4]:
def latest(lst):
return lst[-1] if len(lst) > 0 else 'X'
def most_common(lst):
# trying to find the most common used emoji in the given lst
return max(set(lst), key=lst.count) if len(lst) > 0 else "X" # setting label to 'X' if there is an empty emoji list
# * our emoji blacklist (skin and sex modifiers)
# In[5]:
# defining blacklist for modifier emojis:
emoji_blacklist = set([
chr(0x1F3FB),
chr(0x1F3FC),
chr(0x1F3FD),
chr(0x1F3FE),
chr(0x1F3FF),
chr(0x2642),
chr(0x2640)
])
# * lemmatization helper functions
# In[6]:
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
from nltk import word_tokenize
from nltk.corpus import wordnet
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
# global stemmer and lemmatizer functions
stemmer = SnowballStemmer("english")
def stem(s):
stemmed_sent = []
for word in s.split(" "):
word_stemmed = stemmer.stem(word)
stemmed_sent.append(word_stemmed)
stemmed_sent = (" ").join(stemmed_sent)
return stemmed_sent
lemmatizer = WordNetLemmatizer()
def lemm(s):
lemmatized_sent = []
sent_pos = pos_tag(word_tokenize(s))
for word in sent_pos:
wordnet_pos = get_wordnet_pos(word[1].lower())
word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)
lemmatized_sent.append(word_lemmatized)
lemmatized_sent = (" ").join(lemmatized_sent)
return lemmatized_sent
def batch_stem(sentences):
return [stem(s) for s in sentences]
def batch_lemm(sentences):
return [lemm(s) for s in sentences]
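# illustrative sketch (not part of the original pipeline): the two normalization helpers
# applied to the same sentence. The exact outputs depend on the nltk resources downloaded
# above, but roughly: stemming truncates word endings ("studies" -> "studi"), while
# lemmatization maps words to dictionary forms ("studies" -> "study"). The helper name is hypothetical.
def _example_stem_vs_lemm():
    sentence = "the cats are studying emojis"
    return stem(sentence), lemm(sentence)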
# ### sample data manager
# the sample data manager loads and preprocesses the data
# most common way to use it:
# 
# * `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)`
# 
# * generates a sample_data_manager object and preprocesses the data in one step (see the usage sketch after the class definition below)
# 
# In[7]:
class sample_data_manager(object):
@staticmethod
def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None, emoji_mean=False, custom_target_emojis = None, min_words=0):
"""
generate, read and process train data in one step.
@param path: folder containing json files to process
@param only_emoticons: if True, only messages containing emoticons (provided by Tools.Emoji_Distance) are used
@param apply_stemming: apply stemming and lemmatization on dataset
@param n_top_emojis: only use messages containing one of <`n_top_emojis`>-top emojis. set to `-1` to prevent top emoji filtering
@param file_range: range of file's indices to read (eg `range(3)` to read the first three files). If `None`: all files are read
@param n_kmeans_cluster: generating multilabeled labels with kmeans with these number of clusters. Set to -1 to use the plain sentiment space as label
@return: sample_data_manager object
"""
sdm = sample_data_manager(path)
sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback, emoji_mean=emoji_mean)
if apply_stemming:
sdm.apply_stemming_and_lemmatization(progress_callback=stem_progress_callback)
sdm.generate_emoji_count_and_weights()
if custom_target_emojis is not None:
sdm.filter_by_emoji_list(custom_target_emojis)
elif n_top_emojis > 0:
sdm.filter_by_top_emojis(n_top=n_top_emojis)
if n_kmeans_cluster > 0:
sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)
if min_words > 0:
sdm.filter_by_sentence_length(min_words=min_words)
return sdm
def __init__(self, data_root_folder:str):
"""
constructor for manual initialization
@param data_root_folder: folder containing json files to process
"""
self.data_root_folder = data_root_folder
self.json_files = sorted(glob.glob(self.data_root_folder + "/*.json"))
self.n_files = len(self.json_files)
self.emojis = None
self.plain_text = None
self.labels = None
self.emoji_count = None
self.emoji_weights = None
self.X = None
self.y = None
self.Xt = None
self.yt = None
self.top_emojis = None
self.binary_labels = None
self.use_binary_labels = False
self.kmeans_cluster = None
self.label_binarizer = None
self.use_stemming = False
self.use_lemmatization = False
    def read_files(self, file_index_range:list, only_emoticons=True, emoji_mean=False, progress_callback=None):
        """
        read (multiple) files into one pandas table.

        @param file_index_range: range of file indices to read (e.g. `range(3)` to read the first three files)
        @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance
        @param emoji_mean: if True, use the mean sentiment of all emojis in a message instead of only the latest one
        @param progress_callback: optional callback(progress: float) reporting read progress
        """
assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files
n = len(file_index_range)
for i in file_index_range:
print("reading file: " + self.json_files[i] + "...")
raw_data_i = pd.read_json(self.json_files[i], encoding="utf-8")
emojis_i = raw_data_i['EMOJI']
plain_text_i = raw_data_i['text']
            # replacing keywords. TODO: maybe this information could be extracted and used
plain_text_i = plain_text_i.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","")
            # filter out messages with an empty emoji list
            empty_labels = np.array([len(e) < 1 for e in emojis_i], dtype=np.bool_)
plain_text_i = plain_text_i[np.invert(empty_labels)]
emojis_i = emojis_i[np.invert(empty_labels)]
print("ignored " + str(np.sum(empty_labels)) + " empty labels")
if not emoji_mean:
                # so far we label with the latest emoji in the message. TODO: maybe there are better approaches
labels_i = emoji2sent([latest(e) for e in emojis_i], only_emoticons=only_emoticons )
else:
tmp = [np.nanmean(emoji2sent(e, only_emoticons=only_emoticons), axis=0, dtype=float) for e in emojis_i]
c = 0
for t in tmp:
                    # only to find and debug wrongly formatted data
if str(type(t)) != "<class 'numpy.ndarray'>":
print(t, type(t))
print(emojis_i[c])
print(emoji2sent(emojis_i[c], only_emoticons=only_emoticons))
c += 1
labels_i = np.array(tmp, dtype=float)
# and filter out all samples we have no label for:
wrong_labels = np.isnan(np.linalg.norm(labels_i, axis=1))
labels_i = labels_i[np.invert(wrong_labels)]
plain_text_i = plain_text_i[np.invert(wrong_labels)]
emojis_i = emojis_i[np.invert(wrong_labels)]
print("imported " + str(len(labels_i)) + " samples")
if self.labels is None:
self.labels = labels_i
else:
self.labels = np.append(self.labels, labels_i, axis=0)
if self.emojis is None:
self.emojis = emojis_i
else:
self.emojis = pd.concat([self.emojis,emojis_i],ignore_index=True)
if self.plain_text is None:
self.plain_text = plain_text_i
else:
self.plain_text = pd.concat([self.plain_text,plain_text_i],ignore_index=True)
if progress_callback is not None:
progress_callback((i+1)/n)
def apply_stemming_and_lemmatization(self, progress_callback = None):
"""
apply stemming and lemmatization to plain text samples
"""
self.use_stemming = True
self.use_lemmatization = True
print("apply stemming and lemmatization...")
stemmer = SnowballStemmer("english")
n = self.plain_text.shape[0] * 2 # 2 for loops
i = 0
for key in self.plain_text.keys():
stemmed_sent = []
for word in self.plain_text[key].split(" "):
word_stemmed = stemmer.stem(word)
stemmed_sent.append(word_stemmed)
stemmed_sent = (" ").join(stemmed_sent)
self.plain_text[key] = stemmed_sent
i += 1
if progress_callback is not None and i % 1024 == 0:
progress_callback(i / n)
lemmatizer = WordNetLemmatizer()
for key in self.plain_text.keys():
lemmatized_sent = []
sent_pos = pos_tag(word_tokenize(self.plain_text[key]))
for word in sent_pos:
wordnet_pos = get_wordnet_pos(word[1].lower())
word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)
lemmatized_sent.append(word_lemmatized)
lemmatized_sent = (" ").join(lemmatized_sent)
self.plain_text[key] = lemmatized_sent
i += 1
if progress_callback is not None and i % 1024 == 0:
progress_callback(i / n)
print("stemming and lemmatization done")
def generate_emoji_count_and_weights(self):
"""
        counting occurrences of emojis
"""
self.emoji_count = {}
for e_list in self.emojis:
for e in set(e_list):
if e not in self.emoji_count:
self.emoji_count[e] = 0
self.emoji_count[e] += 1
emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])
self.emoji_weights = {}
for e in self.emoji_count:
            # idf-style weight for each emoji (log of inverse relative frequency)
            self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))
        weights_sum = sum([self.emoji_weights[x] for x in self.emoji_weights])
# normalize:
for e in self.emoji_weights:
self.emoji_weights[e] = self.emoji_weights[e] / weights_sum
self.emoji_weights['X'] = 0 # dummy values
self.emoji_count['X'] = 0
        # dump count data to json:
        with open("count_from_read_progress_" + str(datetime.datetime.now()) + ".json", 'w') as f:
            f.write(json.dumps(self.emoji_count, ensure_ascii=False))
def get_emoji_count(self):
"""
        @return: list of tuples in the form (<emoji as character>, <emoji count>), sorted by count in descending order
"""
assert self.emoji_count is not None
sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))
#display(sorted_emoji_count)
return sorted_emoji_count
def filter_by_top_emojis(self,n_top = 20):
"""
filter out messages not containing one of the `n_top` emojis
@param n_top: number of top emojis used for filtering
"""
        assert self.labels is not None # messages have to be read in before filtering
self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]
in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]
self.labels = self.labels[in_top]
self.plain_text = self.plain_text[in_top]
self.emojis = self.emojis[in_top]
print("remaining samples after top emoji filtering: ", len(self.labels))
def filter_by_emoji_list(self, custom_target_emojis):
assert self.labels is not None
in_list = [edist.sentiment_vector_to_emoji(x) in custom_target_emojis for x in self.labels]
self.labels = self.labels[in_list]
self.plain_text = self.plain_text[in_list]
self.emojis = self.emojis[in_list]
print("remaining samples after custom emoji filtering: ", len(self.labels))
def filter_by_sentence_length(self, min_words):
assert self.plain_text is not None
        is_long = [len(x.split()) >= min_words for x in self.plain_text]
self.labels = self.labels[is_long]
self.plain_text = self.plain_text[is_long]
self.emojis = self.emojis[is_long]
print("remaining samples after sentence length filtering: ", len(self.labels))
def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):
"""
generate binary labels using kmeans.
@param only_emoticons: set whether we're using the full emoji set or only emoticons
        @param n_clusters: number of clusters we're generating in the emoji sentiment space
"""
assert self.labels is not None
array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors
array_sentiment_vectors = np.array(array_sentiment_vectors)
list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis
self.use_binary_labels = True
print("clustering following emojis: " + "".join(list_emojis) + "...")
self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)
print("clustering done")
self.label_binarizer = LabelBinarizer()
multiclass_labels = self.kmeans_cluster.predict(self.labels)
# FIXME: we have to guarantee that in every dataset all classes occur.
# otherwise batch fitting is not possible!
# (or we have to precompute the mlb fitting process somewhere...)
self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)
def create_train_test_split(self, split = 0.1, random_state = 4222):
assert self.plain_text is not None and self.labels is not None
if self.X is not None:
sys.stderr.write("WARNING: overwriting existing train/test split \n")
labels = self.binary_labels if self.use_binary_labels else self.labels
assert labels is not None
self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)
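# illustrative usage sketch (not part of the original pipeline), as referenced in the comment
# above the class: read the first json file from a hypothetical "./data" folder, keep only the
# 20 most frequent emojis and build a train/test split. The folder path and helper name are
# assumptions; point the path to wherever the twitter json files actually live.
def _example_build_sdm():
    sdm = sample_data_manager.generate_and_read("./data",
                                                apply_stemming=True,
                                                n_top_emojis=20,
                                                file_range=range(1))
    sdm.create_train_test_split()
    return sdm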
# * the pipeline manager builds, saves and loads sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations (see the usage sketch after the class definition below)
# In[8]:
class pipeline_manager(object):
@staticmethod
def load_from_pipeline_file(pipeline_file:str):
"""
        load a json configuration file and use its parameters to call `load_pipeline_from_files`
"""
with open(pipeline_file, 'r') as f:
d = json.load(f)
keras_models = d['keras_models']
all_models = d['all_models']
return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)
@staticmethod
def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):
"""
load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'
@param file_prefix: basename of all files (without extension)
@param keras_models: list of keras models (keras model files, only extension name). Leave this list empty if this is not a keras pipeline
@param all_models: list of all models (including keras_models, only extension name).
        @return: a pipeline manager object
"""
pm = pipeline_manager(keras_models=keras_models)
pm.load(file_prefix, all_models)
return pm
@staticmethod
def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None, fit_vectorizer=True):
        '''
        creates a pipeline with vectorizer and keras classifier

        @param vectorizer: Vectorizer object; will be fitted with data provided by sdm
        @param layers: list of keras layers. One keras layer is a tuple in the form: (<#neurons:int>, <activation_func:str>)
        @param sdm: sample data manager providing the data for the vectorizer
        @param loss: set the keras loss function. Depending on whether sdm uses multiclass (binary) labels, `categorical_crossentropy` or `mean_squared_error` is used as default
        @param optimizer: set the keras optimizer. Depending on whether sdm uses multiclass (binary) labels, `sgd` or `adam` is used as default
        @param fit_vectorizer: if True, fit the vectorizer on sdm's training split before building the model
        @return: a pipeline manager object
        '''
from keras.models import Sequential
from keras.layers import Dense
if fit_vectorizer:
if sdm.X is None:
sdm.create_train_test_split()
print("fit vectorizer...")
vec_train = vectorizer.fit_transform(sdm.X)
vec_test = vectorizer.transform(sdm.Xt)
print("fitting done")
# creating keras model:
model=Sequential()
keras_layers = []
first_layer = True
for layer in layers:
if first_layer:
size = None
if "size" in dir(vectorizer):
size = vectorizer.size
else:
size = vectorizer.transform([" "])[0]._shape[1]
model.add(Dense(units=layer[0], activation=layer[1], input_dim=size))
first_layer = False
else:
model.add(Dense(units=layer[0], activation=layer[1]))
if sdm.use_binary_labels:
loss_function = loss if loss is not None else 'categorical_crossentropy'
optimizer_function = optimizer if optimizer is not None else 'sgd'
model.compile(loss=loss_function,
optimizer=optimizer_function,
metrics=['accuracy'])
else:
loss_function = loss if loss is not None else 'mean_squared_error'
optimizer_function = optimizer if optimizer is not None else 'adam'
model.compile(loss=loss_function,
optimizer=optimizer_function)
pipeline = Pipeline([
('vectorizer',vectorizer),
('keras_model', model)
])
return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])
@staticmethod
def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):
        '''
        creates a pipeline with vectorizer and non-keras classifier

        @param vectorizer: Vectorizer object; will be fitted with data provided by sdm
        @param classifier: unfitted classifier object (should be compatible with all sklearn classifiers)
        @param sdm: sample data manager providing the data for the vectorizer
        @return: a pipeline manager object
        '''
if sdm is not None:
if sdm.X is None:
sdm.create_train_test_split()
vec_train = vectorizer.fit_transform(sdm.X)
vec_test = vectorizer.transform(sdm.Xt)
pipeline = Pipeline([
('vectorizer',vectorizer),
('classifier', classifier)
])
return pipeline_manager(pipeline=pipeline, keras_models=[])
def __init__(self, pipeline = None, keras_models = []):
"""
constructor
@param pipeline: a sklearn pipeline
@param keras_models: list of keras steps in pipeline. Neccessary because saving and loading from keras models differs from the scikit ones
"""
self.pipeline = pipeline
self.additional_objects = {}
self.keras_models = keras_models
def save(self, prefix:str):
"""
saving the pipeline. It generates one file per model in the form: '<prefix>.<model_name>'
@param prefix: file prefix for all models
"""
print(self.keras_models)
# doing this like explained here: https://stackoverflow.com/a/43415459
for step in self.pipeline.named_steps:
if step in self.keras_models:
self.pipeline.named_steps[step].model.save(prefix + "." + step)
else:
joblib.dump(self.pipeline.named_steps[step], prefix + "." + str(step))
load_command = "pipeline_manager.load_pipeline_from_files( '"
load_command += prefix + "', " + str(self.keras_models) + ", "
load_command += str(list(self.pipeline.named_steps.keys())) + ")"
with open(prefix + '.pipeline', 'w') as outfile:
json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)
import __main__ as main
if not hasattr(main, '__file__'):
display("saved pipeline. It can be loaded the following way:")
display(Markdown("> ```\n"+load_command+"\n```")) # ← if we're in jupyter, print the fancy way :)
else:
print("saved pipeline. It can be loaded the following way:")
print(load_command)
def load(self, prefix:str, models = []):
"""
load a pipeline. A pipeline should be represented by multiple model files in the form '<prefix>.<model_name>'
NOTE: keras model names (if there are some) have to be defined in self.keras_models first!
@param prefix: the prefix for all model files
@param models: model_names to load
"""
self.pipeline = None
model_list = []
for model in models:
if model in self.keras_models:
model_list.append((model, load_model(prefix + "." + model)))
else:
model_list.append((model, joblib.load(prefix+"." + model)))
self.pipeline = Pipeline(model_list)
def fit(self,X,y):
"""fitting the pipeline"""
self.pipeline.fit(X,y)
def predict(self,X, use_stemming=False, use_lemmatization=False):
"""predict"""
if use_stemming:
X = np.array(batch_stem(X))
if use_lemmatization:
X = np.array(batch_lemm(X))
return self.pipeline.predict(X)
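# illustrative sketch (not part of the original pipeline): wiring a tfidf vectorizer and a small
# keras regression network into a pipeline_manager, then (optionally) saving it. The layer sizes,
# the "mypipeline" prefix and the helper name are arbitrary placeholder choices.
def _example_build_keras_pipeline(sdm):
    from sklearn.feature_extraction.text import TfidfVectorizer
    vectorizer = TfidfVectorizer(stop_words='english')
    pm = pipeline_manager.create_keras_pipeline_with_vectorizer(
        vectorizer,
        layers=[(2048, 'relu'), (3, None)],  # output layer: (pos, neg, neu) sentiment vector
        sdm=sdm)
    # pm.save("mypipeline")                  # would write mypipeline.vectorizer, mypipeline.keras_model, mypipeline.pipeline
    return pm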
# * the trainer class passes data from the sample data manager to the pipeline manager (an end-to-end usage sketch follows the class definition below)
# In[9]:
def to_dense_if_sparse(X):
"""
little hepler function to make data dense (if it is sparse).
is used in trainer.fit function
"""
if "todense" in dir(X):
return X.todense()
return X
class trainer(object):
def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):
"""constructor"""
self.sdm = sdm
self.pm = pm
self.acc = []
self.val = []
def fit(self, max_size=1000000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):
"""
fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline models containing the vectorizer have to be named explicitly
@param max_size: don't train more examples than that number
@param disabled_fit_steps: list of pipeline steps that we want to prevent to refit. Normally all vectorizer steps
"""
# TODO: make batch fitting available here (eg: continous waiting for data and fitting them)
if self.sdm.X is None:
self.sdm.create_train_test_split()
disabled_fits = {}
disabled_fit_transforms = {}
disabled_keras_fits = {}
named_steps = self.pm.pipeline.named_steps
for s in disabled_fit_steps:
# now it gets really dirty:
# replace fit functions we don't want to call again (e.g. for vectorizers)
disabled_fits[s] = named_steps[s].fit
disabled_fit_transforms[s] = named_steps[s].fit_transform
named_steps[s].fit = lambda self, X, y=None: self
named_steps[s].fit_transform = named_steps[s].transform
if batch_size is not None:
for k in keras_batch_fitting_layer:
                # forcing batch fitting on keras; bind k as a default argument to avoid the late-binding closure issue
                disabled_keras_fits[k] = named_steps[k].fit
                named_steps[k].fit = lambda X, y, k=k: named_steps[k].train_on_batch(to_dense_if_sparse(X), y) # ← why has keras no sparse support on batch processing!?!?!
if batch_size is None:
self.acc = []
self.val = []
for e in range(n_epochs):
print("epoch", e)
self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
pred, yt = self.test()
mean_squared_error = ((pred - yt)**2).mean(axis=0)
print("#" + str(e) + ": validation loss: ", mean_squared_error, "scalar: ", np.mean(mean_squared_error))
self.val.append(np.mean(mean_squared_error))
plot_sentiment_space(pred, top_20_sents, top_20)
plt.figure(figsize=(10,5))
plt.plot(self.val)
plt.savefig("val_error" + str(datetime.datetime.now()) + ".png", bbox_inches='tight')
plt.show()
else:
n = len(self.sdm.X) // batch_size
for i in range(n_epochs):
for j in range(n):
self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))
if progress_callback is not None:
progress_callback(j / n)
pred, yt = self.test()
mean_squared_error = ((pred - yt)**2).mean(axis=0)
print("#" + str(j) + ": loss: ", mean_squared_error)
# restore replaced fit functions:
for s in disabled_fit_steps:
named_steps[s].fit = disabled_fits[s]
named_steps[s].fit_transform = disabled_fit_transforms[s]
if batch_size is not None:
for k in keras_batch_fitting_layer:
named_steps[k].fit = disabled_keras_fits[k]
def test(self, use_lemmatization=False, use_stemming=False, emoji_subset=None, only_test_on_valid_set = True):
        '''
        @param use_lemmatization: boolean
        @param use_stemming: boolean
        @param emoji_subset: list; if given, only make predictions on samples whose teacher value maps to one of these emojis
        @param only_test_on_valid_set: currently unused
        @return: prediction:list, teacher:list
        '''
if self.sdm.X is None:
self.sdm.create_train_test_split()
Xt = self.sdm.Xt
yt = self.sdm.yt
print("original validation size: " + str(len(yt)))
if emoji_subset is not None:
            has_emoji = np.array([edist.sentiment_vector_to_emoji(y) in emoji_subset for y in yt])
Xt = Xt[has_emoji]
yt = yt[has_emoji]
print("filtered validation size: " + str(len(yt)))
return self.pm.predict(Xt, use_lemmatization=use_lemmatization, use_stemming=use_stemming), yt
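# illustrative end-to-end sketch (not part of the original pipeline), as referenced in the comment
# above the trainer class: combine the three building blocks defined in this file. The "./data"
# path, the hyperparameters and the helper name are placeholder assumptions.
def _example_train_and_evaluate():
    from sklearn.feature_extraction.text import TfidfVectorizer
    sdm = sample_data_manager.generate_and_read("./data", n_top_emojis=20, file_range=range(1))
    pm = pipeline_manager.create_keras_pipeline_with_vectorizer(
        TfidfVectorizer(stop_words='english'),
        layers=[(1024, 'relu'), (3, None)],
        sdm=sdm)
    tr = trainer(sdm=sdm, pm=pm)
    tr.fit(max_size=100000, n_epochs=1)   # the non-batch path also plots per-epoch validation results
    predictions, teacher_values = tr.test()
    return sent2emoji(predictions), teacher_values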