diff --git a/Project/advanced_approach/simple_twitter_learning.ipynb b/Project/advanced_approach/simple_twitter_learning.ipynb
deleted file mode 100644
index 52f4f5b..0000000
--- a/Project/advanced_approach/simple_twitter_learning.ipynb
+++ /dev/null
@@ -1,848 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import pandas as pd\n",
- "from IPython.display import clear_output, Markdown, Math\n",
- "import ipywidgets as widgets\n",
- "import os\n",
- "import glob\n",
- "import json\n",
- "import numpy as np\n",
- "import itertools\n",
- "import sklearn.utils as sku\n",
- "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
- "from sklearn.model_selection import train_test_split\n",
- "from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer\n",
- "from sklearn.cluster import KMeans\n",
- "import nltk\n",
- "from keras.models import load_model\n",
- "from sklearn.externals import joblib\n",
- "import pickle\n",
- "import operator\n",
- "from sklearn.pipeline import Pipeline\n",
- "nltk.download('punkt')\n",
- "nltk.download('averaged_perceptron_tagger')\n",
- "nltk.download('wordnet')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import sys\n",
- "sys.path.append(\"..\")\n",
- "\n",
- "import Tools.Emoji_Distance as edist\n",
- "\n",
- "def emoji2sent(emoji_arr, only_emoticons=True):\n",
- "    return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])\n",
- "\n",
- "def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):\n",
- "    return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "SINGLE_LABEL = True"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "----\n",
- "## classes and functions we are using later:\n",
- "----"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "* functions for selecting items from a set / list"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "def latest(lst):\n",
- "    return lst[-1] if len(lst) > 0 else 'X'\n",
- "def most_common(lst):\n",
- "    # trying to find the most commonly used emoji in the given lst\n",
- "    return max(set(lst), key=lst.count) if len(lst) > 0 else \"X\" # setting label to 'X' if there is an empty emoji list"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "* our emoji blacklist (skin and sex modifiers)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# defining blacklist for modifier emojis:\n",
- "emoji_blacklist = set([\n",
- "    chr(0x1F3FB),\n",
- "    chr(0x1F3FC),\n",
- "    chr(0x1F3FD),\n",
- "    chr(0x1F3FE),\n",
- "    chr(0x1F3FF),\n",
- "    chr(0x2642),\n",
- "    chr(0x2640)\n",
- "])"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "* lemmatization helper functions"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "from nltk.stem.snowball import SnowballStemmer\n",
- "from nltk.stem import WordNetLemmatizer\n",
- "from nltk import pos_tag\n",
- "from nltk import word_tokenize\n",
- "from nltk.corpus import wordnet\n",
- "\n",
- "def get_wordnet_pos(treebank_tag):\n",
- "\n",
- "    if treebank_tag.startswith('J'):\n",
- "        return wordnet.ADJ\n",
- "    elif treebank_tag.startswith('V'):\n",
- "        return wordnet.VERB\n",
- "    elif treebank_tag.startswith('N'):\n",
- "        return wordnet.NOUN\n",
- "    elif treebank_tag.startswith('R'):\n",
- "        return wordnet.ADV\n",
- "    else:\n",
- "        return wordnet.NOUN"
- ]
- },
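Taken together, these helpers drive the lemmatization pass used later in `apply_stemming_and_lemmatization`. A minimal standalone sketch (assuming the NLTK resources downloaded in the first cell and the `get_wordnet_pos` function above):

```python
# POS-tag a sentence, map the treebank tags to WordNet POS constants via
# get_wordnet_pos (defined above), and lemmatize each token accordingly.
from nltk import pos_tag, word_tokenize
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
tagged = pos_tag(word_tokenize("the cats were running home"))
lemmas = [lemmatizer.lemmatize(word, pos=get_wordnet_pos(tag))
          for word, tag in tagged]
print(lemmas)  # e.g. ['the', 'cat', 'be', 'run', 'home']
```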
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "### sample data manager\n",
- "the sample data manager loads and preprocesses data.\n",
- "The most common way to use it:\n",
- "\n",
- "\n",
- "* `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)`\n",
- "\n",
- "    * generates a sample_data_manager object and preprocesses the data in one step\n"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "class sample_data_manager(object):\n",
- "    @staticmethod\n",
- "    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):\n",
- "        \"\"\"\n",
- "        generate, read and process train data in one step.\n",
- "        \n",
- "        @param path: folder containing json files to process\n",
- "        @param only_emoticons: if True, only messages containing emoticons (provided by Tools.Emoji_Distance) are used\n",
- "        @param apply_stemming: apply stemming and lemmatization on the dataset\n",
- "        @param n_top_emojis: only use messages containing one of the <`n_top_emojis`>-top emojis. Set to `-1` to prevent top emoji filtering\n",
- "        @param file_range: range of file indices to read (e.g. `range(3)` to read the first three files). If `None`: all files are read\n",
- "        @param n_kmeans_cluster: generate multilabeled labels with kmeans using this number of clusters. Set to `-1` to use the plain sentiment space as label\n",
- "        \n",
- "        @return: sample_data_manager object\n",
- "        \"\"\"\n",
- "        sdm = sample_data_manager(path)\n",
- "        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)\n",
- "        if apply_stemming:\n",
- "            sdm.apply_stemming_and_lemmatization()\n",
- "        \n",
- "        sdm.generate_emoji_count_and_weights()\n",
- "        \n",
- "        if n_top_emojis > 0:\n",
- "            sdm.filter_by_top_emojis(n_top=n_top_emojis)\n",
- "        \n",
- "        if n_kmeans_cluster > 0:\n",
- "            sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)\n",
- "        \n",
- "        return sdm\n",
- "    \n",
- "    \n",
- "    def __init__(self, data_root_folder:str):\n",
- "        \"\"\"\n",
- "        constructor for manual initialization\n",
- "        \n",
- "        @param data_root_folder: folder containing json files to process\n",
- "        \"\"\"\n",
- "        self.data_root_folder = data_root_folder\n",
- "        self.json_files = sorted(glob.glob(self.data_root_folder + \"/*.json\"))\n",
- "        self.n_files = len(self.json_files)\n",
- "        self.raw_data = None\n",
- "        self.emojis = None\n",
- "        self.plain_text = None\n",
- "        self.labels = None\n",
- "        self.emoji_count = None\n",
- "        self.emoji_weights = None\n",
- "        self.X = None\n",
- "        self.y = None\n",
- "        self.Xt = None\n",
- "        self.yt = None\n",
- "        self.top_emojis = None\n",
- "        self.binary_labels = None\n",
- "        self.use_binary_labels = False\n",
- "        self.kmeans_cluster = None\n",
- "        self.label_binarizer = None\n",
- "    \n",
- "    def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):\n",
- "        \"\"\"\n",
- "        read (multiple) files into one pandas table.\n",
- "        \n",
- "        @param file_index_range: range of file indices to read (e.g. `range(3)` to read the first three files)\n",
- "        @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance\n",
- "        \"\"\"\n",
- "        assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files\n",
- "        for i in file_index_range:\n",
- "            print(\"reading file: \" + self.json_files[i] + \"...\")\n",
- "            if self.raw_data is None:\n",
- "                self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n",
- "            else:\n",
- "                self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n",
- "            if progress_callback is not None:\n",
- "                progress_callback()\n",
- "        self.emojis = self.raw_data['EMOJI']\n",
- "        self.plain_text = self.raw_data['text']\n",
- "        \n",
- "        # replacing keywords. TODO: maybe this information can be extracted and used\n",
- "        self.plain_text = self.plain_text.str.replace(\"(||)\",\"\").str.replace(\"[\" + \"\".join(list(emoji_blacklist)) + \"]\",\"\")\n",
- "        \n",
- "        # so far filtering for the latest emoji. TODO: maybe there are also better approaches\n",
- "        self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons)\n",
- "        \n",
- "        # and filter out all samples we have no label for:\n",
- "        wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))\n",
- "\n",
- "        self.labels = self.labels[np.invert(wrong_labels)]\n",
- "        self.plain_text = self.plain_text[np.invert(wrong_labels)]\n",
- "        self.emojis = self.emojis[np.invert(wrong_labels)]\n",
- "        \n",
- "        print(\"imported \" + str(len(self.labels)) + \" samples\")\n",
- "    \n",
- "    def apply_stemming_and_lemmatization(self):\n",
- "        \"\"\"\n",
- "        apply stemming and lemmatization to plain text samples\n",
- "        \"\"\"\n",
- "        stemmer = SnowballStemmer(\"english\")\n",
- "        for key in self.plain_text.keys():\n",
- "            stemmed_sent = []\n",
- "            for word in self.plain_text[key].split(\" \"):\n",
- "                word_stemmed = stemmer.stem(word)\n",
- "                stemmed_sent.append(word_stemmed)\n",
- "            stemmed_sent = (\" \").join(stemmed_sent)\n",
- "            self.plain_text[key] = stemmed_sent\n",
- "        \n",
- "        lemmatizer = WordNetLemmatizer()\n",
- "        for key in self.plain_text.keys():\n",
- "            lemmatized_sent = []\n",
- "            sent_pos = pos_tag(word_tokenize(self.plain_text[key]))\n",
- "            for word in sent_pos:\n",
- "                wordnet_pos = get_wordnet_pos(word[1]) # keep the treebank tag uppercase: get_wordnet_pos tests for uppercase prefixes\n",
- "                word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)\n",
- "                lemmatized_sent.append(word_lemmatized)\n",
- "            lemmatized_sent = (\" \").join(lemmatized_sent)\n",
- "            self.plain_text[key] = lemmatized_sent\n",
- "    \n",
- "    def generate_emoji_count_and_weights(self):\n",
- "        \"\"\"\n",
- "        counting occurrences of emojis\n",
- "        \"\"\"\n",
- "        self.emoji_count = {}\n",
- "        for e_list in self.emojis:\n",
- "            for e in set(e_list):\n",
- "                if e not in self.emoji_count:\n",
- "                    self.emoji_count[e] = 0\n",
- "                self.emoji_count[e] += 1\n",
- "        \n",
- "        emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])\n",
- "\n",
- "        self.emoji_weights = {}\n",
- "        for e in self.emoji_count:\n",
- "            # idf-style weighting for emojis\n",
- "            self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))\n",
- "\n",
- "        weights_sum = sum([self.emoji_weights[x] for x in self.emoji_weights])\n",
- "\n",
- "        # normalize:\n",
- "        for e in self.emoji_weights:\n",
- "            self.emoji_weights[e] = self.emoji_weights[e] / weights_sum\n",
- "\n",
- "        self.emoji_weights['X'] = 0 # dummy values\n",
- "        self.emoji_count['X'] = 0\n",
- "    \n",
- "    def get_emoji_count(self):\n",
- "        \"\"\"\n",
- "        @return: descending list of tuples in form (<emoji>, <count>)\n",
- "        \"\"\"\n",
- "        assert self.emoji_count is not None\n",
- "        \n",
- "        sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))\n",
- "        #display(sorted_emoji_count)\n",
- "        return sorted_emoji_count\n",
- "    \n",
- "    def filter_by_top_emojis(self, n_top = 20):\n",
- "        \"\"\"\n",
- "        filter out messages not containing one of the `n_top` emojis\n",
- "        \n",
- "        @param n_top: number of top emojis used for filtering\n",
- "        \"\"\"\n",
- "        assert self.labels is not None # ← messages are already read in\n",
- "        \n",
- "        self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]\n",
- "        in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n",
- "        self.labels = self.labels[in_top]\n",
- "        self.plain_text = self.plain_text[in_top]\n",
- "        self.emojis = self.emojis[in_top]\n",
- "        print(\"remaining samples after top emoji filtering: \", len(self.labels))\n",
- "    \n",
- "    def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):\n",
- "        \"\"\"\n",
- "        generate binary labels using kmeans.\n",
- "        \n",
- "        @param only_emoticons: set whether we're using the full emoji set or only emoticons\n",
- "        @param n_clusters: number of clusters we're generating in the emoji's sentiment space\n",
- "        \"\"\"\n",
- "        assert self.labels is not None\n",
- "        array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors\n",
- "        array_sentiment_vectors = np.array(array_sentiment_vectors)\n",
- "        \n",
- "        list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis\n",
- "        self.use_binary_labels = True\n",
- "        print(\"clustering following emojis: \" + \"\".join(list_emojis) + \"...\")\n",
- "        self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)\n",
- "        print(\"clustering done\")\n",
- "        self.label_binarizer = LabelBinarizer()\n",
- "        \n",
- "        multiclass_labels = self.kmeans_cluster.predict(self.labels)\n",
- "        \n",
- "        # FIXME: we have to guarantee that in every dataset all classes occur.\n",
- "        # otherwise batch fitting is not possible!\n",
- "        # (or we have to precompute the mlb fitting process somewhere...)\n",
- "        self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)\n",
- "    \n",
- "    \n",
- "    def create_train_test_split(self, split = 0.1, random_state = 4222):\n",
- "        assert self.plain_text is not None and self.labels is not None\n",
- "        if self.X is not None:\n",
- "            sys.stderr.write(\"WARNING: overwriting existing train/test split \\n\")\n",
- "        \n",
- "        labels = self.binary_labels if self.use_binary_labels else self.labels\n",
- "        assert labels is not None\n",
- "        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n"
- ]
- },
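The emoji weighting in `generate_emoji_count_and_weights` is an idf-style score, `w_e = log(total / count_e)`, normalized to sum to one. The same computation on a toy count dictionary (a self-contained sketch; the counts are made up):

```python
import numpy as np

# idf-style emoji weights as in generate_emoji_count_and_weights:
# rare emojis receive larger weights than frequent ones.
emoji_count = {'😂': 50, '😭': 30, '😍': 20}
total = sum(emoji_count.values())
weights = {e: np.log(total / c) for e, c in emoji_count.items()}
norm = sum(weights.values())
weights = {e: w / norm for e, w in weights.items()}
print(weights)  # 😍 (the rarest) gets the largest weight
```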
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "* the pipeline manager saves and stores sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "class pipeline_manager(object):\n",
- "    @staticmethod\n",
- "    def load_from_pipeline_file(pipeline_file:str):\n",
- "        \"\"\"\n",
- "        loading a json configuration file and using its parameters to call 'load_pipeline_from_files'\n",
- "        \"\"\"\n",
- "        with open(pipeline_file, 'r') as f:\n",
- "            d = json.load(f)\n",
- "        \n",
- "        keras_models = d['keras_models']\n",
- "        all_models = d['all_models']\n",
- "        \n",
- "        return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)\n",
- "\n",
- "\n",
- "    @staticmethod\n",
- "    def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n",
- "        \"\"\"\n",
- "        load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'\n",
- "        \n",
- "        @param file_prefix: basename of all files (without extension)\n",
- "        @param keras_models: list of keras models (keras model files, only extension name). Leave this list empty if this is not a keras pipeline\n",
- "        @param all_models: list of all models (including keras_models, only extension name).\n",
- "        \n",
- "        @return a pipeline manager object\n",
- "        \"\"\"\n",
- "        \n",
- "        pm = pipeline_manager(keras_models=keras_models)\n",
- "        pm.load(file_prefix, all_models)\n",
- "        return pm\n",
- "    \n",
- "    @staticmethod\n",
- "    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None):\n",
- "        '''\n",
- "        creates pipeline with vectorizer and keras classifier\n",
- "        \n",
- "        @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n",
- "        @param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_function:str>)\n",
- "        @param sdm: sample data manager to get data for the vectorizer\n",
- "        @param loss: set keras loss function. Depending on whether sdm uses multiclass labels, `categorical_crossentropy` or `mean_squared_error` is used as default\n",
- "        @param optimizer: set keras optimizer. Depending on whether sdm uses multiclass labels, `sgd` or `adam` is used as default\n",
- "        \n",
- "        @return: a pipeline manager object\n",
- "        \n",
- "        '''\n",
- "        from keras.models import Sequential\n",
- "        from keras.layers import Dense\n",
- "        \n",
- "        if sdm.X is None:\n",
- "            sdm.create_train_test_split()\n",
- "        \n",
- "        vec_train = vectorizer.fit_transform(sdm.X)\n",
- "        vec_test = vectorizer.transform(sdm.Xt)\n",
- "        # creating keras model:\n",
- "        model = Sequential()\n",
- "        \n",
- "        first_layer = True\n",
- "        for layer in layers:\n",
- "            if first_layer:\n",
- "                model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([\" \"])[0]._shape[1]))\n",
- "                first_layer = False\n",
- "            else:\n",
- "                model.add(Dense(units=layer[0], activation=layer[1]))\n",
- "        \n",
- "        if sdm.use_binary_labels:\n",
- "            loss_function = loss if loss is not None else 'categorical_crossentropy'\n",
- "            optimizer_function = optimizer if optimizer is not None else 'sgd'\n",
- "            model.compile(loss=loss_function,\n",
- "                          optimizer=optimizer_function,\n",
- "                          metrics=['accuracy'])\n",
- "        else:\n",
- "            loss_function = loss if loss is not None else 'mean_squared_error'\n",
- "            optimizer_function = optimizer if optimizer is not None else 'adam'\n",
- "            model.compile(loss=loss_function,\n",
- "                          optimizer=optimizer_function)\n",
- "        \n",
- "        pipeline = Pipeline([\n",
- "            ('vectorizer',vectorizer),\n",
- "            ('keras_model', model)\n",
- "        ])\n",
- "        \n",
- "        return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])\n",
- "    \n",
- "    @staticmethod\n",
- "    def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):\n",
- "        '''\n",
- "        creates pipeline with vectorizer and non-keras classifier\n",
- "        \n",
- "        @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n",
- "        @param classifier: unfitted classifier object (should be compatible with all sklearn classifiers)\n",
- "        @param sdm: sample data manager to get data for the vectorizer\n",
- "        \n",
- "        @return: a pipeline manager object\n",
- "        '''\n",
- "        if sdm is not None:\n",
- "            if sdm.X is None:\n",
- "                sdm.create_train_test_split()\n",
- "\n",
- "            vec_train = vectorizer.fit_transform(sdm.X)\n",
- "            vec_test = vectorizer.transform(sdm.Xt)\n",
- "        \n",
- "        pipeline = Pipeline([\n",
- "            ('vectorizer',vectorizer),\n",
- "            ('classifier', classifier)\n",
- "        ])\n",
- "        \n",
- "        return pipeline_manager(pipeline=pipeline, keras_models=[])\n",
- "    \n",
- "    def __init__(self, pipeline = None, keras_models = []):\n",
- "        \"\"\"\n",
- "        constructor\n",
- "        \n",
- "        @param pipeline: a sklearn pipeline\n",
- "        @param keras_models: list of keras steps in the pipeline. Necessary because saving and loading keras models differs from the scikit-learn ones\n",
- "        \"\"\"\n",
- "        \n",
- "        self.pipeline = pipeline\n",
- "        self.additional_objects = {}\n",
- "        self.keras_models = keras_models\n",
- "    \n",
- "    def save(self, prefix:str):\n",
- "        \"\"\"\n",
- "        saving the pipeline. It generates one file per model in the form: '<prefix>.<model_name>'\n",
- "        \n",
- "        @param prefix: file prefix for all models\n",
- "        \"\"\"\n",
- "\n",
- "        print(self.keras_models)\n",
- "        # doing this like explained here: https://stackoverflow.com/a/43415459\n",
- "        for step in self.pipeline.named_steps:\n",
- "            if step in self.keras_models:\n",
- "                self.pipeline.named_steps[step].model.save(prefix + \".\" + step)\n",
- "            else:\n",
- "                joblib.dump(self.pipeline.named_steps[step], prefix + \".\" + str(step))\n",
- "        \n",
- "        load_command = \"pipeline_manager.load_pipeline_from_files( '\"\n",
- "        load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n",
- "        load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n",
- "\n",
- "        with open(prefix + '.pipeline', 'w') as outfile:\n",
- "            json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)\n",
- "        \n",
- "        import __main__ as main\n",
- "        if not hasattr(main, '__file__'):\n",
- "            display(\"saved pipeline. It can be loaded the following way:\")\n",
- "            display(Markdown(\"> ```\\n\"+load_command+\"\\n```\")) # ← if we're in jupyter, print the fancy way :)\n",
- "        else:\n",
- "            print(\"saved pipeline. It can be loaded the following way:\")\n",
- "            print(load_command)\n",
- "    \n",
- "    \n",
- "    def load(self, prefix:str, models = []):\n",
- "        \"\"\"\n",
- "        load a pipeline. A pipeline should be represented by multiple model files in the form '<prefix>.<model_name>'\n",
- "        NOTE: keras model names (if there are some) have to be defined in self.keras_models first!\n",
- "        \n",
- "        @param prefix: the prefix for all model files\n",
- "        @param models: model_names to load\n",
- "        \"\"\"\n",
- "        self.pipeline = None\n",
- "        model_list = []\n",
- "        for model in models:\n",
- "            if model in self.keras_models:\n",
- "                model_list.append((model, load_model(prefix + \".\" + model)))\n",
- "            else:\n",
- "                model_list.append((model, joblib.load(prefix + \".\" + model)))\n",
- "        self.pipeline = Pipeline(model_list)\n",
- "    \n",
- "    def fit(self,X,y):\n",
- "        \"\"\"fitting the pipeline\"\"\"\n",
- "        self.pipeline.fit(X,y)\n",
- "    \n",
- "    def predict(self,X):\n",
- "        \"\"\"predict\"\"\"\n",
- "        return self.pipeline.predict(X)\n"
- ]
- },
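As a usage sketch, a non-keras pipeline can be created, saved, and reloaded like this (hypothetical example: the classifier choice and the `my_classifier` prefix are made up, and `sdm` is assumed to come from `sample_data_manager.generate_and_read` as in the Train section below):

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeRegressor  # handles multi-output targets

# build a vectorizer + regressor pipeline on top of an existing sdm:
pm = pipeline_manager.create_pipeline_with_classifier_and_vectorizer(
    vectorizer=TfidfVectorizer(stop_words='english'),
    classifier=DecisionTreeRegressor(),
    sdm=sdm)
pm.fit(sdm.X[:1000], sdm.y[:1000])  # note: this refits the vectorizer; the trainer class below avoids that
pm.save('my_classifier')  # one file per pipeline step, plus 'my_classifier.pipeline'

# reload it later (no keras steps involved here):
pm2 = pipeline_manager.load_pipeline_from_files(
    'my_classifier', [], ['vectorizer', 'classifier'])
```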
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "* the trainer class passes data from the sample manager to the pipeline manager"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "class trainer(object):\n",
- "    def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):\n",
- "        \"\"\"constructor\"\"\"\n",
- "        self.sdm = sdm\n",
- "        self.pm = pm\n",
- "    \n",
- "    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):\n",
- "        \"\"\"\n",
- "        fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline steps containing the vectorizer have to be named explicitly\n",
- "        \n",
- "        @param max_size: don't train on more examples than that number\n",
- "        @param disabled_fit_steps: list of pipeline steps that we want to prevent from refitting. Normally all vectorizer steps\n",
- "        \"\"\"\n",
- "        # TODO: make batch fitting available here (e.g. continuously waiting for data and fitting it)\n",
- "        if self.sdm.X is None:\n",
- "            self.sdm.create_train_test_split()\n",
- "        disabled_fits = {}\n",
- "        disabled_fit_transforms = {}\n",
- "        \n",
- "        disabled_keras_fits = {}\n",
- "        \n",
- "        named_steps = self.pm.pipeline.named_steps\n",
- "        \n",
- "        for s in disabled_fit_steps:\n",
- "            # now it gets really dirty:\n",
- "            # replace fit functions we don't want to call again (e.g. for vectorizers)\n",
- "            disabled_fits[s] = named_steps[s].fit\n",
- "            disabled_fit_transforms[s] = named_steps[s].fit_transform\n",
- "            named_steps[s].fit = lambda self, X, y=None: self\n",
- "            named_steps[s].fit_transform = named_steps[s].transform\n",
- "        \n",
- "        for k in keras_batch_fitting_layer:\n",
- "            # forcing batch fitting on keras (k=k binds the loop variable now, avoiding the late-binding closure pitfall)\n",
- "            disabled_keras_fits[k] = named_steps[k].fit\n",
- "            named_steps[k].fit = lambda X, y, k=k: named_steps[k].train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch processing!?!?!\n",
- "        \n",
- "        if batch_size is None:\n",
- "            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n",
- "        else:\n",
- "            n = len(self.sdm.X) // batch_size\n",
- "            for i in range(n_epochs):\n",
- "                for j in range(n):\n",
- "                    self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))\n",
- "                    if progress_callback is not None:\n",
- "                        progress_callback()\n",
- "                    pred, yt = self.test()\n",
- "                    mean_squared_error = ((pred - yt)**2).mean(axis=0)\n",
- "                    print(\"#\" + str(j) + \": loss: \", mean_squared_error)\n",
- "\n",
- "        \n",
- "        # restore replaced fit functions:\n",
- "        for s in disabled_fit_steps:\n",
- "            named_steps[s].fit = disabled_fits[s]\n",
- "            named_steps[s].fit_transform = disabled_fit_transforms[s]\n",
- "        \n",
- "        for k in keras_batch_fitting_layer:\n",
- "            named_steps[k].fit = disabled_keras_fits[k]\n",
- "    \n",
- "    def test(self):\n",
- "        '''\n",
- "        @return: prediction:list, teacher:list\n",
- "        '''\n",
- "        if self.sdm.X is None:\n",
- "            self.sdm.create_train_test_split()\n",
- "        return self.pm.predict(self.sdm.Xt), self.sdm.yt\n"
- ]
- },
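The core trick in `trainer.fit`, temporarily replacing `fit`/`fit_transform` on already-fitted steps, also works in isolation. A minimal sketch on a standalone vectorizer (toy corpus, unrelated to the Twitter data):

```python
from sklearn.feature_extraction.text import CountVectorizer

vec = CountVectorizer().fit(["an already fitted toy corpus"])
saved_fit_transform = vec.fit_transform

# as in trainer.fit: let a later Pipeline.fit() reuse the existing
# vocabulary instead of refitting the vectorizer
vec.fit_transform = vec.transform
# ... pipeline fitting would happen here ...
vec.fit_transform = saved_fit_transform  # restore afterwards
```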
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "----\n",
- "## Train"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "* when in notebook environment: run the stuff below:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import __main__ as main\n",
- "if not hasattr(main, '__file__'):\n",
- "    # we are in an interactive environment (probably in jupyter)\n",
- "    # load data:\n",
- "    \n",
- "    # setting n_kmeans_cluster to a value > 0 activates binarized labeling automatically!\n",
- "    # set to -1 to disable kmeans clustering and generate labels in plain sentiment space\n",
- "    \n",
- "    #n_kmeans_cluster = 5\n",
- "    n_kmeans_cluster = -1\n",
- "    sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1), n_kmeans_cluster=n_kmeans_cluster)\n",
- "    sdm.create_train_test_split()\n",
- "    #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
- "    #                                                            layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\n",
- "    pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
- "                                                                layers=[(2500, 'relu'),(sdm.y.shape[1],None)], sdm=sdm)\n",
- "    tr = trainer(sdm=sdm, pm=pm)\n",
- "    tr.fit(100)"
- ]
- },
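`trainer.fit` also supports batch-wise keras training via the `batch_size` and `n_epochs` parameters from its signature above; a hedged example call instead of `tr.fit(100)` (the values are arbitrary):

```python
# batch-wise training with per-batch loss reporting:
tr.fit(batch_size=100, n_epochs=2)
```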
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "----\n",
- "## save classifier"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import __main__ as main\n",
- "if not hasattr(main, '__file__'):\n",
- "    pm.save('custom_classifier')"
- ]
- },
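Based on `pipeline_manager.save` above, this call should leave one file per pipeline step plus a `.pipeline` json next to them (a sketch of the expected layout, not verified output):

```python
# expected files after pm.save('custom_classifier'), derived from save():
#   custom_classifier.vectorizer   <- joblib dump of the fitted vectorizer
#   custom_classifier.keras_model  <- keras model written by model.save(...)
#   custom_classifier.pipeline     <- json: {'keras_models': [...], 'all_models': [...]}
```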
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "----\n",
- "## Prediction\n",
- "\n",
- "* predict and save to `test.csv`"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import __main__ as main\n",
- "if not hasattr(main, '__file__'):\n",
- "    pred, teacher = tr.test()\n",
- "    \n",
- "    display(pred)\n",
- "    display(teacher)\n",
- "    \n",
- "    print('prediction variance: ', np.linalg.norm(np.var(pred, axis=0)))\n",
- "    print('teacher variance: ', np.linalg.norm(np.var(teacher, axis=0)))\n",
- "    \n",
- "    # build a dataframe to visualize test results:\n",
- "    testlist = pd.DataFrame({'text': sdm.Xt,\n",
- "                             'teacher': sent2emoji(sdm.yt),\n",
- "                             'teacher_sentiment': sdm.yt.tolist(),\n",
- "                             'predict': sent2emoji(pred, custom_target_emojis=sdm.top_emojis),\n",
- "                             'predicted_sentiment': pred.tolist()})\n",
- "    # display:\n",
- "    display(testlist.head())\n",
- "    \n",
- "    # mean squared error:\n",
- "    teacher_sentiments = np.array([sample[1]['teacher_sentiment'] for sample in testlist.iterrows()])\n",
- "    predicted_sentiments = np.array([sample[1]['predicted_sentiment'] for sample in testlist.iterrows()])\n",
- "\n",
- "    mean_squared_error = ((teacher_sentiments - predicted_sentiments)**2).mean(axis=0)\n",
- "    print(\"Mean Squared Error: \", mean_squared_error)\n",
- "    print(\"Variance teacher: \", np.var(teacher_sentiments, axis=0))\n",
- "    print(\"Variance prediction: \", np.var(predicted_sentiments, axis=0))\n",
- "    \n",
- "    # save to csv:\n",
- "    testlist.to_csv('test.csv')"
- ]
- },
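For intuition on the emoji columns in the table above: `sent2emoji` snaps an arbitrary sentiment vector to the closest emoji known to `Tools.Emoji_Distance`. A small sketch with a made-up three-component vector (the same dimensionality the predictions above have):

```python
import numpy as np

# map a made-up sentiment vector to its closest emoji,
# optionally restricted to the top emojis of the dataset:
vec = np.array([[0.7, 0.1, 0.2]])
print(sent2emoji(vec)[0])                                       # closest emoji overall
print(sent2emoji(vec, custom_target_emojis=sdm.top_emojis)[0])  # restricted to top emojis
```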
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "----\n",
- "## Load classifier\n",
- "\n",
- "* loading the classifier and showing a test widget"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "import __main__ as main\n",
- "if not hasattr(main, '__file__'):\n",
- "    try:\n",
- "        pm\n",
- "    except NameError:\n",
- "        pass\n",
- "    else:\n",
- "        del pm # delete existing pipeline manager if there is one\n",
- "\n",
- "    pm = pipeline_manager.load_pipeline_from_files('custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
- "    lookup_emojis = [#'😂',\n",
- "                     '😭',\n",
- "                     '😍',\n",
- "                     '😩',\n",
- "                     '😊',\n",
- "                     '😘',\n",
- "                     '🙏',\n",
- "                     '🙌',\n",
- "                     '😉',\n",
- "                     '😁',\n",
- "                     '😅',\n",
- "                     '😎',\n",
- "                     '😢',\n",
- "                     '😒',\n",
- "                     '😏',\n",
- "                     '😌',\n",
- "                     '😔',\n",
- "                     '😋',\n",
- "                     '😀',\n",
- "                     '😤']\n",
- "    out = widgets.Output()\n",
- "\n",
- "    t = widgets.Text()\n",
- "    b = widgets.Button(\n",
- "        description='get emoji',\n",
- "        disabled=False,\n",
- "        button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
- "        tooltip='Click me',\n",
- "        icon='check'\n",
- "    )\n",
- "\n",
- "\n",
- "\n",
- "    def handle_submit(sender):\n",
- "        with out:\n",
- "            clear_output()\n",
- "        with out:\n",
- "            pred = pm.predict([t.value])\n",
- "\n",
- "            display(Markdown(\"# Predicted Emoji \" + str(sent2emoji(pred, lookup_emojis)[0])))\n",
- "            display(Markdown(\"# Sentiment Vector: $$ \\pmatrix{\" + str(pred[0,0]) +\n",
- "                             \"\\\\\\\\\" + str(pred[0,1]) + \"\\\\\\\\\" + str(pred[0,2]) + \"}$$\"))\n",
- "\n",
- "    b.on_click(handle_submit)\n",
- "\n",
- "    display(t)\n",
- "    display(widgets.VBox([b, out]))"
- ]
- }
- ],
- "metadata": {
- "kernelspec": {
- "display_name": "Python 3",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.6.5"
- }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}