documentation and python file for simple_twitter_learning
		| @ -9,6 +9,8 @@ | ||||
|      "name": "stderr", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "/home/jonas/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n", | ||||
|       "  from ._conv import register_converters as _register_converters\n", | ||||
|       "Using TensorFlow backend.\n" | ||||
|      ] | ||||
|     }, | ||||
| @ -180,7 +182,14 @@ | ||||
|    "cell_type": "markdown", | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "* the sample data manager loads and preprocesses data" | ||||
|     "### sample data manager\n", | ||||
|     "the sample data manager loads and preprocesses data.\n", | ||||
|     "the most common way to use it:\n", | ||||
|     "\n", | ||||
|     "\n", | ||||
|     "* `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)`\n", | ||||
|     "\n", | ||||
|     "    * Generates a sample_data_manager object and preprocesses the data in one step\n" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
| @ -192,6 +201,16 @@ | ||||
|     "class sample_data_manager(object):\n", | ||||
|     "    @staticmethod\n", | ||||
|     "    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        generate, read and process train data in one step.\n", | ||||
|     "        \n", | ||||
|     "        @param path: folder containing json files to process\n", | ||||
|     "        @param only_emoticons: if True, only messages containing emoticons (provided by Tools.Emoji_Distance) are used\n", | ||||
|     "        @param apply_stemming: apply stemming and lemmatization on dataset\n", | ||||
|     "        @param n_top_emojis: only use messages containing one of the `n_top_emojis` most frequent emojis. Set to `-1` to disable top-emoji filtering\n", | ||||
|     "        @param file_range: range of file indices to read (e.g. `range(3)` to read the first three files). If `None`, all files are read\n", | ||||
|     "        @return: sample_data_manager object\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        sdm = sample_data_manager(path)\n", | ||||
|     "        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)\n", | ||||
|     "        if apply_stemming:\n", | ||||
| @ -206,6 +225,11 @@ | ||||
|     "        \n", | ||||
|     "    \n", | ||||
|     "    def __init__(self, data_root_folder:str):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        constructor for manual initialization\n", | ||||
|     "        \n", | ||||
|     "        @param data_root_folder: folder containing json files to process\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        self.data_root_folder = data_root_folder\n", | ||||
|     "        self.json_files = sorted(glob.glob(self.data_root_folder + \"/*.json\"))\n", | ||||
|     "        self.n_files = len(self.json_files)\n", | ||||
| @ -222,6 +246,12 @@ | ||||
|     "        self.top_emojis = None\n", | ||||
|     "    \n", | ||||
|     "    def read_files(self, file_index_range:list, only_emoticons=True):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        read (multiple) files into one pandas DataFrame.\n", | ||||
|     "        \n", | ||||
|     "        @param file_index_range: range of file indices to read (e.g. `range(3)` to read the first three files)\n", | ||||
|     "        @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files\n", | ||||
|     "        for i in file_index_range:\n", | ||||
|     "            print(\"reading file: \" + self.json_files[i] + \"...\")\n", | ||||
| @ -249,6 +279,9 @@ | ||||
|     "        print(\"imported \" + str(len(self.labels)) + \" samples\")\n", | ||||
|     "    \n", | ||||
|     "    def apply_stemming_and_lemmatization(self):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        apply stemming and lemmatization to plain text samples\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        stemmer = SnowballStemmer(\"english\")\n", | ||||
|     "        for key in self.plain_text.keys():\n", | ||||
|     "            stemmed_sent = []\n", | ||||
| @ -270,6 +303,9 @@ | ||||
|     "            self.plain_text[key] = lemmatized_sent\n", | ||||
|     "    \n", | ||||
|     "    def generate_emoji_count_and_weights(self):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        count occurrences of emojis and compute normalized idf-style emoji weights\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        self.emoji_count = {}\n", | ||||
|     "        for e_list in self.emojis:\n", | ||||
|     "            for e in set(e_list):\n", | ||||
| @ -294,11 +330,23 @@ | ||||
|     "        self.emoji_count['X'] = 0\n", | ||||
|     "    \n", | ||||
|     "    def get_emoji_count(self):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        @return: descending list of tuples in form (<emoji as character>, <emoji count>) \n", | ||||
|     "        \"\"\"\n", | ||||
|     "        assert self.emoji_count is not None\n", | ||||
|     "        \n", | ||||
|     "        sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))\n", | ||||
|     "        #display(sorted_emoji_count)\n", | ||||
|     "        return sorted_emoji_count\n", | ||||
|     "    \n", | ||||
|     "    def filter_by_top_emojis(self,n_top = 20):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        filter out messages not containing one of the `n_top` emojis\n", | ||||
|     "        \n", | ||||
|     "        @param n_top: number of top emojis used for filtering\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        assert self.labels is not None # ← messages are already read in\n", | ||||
|     "        \n", | ||||
|     "        self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]\n", | ||||
|     "        in_top = [sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n", | ||||
|     "        self.labels = self.labels[in_top]\n", | ||||
| @ -307,6 +355,8 @@ | ||||
|     "        print(\"remaining samples after top emoji filtering: \", len(self.labels))\n", | ||||
|     "    \n", | ||||
|     "    def create_train_test_split(self, split = 0.1, random_state = 4222):\n", | ||||
|     "        if self.X is not None:\n", | ||||
|     "            sys.stderr.write(\"WARNING: overwriting existing train/test split \\n\")\n", | ||||
|     "        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state)\n", | ||||
|     "\n" | ||||
|    ] | ||||
| @ -327,6 +377,16 @@ | ||||
|     "class pipeline_manager(object):\n", | ||||
|     "    @staticmethod\n", | ||||
|     "    def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'\n", | ||||
|     "        \n", | ||||
|     "        @param file_prefix: basename of all files (without extension)\n", | ||||
|     "        @param keras_models: list of keras model names (the file-extension part, without the prefix). Leave this list empty if this is not a keras pipeline\n", | ||||
|     "        @param all_models: list of all model names to load (including the keras ones, also without the prefix).\n", | ||||
|     "        \n", | ||||
|     "        @return: a pipeline manager object\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        \n", | ||||
|     "        pm = pipeline_manager(keras_models=keras_models)\n", | ||||
|     "        pm.load(file_prefix, all_models)\n", | ||||
|     "        return pm\n", | ||||
| @ -335,6 +395,13 @@ | ||||
|     "    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager):\n", | ||||
|     "        '''\n", | ||||
|     "        creates pipeline with vectorizer and keras classifier\n", | ||||
|     "        \n", | ||||
|     "        @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n", | ||||
|     "        @param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_func:str>)\n", | ||||
|     "        @param sdm: sample data manager to get data for the vectorizer\n", | ||||
|     "        \n", | ||||
|     "        @return: a pipeline manager object\n", | ||||
|     "        \n", | ||||
|     "        '''\n", | ||||
|     "        from keras.models import Sequential\n", | ||||
|     "        from keras.layers import Dense\n", | ||||
| @ -369,8 +436,13 @@ | ||||
|     "    @staticmethod\n", | ||||
|     "    def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):\n", | ||||
|     "        '''\n", | ||||
|     "        creates a pipeline with vectorizer and classifier for non keras classifiers\n", | ||||
|     "        if sample data manager is given, the vectorizer will be also fitted!\n", | ||||
|     "        creates pipeline with vectorizer and non-keras classifier\n", | ||||
|     "        \n", | ||||
|     "        @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n", | ||||
|     "        @param classifier: unfitted classifier object (should be compatible with all sklearn classifiers)\n", | ||||
|     "        @param sdm: sample data manager to get data for the vectorizer\n", | ||||
|     "        \n", | ||||
|     "        @return: a pipeline manager object\n", | ||||
|     "        '''\n", | ||||
|     "        if sdm is not None:\n", | ||||
|     "            if sdm.X is None:\n", | ||||
| @ -387,11 +459,24 @@ | ||||
|     "        return pipeline_manager(pipeline=pipeline, keras_models=[])\n", | ||||
|     "    \n", | ||||
|     "    def __init__(self, pipeline = None, keras_models = []):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        constructor\n", | ||||
|     "        \n", | ||||
|     "        @param pipeline: a sklearn pipeline\n", | ||||
|     "        @param keras_models: list of keras steps in the pipeline. Necessary because saving and loading keras models differs from the scikit-learn ones\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        \n", | ||||
|     "        self.pipeline = pipeline\n", | ||||
|     "        self.additional_objects = {}\n", | ||||
|     "        self.keras_models = keras_models\n", | ||||
|     "    \n", | ||||
|     "    def save(self, prefix:str):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        saving the pipeline. It generates one file per model in the form: '<prefix>.<model_name>'\n", | ||||
|     "        \n", | ||||
|     "        @param prefix: file prefix for all models\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        \n", | ||||
|     "        print(self.keras_models)\n", | ||||
|     "        # doing this like explained here: https://stackoverflow.com/a/43415459\n", | ||||
|     "        for step in self.pipeline.named_steps:\n", | ||||
| @ -407,13 +492,20 @@ | ||||
|     "        import __main__ as main\n", | ||||
|     "        if not hasattr(main, '__file__'):\n", | ||||
|     "            display(\"saved pipeline. It can be loaded the following way:\")\n", | ||||
|     "            display(Markdown(\"> ```\\n\"+load_command+\"\\n```\"))\n", | ||||
|     "            display(Markdown(\"> ```\\n\"+load_command+\"\\n```\"))              # ← if we're in jupyter, print the fancy way :)\n", | ||||
|     "        else:\n", | ||||
|     "            print(\"saved pipeline. It can be loaded the following way:\")\n", | ||||
|     "            print(load_command)\n", | ||||
|     "        \n", | ||||
|     "    \n", | ||||
|     "    def load(self, prefix:str, models = []):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        load a pipeline. A pipeline should be represented by multiple model files in the form '<prefix>.<model_name>'\n", | ||||
|     "        NOTE: keras model names (if there are some) have to be defined in self.keras_models first!\n", | ||||
|     "        \n", | ||||
|     "        @param prefix: the prefix for all model files\n", | ||||
|     "        @param models: model_names to load\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        self.pipeline = None\n", | ||||
|     "        model_list = []\n", | ||||
|     "        for model in models:\n", | ||||
| @ -424,9 +516,11 @@ | ||||
|     "        self.pipeline = Pipeline(model_list)\n", | ||||
|     "    \n", | ||||
|     "    def fit(self,X,y):\n", | ||||
|     "        \"\"\"fitting the pipeline\"\"\"\n", | ||||
|     "        self.pipeline.fit(X,y)\n", | ||||
|     "    \n", | ||||
|     "    def predict(self,X):\n", | ||||
|     "        \"\"\"predict\"\"\"\n", | ||||
|     "        return self.pipeline.predict(X)\n", | ||||
|     "    " | ||||
|    ] | ||||
| @ -446,10 +540,17 @@ | ||||
|    "source": [ | ||||
|     "class trainer(object):\n", | ||||
|     "    def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):\n", | ||||
|     "        \"\"\"constructor\"\"\"\n", | ||||
|     "        self.sdm = sdm\n", | ||||
|     "        self.pm = pm\n", | ||||
|     "    \n", | ||||
|     "    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        fit data in the pipeline. Because we don't want to refit the vectorizer, the pipeline steps containing a vectorizer have to be named explicitly\n", | ||||
|     "        \n", | ||||
|     "        @param max_size: don't train more examples than that number\n", | ||||
|     "        @param disabled_fit_steps: list of pipeline steps that should not be refitted (normally all vectorizer steps)\n", | ||||
|     "        \"\"\"\n", | ||||
|     "        # TODO: make batch fitting available here (e.g. continuously waiting for new data and fitting it)\n", | ||||
|     "        if self.sdm.X is None:\n", | ||||
|     "            self.sdm.create_train_test_split()\n", | ||||
| @ -475,7 +576,7 @@ | ||||
|     "    \n", | ||||
|     "    def test(self):\n", | ||||
|     "        '''\n", | ||||
|     "        return: prediction:list, teacher:list\n", | ||||
|     "        @return: prediction:list, teacher:list\n", | ||||
|     "        '''\n", | ||||
|     "        if self.sdm.X is None:\n", | ||||
|     "            self.sdm.create_train_test_split()\n", | ||||
| @ -510,7 +611,9 @@ | ||||
|      "text": [ | ||||
|       "reading file: ./data_en/2017-11-01.json...\n", | ||||
|       "imported 33368 samples\n", | ||||
|       "remaining samples after top emoji filtering:  26197\n" | ||||
|       "remaining samples after top emoji filtering:  26197\n", | ||||
|       "Epoch 1/1\n", | ||||
|       "100/100 [==============================] - 3s 27ms/step - loss: 0.1227\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
| @ -520,31 +623,12 @@ | ||||
|     "    # we are in an interactive environment (probably in jupyter)\n", | ||||
|     "    # load data:\n", | ||||
|     "    sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1))\n", | ||||
|     "    " | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|    "cell_type": "code", | ||||
|    "execution_count": 11, | ||||
|    "metadata": {}, | ||||
|    "outputs": [ | ||||
|     { | ||||
|      "name": "stdout", | ||||
|      "output_type": "stream", | ||||
|      "text": [ | ||||
|       "Epoch 1/1\n", | ||||
|       "10000/10000 [==============================] - 109s 11ms/step - loss: 0.0197\n" | ||||
|      ] | ||||
|     } | ||||
|    ], | ||||
|    "source": [ | ||||
|     "    #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n", | ||||
|     "    #                                                           layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\n", | ||||
|     "    pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n", | ||||
|     "                                                           layers=[(2500, 'relu'),(3,None)], sdm=sdm)\n", | ||||
|     "    \n", | ||||
|     "    tr = trainer(sdm=sdm, pm=pm)\n", | ||||
|     "    tr.fit(10000)" | ||||
|     "    tr.fit(100)" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
| @ -610,7 +694,9 @@ | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "----\n", | ||||
|     "## Prediction" | ||||
|     "## Prediction\n", | ||||
|     "\n", | ||||
|     "* predict and save to `test.csv`" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
| @ -803,7 +889,9 @@ | ||||
|    "metadata": {}, | ||||
|    "source": [ | ||||
|     "----\n", | ||||
|     "## Load classifier" | ||||
|     "## Load classifier\n", | ||||
|     "\n", | ||||
|     "* load the classifier and show a test widget" | ||||
|    ] | ||||
|   }, | ||||
|   { | ||||
|  | ||||
Project/simple_approach/simple_twitter_learning.py (new file, 631 lines)
							| @ -0,0 +1,631 @@ | ||||
|  | ||||
| # coding: utf-8 | ||||
|  | ||||
| # In[1]: | ||||
|  | ||||
|  | ||||
| import pandas as pd | ||||
| from IPython.display import clear_output, Markdown, Math | ||||
| import ipywidgets as widgets | ||||
| import os | ||||
| import glob | ||||
| import json | ||||
| import numpy as np | ||||
| import itertools | ||||
| import sklearn.utils as sku | ||||
| from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer | ||||
| from sklearn.model_selection import train_test_split | ||||
| from sklearn.preprocessing import MultiLabelBinarizer | ||||
| import nltk | ||||
| from keras.models import load_model | ||||
| from sklearn.externals import joblib | ||||
| import pickle | ||||
| import operator | ||||
| from sklearn.pipeline import Pipeline | ||||
| nltk.download('punkt') | ||||
| nltk.download('averaged_perceptron_tagger') | ||||
| nltk.download('wordnet') | ||||
|  | ||||
|  | ||||
| # In[2]: | ||||
|  | ||||
|  | ||||
| import sys | ||||
| sys.path.append("..") | ||||
|  | ||||
| from Tools.Emoji_Distance import sentiment_vector_to_emoji | ||||
| from Tools.Emoji_Distance import emoji_to_sentiment_vector | ||||
|  | ||||
| def emoji2sent(emoji_arr, only_emoticons=True): | ||||
|     return np.array([emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr]) | ||||
|  | ||||
| def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True): | ||||
|     return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr] | ||||
|  | ||||
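A quick sketch of how these two converters relate (hypothetical emoji inputs; the exact sentiment values depend on Tools.Emoji_Distance):

```python
# hypothetical round trip through the helpers defined above
vecs = emoji2sent(['😅', '😭'])    # emojis -> array of sentiment vectors
back = sent2emoji(vecs)            # sentiment vectors -> closest emoji characters
print(vecs.shape, back)            # e.g. (2, 3) ['😅', '😭']
```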
|  | ||||
| # In[3]: | ||||
|  | ||||
|  | ||||
| SINGLE_LABEL = True | ||||
|  | ||||
|  | ||||
| # ---- | ||||
| # ## classes and functions we are using later: | ||||
| # ---- | ||||
|  | ||||
| # * functions for selecting items from a set / list | ||||
|  | ||||
| # In[4]: | ||||
|  | ||||
|  | ||||
| def latest(lst): | ||||
|     return lst[-1] if len(lst) > 0 else 'X'  | ||||
| def most_common(lst): | ||||
|     # trying to find the most common used emoji in the given lst | ||||
|     return max(set(lst), key=lst.count) if len(lst) > 0 else "X" # setting label to 'X' if there is an empty emoji list | ||||
|  | ||||
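Both selectors fall back to the placeholder label 'X' when a message contains no emoji; 'X' is also used as a dummy key later in generate_emoji_count_and_weights. A behaviour sketch with hypothetical inputs:

```python
# behaviour sketch for the selectors above
print(latest(['🙂', '😭']))              # '😭'  -> last emoji of the message
print(most_common(['🙂', '😭', '🙂']))   # '🙂'  -> most frequent emoji of the message
print(latest([]), most_common([]))       # 'X X' -> empty emoji list
```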
|  | ||||
| # * our emoji blacklist (skin and sex modifiers) | ||||
|  | ||||
| # In[5]: | ||||
|  | ||||
|  | ||||
| # defining blacklist for modifier emojis: | ||||
| emoji_blacklist = set([ | ||||
|     chr(0x1F3FB), | ||||
|     chr(0x1F3FC), | ||||
|     chr(0x1F3FD), | ||||
|     chr(0x1F3FE), | ||||
|     chr(0x1F3FF), | ||||
|     chr(0x2642), | ||||
|     chr(0x2640) | ||||
| ]) | ||||
|  | ||||
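These modifier code points are later stripped from the raw tweets via a character-class regex in read_files; a minimal hypothetical illustration of the effect:

```python
# hypothetical: removing a skin-tone modifier from a message
msg = "thanks \U0001F64F\U0001F3FD"                       # folded hands + medium skin-tone modifier
cleaned = "".join(c for c in msg if c not in emoji_blacklist)
print(cleaned)                                            # 'thanks 🙏' -- modifier removed
```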
|  | ||||
| # * lemmatization helper functions | ||||
|  | ||||
| # In[6]: | ||||
|  | ||||
|  | ||||
| from nltk.stem.snowball import SnowballStemmer | ||||
| from nltk.stem import WordNetLemmatizer | ||||
| from nltk import pos_tag | ||||
| from nltk import word_tokenize | ||||
| from nltk.corpus import wordnet | ||||
|  | ||||
| def get_wordnet_pos(treebank_tag): | ||||
|  | ||||
|     if treebank_tag.startswith('J'): | ||||
|         return wordnet.ADJ | ||||
|     elif treebank_tag.startswith('V'): | ||||
|         return wordnet.VERB | ||||
|     elif treebank_tag.startswith('N'): | ||||
|         return wordnet.NOUN | ||||
|     elif treebank_tag.startswith('R'): | ||||
|         return wordnet.ADV | ||||
|     else: | ||||
|         return wordnet.NOUN | ||||
|  | ||||
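A minimal sketch of how this tag mapping feeds the WordNet lemmatizer (the same pattern used in apply_stemming_and_lemmatization below):

```python
# hypothetical lemmatization of one tagged word
lemmatizer = WordNetLemmatizer()
treebank_tag = 'VBG'                                      # gerund / present participle
print(lemmatizer.lemmatize('running', pos=get_wordnet_pos(treebank_tag)))  # -> 'run'
```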
|  | ||||
| # ### sample data manager | ||||
| # the sample data manager loads and preprocesses data. | ||||
| # the most common way to use it: | ||||
| #  | ||||
| #  | ||||
| # * `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)` | ||||
| #  | ||||
| #     * Generates a sample_data_manager object and preprocesses the data in one step | ||||
| #  | ||||
|  | ||||
| # In[7]: | ||||
|  | ||||
|  | ||||
| class sample_data_manager(object): | ||||
|     @staticmethod | ||||
|     def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None): | ||||
|         """ | ||||
|         generate, read and process train data in one step. | ||||
|          | ||||
|         @param path: folder containing json files to process | ||||
|         @param only_emoticons: if True, only messages containing emoticons (provided by Tools.Emoji_Distance) are used | ||||
|         @param apply_stemming: apply stemming and lemmatization on dataset | ||||
|         @param n_top_emojis: only use messages containing one of the `n_top_emojis` most frequent emojis. Set to `-1` to disable top-emoji filtering | ||||
|         @param file_range: range of file indices to read (e.g. `range(3)` to read the first three files). If `None`, all files are read | ||||
|         @return: sample_data_manager object | ||||
|         """ | ||||
|         sdm = sample_data_manager(path) | ||||
|         sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons) | ||||
|         if apply_stemming: | ||||
|             sdm.apply_stemming_and_lemmatization() | ||||
|          | ||||
|         sdm.generate_emoji_count_and_weights() | ||||
|          | ||||
|         if n_top_emojis > 0: | ||||
|             sdm.filter_by_top_emojis(n_top=n_top_emojis) | ||||
|          | ||||
|         return sdm | ||||
|          | ||||
|      | ||||
|     def __init__(self, data_root_folder:str): | ||||
|         """ | ||||
|         constructor for manual initialization | ||||
|          | ||||
|         @param data_root_folder: folder containing json files to process | ||||
|         """ | ||||
|         self.data_root_folder = data_root_folder | ||||
|         self.json_files = sorted(glob.glob(self.data_root_folder + "/*.json")) | ||||
|         self.n_files = len(self.json_files) | ||||
|         self.raw_data = None | ||||
|         self.emojis = None | ||||
|         self.plain_text = None | ||||
|         self.labels = None | ||||
|         self.emoji_count = None | ||||
|         self.emoji_weights = None | ||||
|         self.X = None | ||||
|         self.y = None | ||||
|         self.Xt = None | ||||
|         self.yt = None | ||||
|         self.top_emojis = None | ||||
|      | ||||
|     def read_files(self, file_index_range:list, only_emoticons=True): | ||||
|         """ | ||||
|         read (multiple) files into one pandas DataFrame. | ||||
|          | ||||
|         @param file_index_range: range of file indices to read (e.g. `range(3)` to read the first three files) | ||||
|         @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance | ||||
|         """ | ||||
|         assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files | ||||
|         for i in file_index_range: | ||||
|             print("reading file: " + self.json_files[i] + "...") | ||||
|             if self.raw_data is None: | ||||
|                 self.raw_data = pd.read_json(self.json_files[i], encoding="utf-8") | ||||
|             else: | ||||
|                 self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding="utf-8")) | ||||
|          | ||||
|         self.emojis = self.raw_data['EMOJI'] | ||||
|         self.plain_text = self.raw_data['text'] | ||||
|          | ||||
|         # replacing keywords. TODO: maybe this information can be extracted and used | ||||
|         self.plain_text = self.plain_text.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","") | ||||
|          | ||||
|         # so far filtering for the latest emoji. TODO: maybe there are also better approaches | ||||
|         self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons ) | ||||
|          | ||||
|         # and filter out all samples we have no label for: | ||||
|         wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))     | ||||
|  | ||||
|         self.labels = self.labels[np.invert(wrong_labels)] | ||||
|         self.plain_text = self.plain_text[np.invert(wrong_labels)] | ||||
|         self.emojis = self.emojis[np.invert(wrong_labels)] | ||||
|          | ||||
|         print("imported " + str(len(self.labels)) + " samples") | ||||
|      | ||||
|     def apply_stemming_and_lemmatization(self): | ||||
|         """ | ||||
|         apply stemming and lemmatization to plain text samples | ||||
|         """ | ||||
|         stemmer = SnowballStemmer("english") | ||||
|         for key in self.plain_text.keys(): | ||||
|             stemmed_sent = [] | ||||
|             for word in self.plain_text[key].split(" "): | ||||
|                 word_stemmed = stemmer.stem(word) | ||||
|                 stemmed_sent.append(word_stemmed) | ||||
|             stemmed_sent = (" ").join(stemmed_sent) | ||||
|             self.plain_text[key] = stemmed_sent | ||||
|              | ||||
|         lemmatizer = WordNetLemmatizer() | ||||
|         for key in self.plain_text.keys(): | ||||
|             lemmatized_sent = [] | ||||
|             sent_pos = pos_tag(word_tokenize(self.plain_text[key])) | ||||
|             for word in sent_pos: | ||||
|                 wordnet_pos = get_wordnet_pos(word[1].lower()) | ||||
|                 word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos) | ||||
|                 lemmatized_sent.append(word_lemmatized) | ||||
|             lemmatized_sent = (" ").join(lemmatized_sent) | ||||
|             self.plain_text[key] = lemmatized_sent | ||||
|      | ||||
|     def generate_emoji_count_and_weights(self): | ||||
|         """ | ||||
|         count occurrences of emojis and compute normalized idf-style emoji weights | ||||
|         """ | ||||
|         self.emoji_count = {} | ||||
|         for e_list in self.emojis: | ||||
|             for e in set(e_list): | ||||
|                 if e not in self.emoji_count: | ||||
|                     self.emoji_count[e] = 0 | ||||
|                 self.emoji_count[e] += 1 | ||||
|          | ||||
|         emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count]) | ||||
|  | ||||
|         self.emoji_weights = {} | ||||
|         for e in self.emoji_count: | ||||
|             # tfidf for emojis | ||||
|             self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e])) | ||||
|  | ||||
|         weights_sum= sum([self.emoji_weights[x] for x in self.emoji_weights]) | ||||
|  | ||||
|         # normalize: | ||||
|         for e in self.emoji_weights: | ||||
|             self.emoji_weights[e] = self.emoji_weights[e] / weights_sum | ||||
|  | ||||
|         self.emoji_weights['X'] = 0  # dummy values | ||||
|         self.emoji_count['X'] = 0 | ||||
|      | ||||
|     def get_emoji_count(self): | ||||
|         """ | ||||
|         @return: descending list of tuples in form (<emoji as character>, <emoji count>)  | ||||
|         """ | ||||
|         assert self.emoji_count is not None | ||||
|          | ||||
|         sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1)))) | ||||
|         #display(sorted_emoji_count) | ||||
|         return sorted_emoji_count | ||||
|      | ||||
|     def filter_by_top_emojis(self,n_top = 20): | ||||
|         """ | ||||
|         filter out messages not containing one of the `n_top` emojis | ||||
|          | ||||
|         @param n_top: number of top emojis used for filtering | ||||
|         """ | ||||
|         assert self.labels is not None # ← messages are already read in | ||||
|          | ||||
|         self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]] | ||||
|         in_top = [sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels] | ||||
|         self.labels = self.labels[in_top] | ||||
|         self.plain_text = self.plain_text[in_top] | ||||
|         self.emojis = self.emojis[in_top] | ||||
|         print("remaining samples after top emoji filtering: ", len(self.labels)) | ||||
|      | ||||
|     def create_train_test_split(self, split = 0.1, random_state = 4222): | ||||
|         if self.X is not None: | ||||
|             sys.stderr.write("WARNING: overwriting existing train/test split \n") | ||||
|         self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state) | ||||
|  | ||||
|  | ||||
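Taken together, the intended use of the class looks roughly like this (a sketch assuming a `./data_en/` folder with twitter json files, as in the training cell further down):

```python
# hypothetical end-to-end usage of sample_data_manager
sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1))
print(sdm.get_emoji_count()[:5])             # five most frequent emojis with their counts
sdm.create_train_test_split(split=0.1)
print(len(sdm.X), "train samples /", len(sdm.Xt), "test samples")
```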
|  | ||||
| # * the pipeline manager saves and loads sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations | ||||
|  | ||||
| # In[8]: | ||||
|  | ||||
|  | ||||
| class pipeline_manager(object): | ||||
|     @staticmethod | ||||
|     def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []): | ||||
|         """ | ||||
|         load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>' | ||||
|          | ||||
|         @param file_prefix: basename of all files (without extension) | ||||
|         @param keras_models: list of keras model names (the file-extension part, without the prefix). Leave this list empty if this is not a keras pipeline | ||||
|         @param all_models: list of all model names to load (including the keras ones, also without the prefix). | ||||
|          | ||||
|         @return: a pipeline manager object | ||||
|         """ | ||||
|          | ||||
|         pm = pipeline_manager(keras_models=keras_models) | ||||
|         pm.load(file_prefix, all_models) | ||||
|         return pm | ||||
|      | ||||
|     @staticmethod | ||||
|     def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager): | ||||
|         ''' | ||||
|         creates pipeline with vectorizer and keras classifier | ||||
|          | ||||
|         @param vectorizer: Vectorizer object. will be fitted with data provided by sdm | ||||
|         @param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_func:str>) | ||||
|         @param sdm: sample data manager to get data for the vectorizer | ||||
|          | ||||
|         @return: a pipeline manager object | ||||
|          | ||||
|         ''' | ||||
|         from keras.models import Sequential | ||||
|         from keras.layers import Dense | ||||
|          | ||||
|         if sdm.X is None: | ||||
|             sdm.create_train_test_split() | ||||
|          | ||||
|         vec_train = vectorizer.fit_transform(sdm.X) | ||||
|         vec_test = vectorizer.transform(sdm.Xt) | ||||
|         # creating keras model: | ||||
|         model=Sequential() | ||||
|          | ||||
|         keras_layers = [] | ||||
|         first_layer = True | ||||
|         for layer in layers: | ||||
|             if first_layer: | ||||
|                 model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([" "])[0]._shape[1])) | ||||
|                 first_layer = False | ||||
|             else: | ||||
|                 model.add(Dense(units=layer[0], activation=layer[1])) | ||||
|          | ||||
|         model.compile(loss='mean_squared_error', | ||||
|                   optimizer='adam') | ||||
|          | ||||
|         pipeline = Pipeline([ | ||||
|             ('vectorizer',vectorizer), | ||||
|             ('keras_model', model) | ||||
|         ]) | ||||
|          | ||||
|         return pipeline_manager(pipeline=pipeline, keras_models=['keras_model']) | ||||
|      | ||||
|     @staticmethod | ||||
|     def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None): | ||||
|         ''' | ||||
|         creates pipeline with vectorizer and non-keras classifier | ||||
|          | ||||
|         @param vectorizer: Vectorizer object. will be fitted with data provided by sdm | ||||
|         @param classifier: unfitted classifier object (should be compatible with all sklearn classifiers) | ||||
|         @param sdm: sample data manager to get data for the vectorizer | ||||
|          | ||||
|         @return: a pipeline manager object | ||||
|         ''' | ||||
|         if sdm is not None: | ||||
|             if sdm.X is None: | ||||
|                 sdm.create_train_test_split() | ||||
|  | ||||
|             vec_train = vectorizer.fit_transform(sdm.X) | ||||
|             vec_test = vectorizer.transform(sdm.Xt) | ||||
|          | ||||
|         pipeline = Pipeline([ | ||||
|             ('vectorizer',vectorizer), | ||||
|             ('classifier', classifier) | ||||
|         ]) | ||||
|          | ||||
|         return pipeline_manager(pipeline=pipeline, keras_models=[]) | ||||
|      | ||||
|     def __init__(self, pipeline = None, keras_models = []): | ||||
|         """ | ||||
|         constructor | ||||
|          | ||||
|         @param pipeline: a sklearn pipeline | ||||
|         @param keras_models: list of keras steps in the pipeline. Necessary because saving and loading keras models differs from the scikit-learn ones | ||||
|         """ | ||||
|          | ||||
|         self.pipeline = pipeline | ||||
|         self.additional_objects = {} | ||||
|         self.keras_models = keras_models | ||||
|      | ||||
|     def save(self, prefix:str): | ||||
|         """ | ||||
|         saving the pipeline. It generates one file per model in the form: '<prefix>.<model_name>' | ||||
|          | ||||
|         @param prefix: file prefix for all models | ||||
|         """ | ||||
|          | ||||
|         print(self.keras_models) | ||||
|         # doing this like explained here: https://stackoverflow.com/a/43415459 | ||||
|         for step in self.pipeline.named_steps: | ||||
|             if step in self.keras_models: | ||||
|                 self.pipeline.named_steps[step].model.save(prefix + "." + step) | ||||
|             else: | ||||
|                 joblib.dump(self.pipeline.named_steps[step], prefix + "." + str(step)) | ||||
|          | ||||
|         load_command = "pipeline_manager.load_pipeline_from_files( '" | ||||
|         load_command += prefix + "', " + str(self.keras_models) + ", " | ||||
|         load_command += str(list(self.pipeline.named_steps.keys())) + ")" | ||||
|          | ||||
|         import __main__ as main | ||||
|         if not hasattr(main, '__file__'): | ||||
|             display("saved pipeline. It can be loaded the following way:") | ||||
|             display(Markdown("> ```\n"+load_command+"\n```"))              # ← if we're in jupyter, print the fancy way :) | ||||
|         else: | ||||
|             print("saved pipeline. It can be loaded the following way:") | ||||
|             print(load_command) | ||||
|          | ||||
|      | ||||
|     def load(self, prefix:str, models = []): | ||||
|         """ | ||||
|         load a pipeline. A pipeline should be represented by multiple model files in the form '<prefix>.<model_name>' | ||||
|         NOTE: keras model names (if there are some) have to be defined in self.keras_models first! | ||||
|          | ||||
|         @param prefix: the prefix for all model files | ||||
|         @param models: model_names to load | ||||
|         """ | ||||
|         self.pipeline = None | ||||
|         model_list = [] | ||||
|         for model in models: | ||||
|             if model in self.keras_models: | ||||
|                 model_list.append((model, load_model(prefix + "." + model))) | ||||
|             else: | ||||
|                 model_list.append((model, joblib.load(prefix+"." + model))) | ||||
|         self.pipeline = Pipeline(model_list) | ||||
|      | ||||
|     def fit(self,X,y): | ||||
|         """fitting the pipeline""" | ||||
|         self.pipeline.fit(X,y) | ||||
|      | ||||
|     def predict(self,X): | ||||
|         """predict""" | ||||
|         return self.pipeline.predict(X) | ||||
|      | ||||
|  | ||||
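A save/load round trip with one keras step could look like this (a sketch built from the training, saving and loading cells below; 'custom_classifier' is the prefix used there):

```python
# hypothetical save/load round trip for a pipeline with one keras step
pm = pipeline_manager.create_keras_pipeline_with_vectorizer(
    vectorizer=TfidfVectorizer(stop_words='english'),
    layers=[(2500, 'relu'), (3, None)],
    sdm=sdm)
pm.save('custom_classifier')   # writes custom_classifier.vectorizer and custom_classifier.keras_model
pm = pipeline_manager.load_pipeline_from_files(
    'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])
```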
|  | ||||
| # * the trainer class passes data from the sample data manager to the pipeline manager | ||||
|  | ||||
| # In[9]: | ||||
|  | ||||
|  | ||||
| class trainer(object): | ||||
|     def __init__(self, sdm:sample_data_manager, pm:pipeline_manager): | ||||
|         """constructor""" | ||||
|         self.sdm = sdm | ||||
|         self.pm = pm | ||||
|      | ||||
|     def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']): | ||||
|         """ | ||||
|         fit data in the pipeline. Because we don't want to refit the vectorizer, the pipeline steps containing a vectorizer have to be named explicitly | ||||
|          | ||||
|         @param max_size: don't train more examples than that number | ||||
|         @param disabled_fit_steps: list of pipeline steps that should not be refitted (normally all vectorizer steps) | ||||
|         """ | ||||
|         # TODO: make batch fitting available here (e.g. continuously waiting for new data and fitting it) | ||||
|         if self.sdm.X is None: | ||||
|             self.sdm.create_train_test_split() | ||||
|         disabled_fits = {} | ||||
|         disabled_fit_transforms = {} | ||||
|          | ||||
|         named_steps = self.pm.pipeline.named_steps | ||||
|          | ||||
|         for s in disabled_fit_steps: | ||||
|             # now it gets a little bit dirty: | ||||
|             # replace fit functions we don't want to call again (e.g. for vectorizers) | ||||
|             disabled_fits[s] = named_steps[s].fit | ||||
|             disabled_fit_transforms[s] = named_steps[s].fit_transform | ||||
|             named_steps[s].fit = lambda self, X, y=None: self | ||||
|             named_steps[s].fit_transform = named_steps[s].transform | ||||
|              | ||||
|         self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size]) | ||||
|          | ||||
|         # restore replaced fit functions: | ||||
|         for s in disabled_fit_steps: | ||||
|             named_steps[s].fit = disabled_fits[s] | ||||
|             named_steps[s].fit_transform = disabled_fit_transforms[s] | ||||
|      | ||||
|     def test(self): | ||||
|         ''' | ||||
|         @return: prediction:list, teacher:list | ||||
|         ''' | ||||
|         if self.sdm.X is None: | ||||
|             self.sdm.create_train_test_split() | ||||
|         return self.pm.predict(self.sdm.Xt), self.sdm.yt | ||||
|  | ||||
|      | ||||
|  | ||||
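Wiring the three classes together (a sketch mirroring the notebook cells below; a small max_size is used there for a quick smoke test):

```python
# hypothetical training and evaluation run
tr = trainer(sdm=sdm, pm=pm)
tr.fit(max_size=100)                           # fit the classifier; the vectorizer fit is skipped
pred, teacher = tr.test()                      # predictions and ground-truth sentiment vectors
print(((teacher - pred) ** 2).mean(axis=0))    # per-dimension mean squared error
```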
|  | ||||
| # ---- | ||||
| # ## Train | ||||
|  | ||||
| # * when in a notebook environment, run the cells below: | ||||
|  | ||||
| # In[10]: | ||||
|  | ||||
|  | ||||
| import __main__ as main | ||||
| if not hasattr(main, '__file__'): | ||||
|     # we are in an interactive environment (probably in jupyter) | ||||
|     # load data: | ||||
|     sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1)) | ||||
|     #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'), | ||||
|     #                                                           layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm) | ||||
|     pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'), | ||||
|                                                            layers=[(2500, 'relu'),(3,None)], sdm=sdm) | ||||
|     tr = trainer(sdm=sdm, pm=pm) | ||||
|     tr.fit(100) | ||||
|  | ||||
|  | ||||
| # ---- | ||||
| # ## save classifier | ||||
|  | ||||
| # In[13]: | ||||
|  | ||||
|  | ||||
| import __main__ as main | ||||
| if not hasattr(main, '__file__'): | ||||
|     pm.save('custom_classifier') | ||||
|  | ||||
|  | ||||
| # ---- | ||||
| # ## Prediction | ||||
| #  | ||||
| # * predict and save to `test.csv` | ||||
|  | ||||
| # In[14]: | ||||
|  | ||||
|  | ||||
| import __main__ as main | ||||
| if not hasattr(main, '__file__'): | ||||
|     pred, teacher = tr.test() | ||||
|      | ||||
|     display(pred) | ||||
|     display(teacher) | ||||
|      | ||||
|     print('prediction variance: ', np.linalg.norm(np.var(pred, axis=0))) | ||||
|     print('teacher variance: ', np.linalg.norm(np.var(teacher, axis=0))) | ||||
|      | ||||
|     # build a dataframe to visualize test results: | ||||
|     testlist = pd.DataFrame({'text': sdm.Xt,  | ||||
|                          'teacher': sent2emoji(sdm.yt), | ||||
|                          'teacher_sentiment': sdm.yt.tolist(), | ||||
|                          'predict': sent2emoji(pred, custom_target_emojis=sdm.top_emojis), | ||||
|                          'predicted_sentiment': pred.tolist()}) | ||||
|     # display: | ||||
|     display(testlist.head()) | ||||
|      | ||||
|     # mean squared error: | ||||
|     teacher_sentiments = np.array([sample[1]['teacher_sentiment'] for sample in testlist.iterrows()]) | ||||
|     predicted_sentiments = np.array([sample[1]['predicted_sentiment'] for sample in testlist.iterrows()]) | ||||
|  | ||||
|     mean_squared_error = ((teacher_sentiments - predicted_sentiments)**2).mean(axis=0) | ||||
|     print("Mean Squared Error: ", mean_squared_error) | ||||
|     print("Variance teacher: ", np.var(teacher_sentiments, axis=0)) | ||||
|     print("Variance prediction: ", np.var(predicted_sentiments, axis=0)) | ||||
|      | ||||
|     # save to csv: | ||||
|     testlist.to_csv('test.csv') | ||||
|  | ||||
|  | ||||
| # ---- | ||||
| # ## Load classifier | ||||
| #  | ||||
| # * load the classifier and show a test widget | ||||
|  | ||||
| # In[15]: | ||||
|  | ||||
|  | ||||
| import __main__ as main | ||||
| if not hasattr(main, '__file__'): | ||||
|     try: | ||||
|         pm | ||||
|     except NameError: | ||||
|         pass | ||||
|     else: | ||||
|         del pm # delete existing pipeline manager if there is one | ||||
|  | ||||
|     pm = pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model']) | ||||
|     lookup_emojis = [#'😂', | ||||
|          '😭', | ||||
|          '😍', | ||||
|          '😩', | ||||
|          '😊', | ||||
|          '😘', | ||||
|          '🙏', | ||||
|          '🙌', | ||||
|          '😉', | ||||
|          '😁', | ||||
|          '😅', | ||||
|          '😎', | ||||
|          '😢', | ||||
|          '😒', | ||||
|          '😏', | ||||
|          '😌', | ||||
|          '😔', | ||||
|          '😋', | ||||
|          '😀', | ||||
|          '😤'] | ||||
|     out = widgets.Output() | ||||
|  | ||||
|     t = widgets.Text() | ||||
|     b = widgets.Button( | ||||
|         description='get emoji', | ||||
|         disabled=False, | ||||
|         button_style='', # 'success', 'info', 'warning', 'danger' or '' | ||||
|         tooltip='Click me', | ||||
|         icon='check' | ||||
|     ) | ||||
|  | ||||
|  | ||||
|  | ||||
|     def handle_submit(sender): | ||||
|         with out: | ||||
|             clear_output() | ||||
|         with out: | ||||
|             pred = pm.predict([t.value]) | ||||
|  | ||||
|             display(Markdown("# Predicted Emoji " + str(sent2emoji(pred, lookup_emojis)[0]))) | ||||
|             display(Markdown("# Sentiment Vector: $$ \pmatrix{" + str(pred[0,0]) + | ||||
|                              "\\\\" + str(pred[0,1]) + "\\\\" + str(pred[0,2]) + "}$$")) | ||||
|  | ||||
|     b.on_click(handle_submit) | ||||
|  | ||||
|     display(t) | ||||
|     display(widgets.VBox([b, out]))   | ||||
|  | ||||