just some little updates
This commit is contained in:
parent a45fa5f843
commit dd95cfa144
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 15,
    "metadata": {},
    "outputs": [
     {
@@ -25,7 +25,7 @@
        "True"
       ]
      },
-     "execution_count": 1,
+     "execution_count": 15,
      "metadata": {},
      "output_type": "execute_result"
     }
@@ -44,6 +44,10 @@
     "from sklearn.model_selection import train_test_split\n",
     "from sklearn.preprocessing import MultiLabelBinarizer\n",
     "import nltk\n",
+    "from keras.models import load_model\n",
+    "from sklearn.externals import joblib\n",
+    "import operator\n",
+    "from sklearn.pipeline import Pipeline\n",
     "nltk.download('punkt')\n",
     "nltk.download('averaged_perceptron_tagger')\n",
     "nltk.download('wordnet')"
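Side note on the imports added above: from sklearn.externals import joblib only resolves on older scikit-learn releases; the shim was removed in scikit-learn 0.23, where the standalone joblib package is the drop-in replacement. A small compatibility sketch, not part of the commit:

    # prefer the standalone joblib package when the sklearn.externals shim is gone
    try:
        from sklearn.externals import joblib  # scikit-learn < 0.23
    except ImportError:
        import joblib  # newer scikit-learn: install joblib separately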
@@ -51,7 +55,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -61,11 +65,11 @@
     "from Tools.Emoji_Distance import sentiment_vector_to_emoji\n",
     "from Tools.Emoji_Distance import emoji_to_sentiment_vector\n",
     "\n",
-    "def emoji2sent(emoji_arr):\n",
-    "    return np.array([emoji_to_sentiment_vector(e) for e in emoji_arr])\n",
+    "def emoji2sent(emoji_arr, only_emoticons=True):\n",
+    "    return np.array([emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])\n",
     "\n",
-    "def sent2emoji(sent_arr, custom_target_emojis=None):\n",
-    "    return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis) for s in sent_arr]"
+    "def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):\n",
+    "    return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]"
    ]
   },
   {
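The hunk above threads an only_emoticons switch through the two wrapper functions. A minimal call sketch, assuming the Tools.Emoji_Distance helpers behave as their names suggest; the emoji literals are placeholders and the cell above must already have been run in the notebook:

    # restrict the sentiment lookup to emoticon-style emojis (the default)
    sentiments = emoji2sent(["😂", "😢"])
    # or search the full emoji table instead
    sentiments_full = emoji2sent(["😂", "😢"], only_emoticons=False)
    # map sentiment vectors back to emojis with the same switch
    predicted = sent2emoji(sentiments, only_emoticons=True)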
@@ -77,6 +81,311 @@
     "SINGLE_LABEL = True"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "----\n",
+    "## classes and functions we are using later:\n",
+    "----"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* functions for selecting items from a set / list"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def latest(lst):\n",
+    "    return lst[-1] if len(lst) > 0 else 'X' \n",
+    "def most_common(lst):\n",
+    "    # trying to find the most commonly used emoji in the given lst\n",
+    "    return max(set(lst), key=lst.count) if len(lst) > 0 else \"X\" # setting label to 'X' if there is an empty emoji list"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* our emoji blacklist (skin and sex modifiers)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# defining blacklist for modifier emojis:\n",
+    "emoji_blacklist = set([\n",
+    "    chr(0x1F3FB),\n",
+    "    chr(0x1F3FC),\n",
+    "    chr(0x1F3FD),\n",
+    "    chr(0x1F3FE),\n",
+    "    chr(0x1F3FF),\n",
+    "    chr(0x2642),\n",
+    "    chr(0x2640)\n",
+    "])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* lemmatization helper functions"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from nltk.stem.snowball import SnowballStemmer\n",
+    "from nltk.stem import WordNetLemmatizer\n",
+    "from nltk import pos_tag\n",
+    "from nltk import word_tokenize\n",
+    "from nltk.corpus import wordnet\n",
+    "\n",
+    "def get_wordnet_pos(treebank_tag):\n",
+    "\n",
+    "    if treebank_tag.startswith('J'):\n",
+    "        return wordnet.ADJ\n",
+    "    elif treebank_tag.startswith('V'):\n",
+    "        return wordnet.VERB\n",
+    "    elif treebank_tag.startswith('N'):\n",
+    "        return wordnet.NOUN\n",
+    "    elif treebank_tag.startswith('R'):\n",
+    "        return wordnet.ADV\n",
+    "    else:\n",
+    "        return wordnet.NOUN"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* the pipeline manager saves and stores sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class pipeline_manager(object):\n",
+    "    @staticmethod\n",
+    "    def load_pipeline_from_files(file_prefix:str, keras_models = []):\n",
+    "        pm = pipeline_manager()\n",
+    "        pm.load(file_prefix, keras_models=keras_models)\n",
+    "        return pm\n",
+    "    \n",
+    "    def __init__(self, pipeline = None):\n",
+    "        self.pipeline = pipeline\n",
+    "        self.additional_objects = {}\n",
+    "    \n",
+    "    def save(self, prefix:str, keras_models = []):\n",
+    "        # doing this as explained here: https://stackoverflow.com/a/43415459\n",
+    "        for km in keras_models:\n",
+    "            self.pipeline.named_steps[km].model.save(prefix + \".\" + km)\n",
+    "            # setting this part to None:\n",
+    "            self.pipeline.named_steps[km].model = None\n",
+    "        \n",
+    "        # now we can save the pipeline:\n",
+    "        joblib.dump(self.pipeline, prefix + \".pipeline\")\n",
+    "    \n",
+    "    def load(self, prefix:str, keras_models=[]):\n",
+    "        self.pipeline = joblib.load(prefix + \".pipeline\")\n",
+    "        for km in keras_models:\n",
+    "            self.pipeline.named_steps[km].model = load_model(prefix + \".\" + km)\n",
+    "    \n",
+    "    def fit(self, X, y):\n",
+    "        self.pipeline.fit(X, y)\n",
+    "    \n",
+    "    def predict(self, X):\n",
+    "        return self.pipeline.predict(X)\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* the sample data manager loads and preprocesses data"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class sample_data_manager(object):\n",
+    "    @staticmethod\n",
+    "    def generate_and_read(path:str):\n",
+    "        sdm = sample_data_manager(path)\n",
+    "        return sdm\n",
+    "    \n",
+    "    def __init__(self, data_root_folder:str):\n",
+    "        self.data_root_folder = data_root_folder\n",
+    "        self.json_files = sorted(glob.glob(self.data_root_folder + \"/*.json\"))\n",
+    "        self.n_files = len(self.json_files)\n",
+    "        self.raw_data = None\n",
+    "        self.emojis = None\n",
+    "        self.plain_text = None\n",
+    "        self.labels = None\n",
+    "        self.emoji_count = None\n",
+    "        self.emoji_weights = None\n",
+    "        self.X = None\n",
+    "        self.y = None\n",
+    "        self.Xt = None\n",
+    "        self.yt = None\n",
+    "    \n",
+    "    def read_files(self, file_index_range:list, only_emoticons=True):\n",
+    "        assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files\n",
+    "        for i in file_index_range:\n",
+    "            print(\"reading file: \" + self.json_files[i] + \"...\")\n",
+    "            if self.raw_data is None:\n",
+    "                self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n",
+    "            else:\n",
+    "                self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n",
+    "        \n",
+    "        self.emojis = self.raw_data['EMOJI']\n",
+    "        self.plain_text = self.raw_data['text']\n",
+    "        \n",
+    "        # replacing keywords. TODO: maybe this information can be extracted and used\n",
+    "        self.plain_text = self.plain_text.str.replace(\"(<EMOJI>|<USER>|<HASHTAG>)\",\"\").str.replace(\"[\" + \"\".join(list(emoji_blacklist)) + \"]\",\"\")\n",
+    "        \n",
+    "        # so far filtering for the latest emoji. TODO: maybe there are also better approaches\n",
+    "        self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons)\n",
+    "        \n",
+    "        # and filter out all samples we have no label for:\n",
+    "        wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))\n",
+    "\n",
+    "        self.labels = self.labels[np.invert(wrong_labels)]\n",
+    "        self.plain_text = self.plain_text[np.invert(wrong_labels)]\n",
+    "        self.emojis = self.emojis[np.invert(wrong_labels)]\n",
+    "        \n",
+    "        print(\"imported \" + str(len(self.labels)) + \" samples\")\n",
+    "    \n",
+    "    def apply_stemming_and_lemmatization(self):\n",
+    "        stemmer = SnowballStemmer(\"english\")\n",
+    "        for key in self.plain_text.keys():\n",
+    "            stemmed_sent = []\n",
+    "            for word in self.plain_text[key].split(\" \"):\n",
+    "                word_stemmed = stemmer.stem(word)\n",
+    "                stemmed_sent.append(word_stemmed)\n",
+    "            stemmed_sent = (\" \").join(stemmed_sent)\n",
+    "            self.plain_text[key] = stemmed_sent\n",
+    "        \n",
+    "        lemmatizer = WordNetLemmatizer()\n",
+    "        for key in self.plain_text.keys():\n",
+    "            lemmatized_sent = []\n",
+    "            sent_pos = pos_tag(word_tokenize(self.plain_text[key]))\n",
+    "            for word in sent_pos:\n",
+    "                wordnet_pos = get_wordnet_pos(word[1].lower())\n",
+    "                word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)\n",
+    "                lemmatized_sent.append(word_lemmatized)\n",
+    "            lemmatized_sent = (\" \").join(lemmatized_sent)\n",
+    "            self.plain_text[key] = lemmatized_sent\n",
+    "    \n",
+    "    def generate_emoji_count_and_weights(self):\n",
+    "        self.emoji_count = {}\n",
+    "        for e_list in self.emojis:\n",
+    "            for e in set(e_list):\n",
+    "                if e not in self.emoji_count:\n",
+    "                    self.emoji_count[e] = 0\n",
+    "                self.emoji_count[e] += 1\n",
+    "        \n",
+    "        emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])\n",
+    "\n",
+    "        self.emoji_weights = {}\n",
+    "        for e in self.emoji_count:\n",
+    "            # tfidf for emojis\n",
+    "            self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))\n",
+    "\n",
+    "        weights_sum = sum([self.emoji_weights[x] for x in self.emoji_weights])\n",
+    "\n",
+    "        # normalize:\n",
+    "        for e in self.emoji_weights:\n",
+    "            self.emoji_weights[e] = self.emoji_weights[e] / weights_sum\n",
+    "\n",
+    "        self.emoji_weights['X'] = 0 # dummy values\n",
+    "        self.emoji_count['X'] = 0\n",
+    "    \n",
+    "    def get_emoji_count(self):\n",
+    "        sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))\n",
+    "        return sorted_emoji_count\n",
+    "    \n",
+    "    def filter_by_top_emojis(self, n_top = 20):\n",
+    "        in_top = [sentiment_vector_to_emoji(x) in [e[0] for e in self.get_emoji_count()[:n_top]] for x in self.labels]\n",
+    "        self.labels = self.labels[in_top]\n",
+    "        self.plain_text = self.plain_text[in_top]\n",
+    "        self.emojis = self.emojis[in_top]\n",
+    "        print(\"remaining samples after top emoji filtering: \", len(self.labels))\n",
+    "    \n",
+    "    def create_train_test_split(self, split = 0.1, random_state = 4222):\n",
+    "        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state)\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* the trainer class passes data from the sample manager to the pipeline manager"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class trainer(object):\n",
+    "    def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):\n",
+    "        self.sdm = sdm\n",
+    "        self.pm = pm\n",
+    "    \n",
+    "    def fit(self):\n",
+    "        # TODO: make batch fitting available here\n",
+    "        self.pm.fit(X = self.sdm.X, y = self.sdm.y)\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "* when in a notebook environment: run the cells below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "you are in a notebook\n"
+     ]
+    }
+   ],
+   "source": [
+    "import __main__ as main\n",
+    "if not hasattr(main, '__file__'):\n",
+    "    print(\"you are in a notebook\")"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
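The block added above introduces three helper classes: sample_data_manager, pipeline_manager and trainer. A hedged sketch of how they might be wired together inside the notebook; the data folder path is a placeholder and pipeline construction is not part of this commit:

    # sketch only, to be run after the cells above have been executed
    sdm = sample_data_manager("./data")        # hypothetical folder of *.json twitter dumps
    sdm.read_files(range(sdm.n_files))         # load raw tweets and derive sentiment labels
    sdm.apply_stemming_and_lemmatization()
    sdm.generate_emoji_count_and_weights()
    sdm.filter_by_top_emojis(n_top=20)
    sdm.create_train_test_split()

    pm = pipeline_manager(pipeline=None)       # a real sklearn/keras pipeline would go here
    t = trainer(sdm=sdm, pm=pm)
    # t.fit()                                  # would forward sdm.X / sdm.y to the pipeline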
@@ -1298,26 +1607,6 @@
     "plain_text = plain_text.str.replace(\"(<EMOJI>|<USER>|<HASHTAG>)\",\"\").str.replace(\"[\" + \"\".join(list(emoji_blacklist)) + \"]\",\"\")"
    ]
   },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "* defining different criterias for choosing a single emoji (currently `latest` is used)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 10,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def latest(lst):\n",
-    "    return lst[-1] if len(lst) > 0 else 'X' \n",
-    "def most_common(lst):\n",
-    "    # trying to find the most common used emoji in the given lst\n",
-    "    return max(set(lst), key=lst.count) if len(lst) > 0 else \"X\" # setting label to 'X' if there is an empty emoji list"
-   ]
-  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -2722,7 +3011,7 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
@@ -2743,17 +3032,9 @@
  },
  {
   "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": null,
   "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "Using TensorFlow backend.\n"
-     ]
-    }
-   ],
+   "outputs": [],
   "source": [
    "import keras\n",
    "import pickle\n",