first refactored twitter_learning version

parent dd95cfa144
commit e0ed251c8b
@@ -2,9 +2,18 @@
 "cells": [
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": 1,
 "metadata": {},
 "outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/home/jonas/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
+" from ._conv import register_converters as _register_converters\n",
+"Using TensorFlow backend.\n"
+]
+},
 {
 "name": "stdout",
 "output_type": "stream",
@@ -25,7 +34,7 @@
 "True"
 ]
 },
-"execution_count": 15,
+"execution_count": 1,
 "metadata": {},
 "output_type": "execute_result"
 }
@@ -46,6 +55,7 @@
 "import nltk\n",
 "from keras.models import load_model\n",
 "from sklearn.externals import joblib\n",
+"import pickle\n",
 "import operator\n",
 "from sklearn.pipeline import Pipeline\n",
 "nltk.download('punkt')\n",
@@ -55,7 +65,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": 2,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -168,53 +178,6 @@
 " return wordnet.NOUN"
 ]
 },
-{
-"cell_type": "markdown",
-"metadata": {},
-"source": [
-"* the pipeline manager saves and stores sklearn pipelines. Keras models are handled differently, so the have to be named explicitly during save and load operations"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 16,
-"metadata": {},
-"outputs": [],
-"source": [
-"class pipeline_manager(object):\n",
-" @staticmethod\n",
-" def load_pipeline_from_files(file_prefix:str, keras_models = []):\n",
-" pm = pipeline_manager()\n",
-" pm.load(file_prefix, keras_models=keras_models)\n",
-" return pm\n",
-" \n",
-" def __init__(self, pipeline = None):\n",
-" self.pipeline = pipeline\n",
-" self.additional_objects = {}\n",
-" \n",
-" def save(prefix:str, keras_models = []):\n",
-" # doing this like explained here: https://stackoverflow.com/a/43415459\n",
-" for km in keras_models:\n",
-" self.pipeline.named_steps[km].model.save(prefix + \".\" + km)\n",
-" # setting this part to None:\n",
-" self.pipeline.named_steps[km].model = None\n",
-" \n",
-" # now we can save the pipeline:\n",
-" joblib.dump(self.pipeline, prefix + \".pipeline\")\n",
-" \n",
-" def load(prefix:str, keras_models=[]):\n",
-" self.pipeline = joblib.load(prefix + \".pipeline\")\n",
-" for km in keras_models:\n",
-" self.pipeline.named_steps[km].model = load_model(prefix + \".\" + km)\n",
-" \n",
-" def fit(self,X,y):\n",
-" self.pipeline.fit(X,y)\n",
-" \n",
-" def predict(self,X):\n",
-" self.pipeline.predict(X)\n",
-" "
-]
-},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -224,14 +187,25 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
+"execution_count": 11,
 "metadata": {},
 "outputs": [],
 "source": [
 "class sample_data_manager(object):\n",
 " @staticmethod\n",
-" def generate_and_read(path:str):\n",
-" sdm = sample_data_manager(path):\n",
+" def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None):\n",
+" sdm = sample_data_manager(path)\n",
+" sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)\n",
+" if apply_stemming:\n",
+" sdm.apply_stemming_and_lemmatization()\n",
+" \n",
+" sdm.generate_emoji_count_and_weights()\n",
+" \n",
+" if n_top_emojis > 0:\n",
+" sdm.filter_by_top_emojis(n_top=n_top_emojis)\n",
+" \n",
+" return sdm\n",
+" \n",
 " \n",
 " def __init__(self, data_root_folder:str):\n",
 " self.data_root_folder = data_root_folder\n",
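
The refactored `generate_and_read` factory bundles the whole preprocessing chain into one call: read the JSON files, optionally stem and lemmatize, count emojis, and filter down to the most frequent ones. For reference, this is how the notebook itself invokes it further down in this commit (in the interactive branch):

```python
# taken from the notebook's interactive branch later in this commit:
sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1))
```
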
@@ -247,11 +221,12 @@
 " self.y = None\n",
 " self.Xt = None\n",
 " self.yt = None\n",
+" self.top_emojis = None\n",
 " \n",
 " def read_files(self, file_index_range:list, only_emoticons=True):\n",
 " assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files\n",
 " for i in file_index_range:\n",
-" print(\"reaing file: \" + self.json_files[i] + \"...\")\n",
+" print(\"reading file: \" + self.json_files[i] + \"...\")\n",
 " if self.raw_data is None:\n",
 " self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n",
 " else:\n",
@@ -264,7 +239,7 @@
 " self.plain_text = self.plain_text.str.replace(\"(<EMOJI>|<USER>|<HASHTAG>)\",\"\").str.replace(\"[\" + \"\".join(list(emoji_blacklist)) + \"]\",\"\")\n",
 " \n",
 " # so far filtering for the latest emoji. TODO: maybe there are also better approaches\n",
-" self.labels = emoji2sent([latest(e) for e in emojis], only_emoticons=only_emoticons )\n",
+" self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons )\n",
 " \n",
 " # and filter out all samples we have no label for:\n",
 " wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1)) \n",
@@ -273,7 +248,7 @@
 " self.plain_text = self.plain_text[np.invert(wrong_labels)]\n",
 " self.emojis = self.emojis[np.invert(wrong_labels)]\n",
 " \n",
-" print(\"imported \" + len(self.labels) + \" samples\")\n",
+" print(\"imported \" + str(len(self.labels)) + \" samples\")\n",
 " \n",
 " def apply_stemming_and_lemmatization(self):\n",
 " stemmer = SnowballStemmer(\"english\")\n",
@@ -322,20 +297,142 @@
 " \n",
 " def get_emoji_count(self):\n",
 " sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))\n",
+" #display(sorted_emoji_count)\n",
 " return sorted_emoji_count\n",
 " \n",
 " def filter_by_top_emojis(self,n_top = 20):\n",
-" in_top = [sentiment_vector_to_emoji(x) in self.get_emoji_count()[:n_top] for x in self.labels]\n",
+" self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]\n",
+" in_top = [sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n",
 " self.labels = self.labels[in_top]\n",
 " self.plain_text = self.plain_text[in_top]\n",
 " self.emojis = self.emojis[in_top]\n",
-" print(\"remaining samples after top emoji filtering: \", len(labels))\n",
+" print(\"remaining samples after top emoji filtering: \", len(self.labels))\n",
 " \n",
 " def create_train_test_split(self, split = 0.1, random_state = 4222):\n",
 " self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state)\n",
 "\n"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"* the pipeline manager saves and stores sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 21,
+"metadata": {},
+"outputs": [],
+"source": [
+"class pipeline_manager(object):\n",
+" @staticmethod\n",
+" def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n",
+" pm = pipeline_manager(keras_models=keras_models)\n",
+" pm.load(file_prefix, all_models)\n",
+" return pm\n",
+" \n",
+" @staticmethod\n",
+" def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager):\n",
+" '''\n",
+" creates pipeline with vectorizer and keras classifier\n",
+" '''\n",
+" from keras.models import Sequential\n",
+" from keras.layers import Dense\n",
+" \n",
+" if sdm.X is None:\n",
+" sdm.create_train_test_split()\n",
+" \n",
+" vec_train = vectorizer.fit_transform(sdm.X)\n",
+" vec_test = vectorizer.transform(sdm.Xt)\n",
+" # creating keras model:\n",
+" model=Sequential()\n",
+" \n",
+" keras_layers = []\n",
+" first_layer = True\n",
+" for layer in layers:\n",
+" if first_layer:\n",
+" model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([\" \"])[0]._shape[1]))\n",
+" first_layer = False\n",
+" else:\n",
+" model.add(Dense(units=layer[0], activation=layer[1]))\n",
+" \n",
+" model.compile(loss='mean_squared_error',\n",
+" optimizer='adam')\n",
+" \n",
+" pipeline = Pipeline([\n",
+" ('vectorizer',vectorizer),\n",
+" ('keras_model', model)\n",
+" ])\n",
+" \n",
+" return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])\n",
+" \n",
+" @staticmethod\n",
+" def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):\n",
+" '''\n",
+" creates a pipeline with vectorizer and classifier for non keras classifiers\n",
+" if sample data manager is given, the vectorizer will be also fitted!\n",
+" '''\n",
+" if sdm is not None:\n",
+" if sdm.X is None:\n",
+" sdm.create_train_test_split()\n",
+"\n",
+" vec_train = vectorizer.fit_transform(sdm.X)\n",
+" vec_test = vectorizer.transform(sdm.Xt)\n",
+" \n",
+" pipeline = Pipeline([\n",
+" ('vectorizer',vectorizer),\n",
+" ('classifier', classifier)\n",
+" ])\n",
+" \n",
+" return pipeline_manager(pipeline=pipeline, keras_models=[])\n",
+" \n",
+" def __init__(self, pipeline = None, keras_models = []):\n",
+" self.pipeline = pipeline\n",
+" self.additional_objects = {}\n",
+" self.keras_models = keras_models\n",
+" \n",
+" def save(self, prefix:str):\n",
+" print(self.keras_models)\n",
+" # doing this like explained here: https://stackoverflow.com/a/43415459\n",
+" for step in self.pipeline.named_steps:\n",
+" if step in self.keras_models:\n",
+" self.pipeline.named_steps[step].model.save(prefix + \".\" + step)\n",
+" else:\n",
+" joblib.dump(self.pipeline.named_steps[step], prefix + \".\" + str(step))\n",
+" \n",
+" load_command = \"pipeline_manager.load_pipeline_from_files( '\"\n",
+" load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n",
+" load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n",
+" \n",
+" import __main__ as main\n",
+" if not hasattr(main, '__file__'):\n",
+" display(\"saved pipeline. It can be loaded the following way:\")\n",
+" display(Markdown(\"> ```\\n\"+load_command+\"\\n```\"))\n",
+" else:\n",
+" print(\"saved pipeline. It can be loaded the following way:\")\n",
+" print(load_command)\n",
+" \n",
+" \n",
+" def load(self, prefix:str, models = []):\n",
+" self.pipeline = None\n",
+" model_list = []\n",
+" for model in models:\n",
+" if model in self.keras_models:\n",
+" model_list.append((model, load_model(prefix + \".\" + model)))\n",
+" else:\n",
+" model_list.append((model, joblib.load(prefix+\".\" + model)))\n",
+" self.pipeline = Pipeline(model_list)\n",
+" \n",
+" def fit(self,X,y):\n",
+" self.pipeline.fit(X,y)\n",
+" \n",
+" def predict(self,X):\n",
+" return self.pipeline.predict(X)\n",
+" "
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
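
The `pipeline_manager` added here follows the Stack Overflow approach linked in the code: Keras models cannot be pickled by joblib, so every step listed in `keras_models` is serialized with Keras' own `model.save()` while all other pipeline steps go through `joblib.dump`, and loading rebuilds the `Pipeline` from the per-step files. Below is a minimal standalone sketch of that split-serialization pattern; the function names are illustrative, the per-step file naming `prefix + "." + name` mirrors the notebook, and treating the Keras step as the model object itself is an assumption made for brevity (the notebook goes through the step's `.model` attribute).

```python
# Minimal sketch of the split save/load pattern, assuming each Keras step in
# the pipeline is the Keras model itself and every other step is picklable.
from keras.models import load_model
from sklearn.externals import joblib  # matches the notebook's (old) sklearn import
from sklearn.pipeline import Pipeline

def save_pipeline(pipeline: Pipeline, prefix: str, keras_steps: list):
    for name, step in pipeline.named_steps.items():
        if name in keras_steps:
            step.save(prefix + "." + name)        # Keras' own HDF5 serialization
        else:
            joblib.dump(step, prefix + "." + name)  # plain sklearn step

def load_pipeline(prefix: str, step_names: list, keras_steps: list) -> Pipeline:
    steps = []
    for name in step_names:
        if name in keras_steps:
            steps.append((name, load_model(prefix + "." + name)))
        else:
            steps.append((name, joblib.load(prefix + "." + name)))
    # rebuild the pipeline in the original step order:
    return Pipeline(steps)
```
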
@@ -345,7 +442,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 23,
+"execution_count": 9,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -354,12 +451,49 @@
 " self.sdm = sdm\n",
 " self.pm = pm\n",
 " \n",
-" def fit(self):\n",
-" # TODO: make batch fitting available here\n",
-" self.pm.fit(X = self.sdm.X, y = self.sdm.y)\n",
+" def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):\n",
+" # TODO: make batch fitting available here (e.g. continuously waiting for data and fitting it)\n",
+" if self.sdm.X is None:\n",
+" self.sdm.create_train_test_split()\n",
+" disabled_fits = {}\n",
+" disabled_fit_transforms = {}\n",
+" \n",
+" named_steps = self.pm.pipeline.named_steps\n",
+" \n",
+" for s in disabled_fit_steps:\n",
+" # now it gets a little bit dirty:\n",
+" # replace fit functions we don't want to call again (e.g. for vectorizers)\n",
+" disabled_fits[s] = named_steps[s].fit\n",
+" disabled_fit_transforms[s] = named_steps[s].fit_transform\n",
+" named_steps[s].fit = lambda self, X, y=None: self\n",
+" named_steps[s].fit_transform = named_steps[s].transform\n",
+" \n",
+" self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n",
+" \n",
+" # restore replaced fit functions:\n",
+" for s in disabled_fit_steps:\n",
+" named_steps[s].fit = disabled_fits[s]\n",
+" named_steps[s].fit_transform = disabled_fit_transforms[s]\n",
+" \n",
+" def test(self):\n",
+" '''\n",
+" return: prediction:list, teacher:list\n",
+" '''\n",
+" if self.sdm.X is None:\n",
+" self.sdm.create_train_test_split()\n",
+" return self.pm.predict(self.sdm.Xt), self.sdm.yt\n",
+"\n",
 " "
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"----\n",
+"## Train"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
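
`trainer.fit` above avoids refitting an already-fitted vectorizer by temporarily swapping the step's `fit`/`fit_transform` for no-ops before calling `Pipeline.fit`, then restoring the originals. A self-contained sketch of the same trick, under the assumption that the frozen steps are already fitted (`frozen_steps` is an illustrative name; the lambda binds the step through a default argument, which is needed because an attribute assigned on an instance receives no `self`):

```python
# Sketch: temporarily freeze selected pipeline steps during Pipeline.fit,
# so an already-fitted vectorizer only transforms and is never refitted.
def fit_without_refitting(pipeline, X, y, frozen_steps=('vectorizer',)):
    saved = {}
    for name in frozen_steps:
        step = pipeline.named_steps[name]
        saved[name] = (step.fit, step.fit_transform)
        # instance attributes shadow the class methods; bind the step via a
        # default argument so each lambda keeps its own step object:
        step.fit = lambda X, y=None, _step=step: _step
        step.fit_transform = step.transform
    try:
        pipeline.fit(X, y)
    finally:
        # always restore the original bound methods:
        for name, (fit, fit_transform) in saved.items():
            pipeline.named_steps[name].fit = fit
            pipeline.named_steps[name].fit_transform = fit_transform
```
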
@@ -369,21 +503,371 @@
 },
 {
 "cell_type": "code",
-"execution_count": 25,
+"execution_count": 12,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"you are in a notebook\n"
+"reading file: ./data_en/2017-11-01.json...\n",
+"imported 33368 samples\n",
+"remaining samples after top emoji filtering: 26197\n"
 ]
 }
 ],
 "source": [
 "import __main__ as main\n",
 "if not hasattr(main, '__file__'):\n",
-" print(\"you are in a notebook\")"
+" # we are in an interactive environment (probably in jupyter)\n",
+" # load data:\n",
+" sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1))\n",
+" "
+]
+},
+{
+"cell_type": "code",
+"execution_count": 22,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Epoch 1/1\n",
+"100/100 [==============================] - 3s 27ms/step - loss: 0.1225\n"
+]
+}
+],
+"source": [
+" #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
+" # layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\n",
+" pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
+" layers=[(2500, 'relu'),(3,None)], sdm=sdm)\n",
+" \n",
+" tr = trainer(sdm=sdm, pm=pm)\n",
+" tr.fit(100)"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": []
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"----\n",
+"## save classifier"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 23,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"['keras_model']\n"
+]
+},
+{
+"data": {
+"text/plain": [
+"'saved pipeline. It can be loaded the following way:'"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"data": {
+"text/markdown": [
+"> ```\n",
+"pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
+"```"
+],
+"text/plain": [
+"<IPython.core.display.Markdown object>"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+}
+],
+"source": [
+"pm.save('custom_classifier')"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"----\n",
+"## Prediction"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 33,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"array([[0.15801723, 0.11859037, 0.10975348],\n",
+" [0.17035495, 0.10913695, 0.09354854],\n",
+" [0.11777218, 0.06569621, 0.06620223],\n",
+" ...,\n",
+" [0.14746301, 0.09480572, 0.08052498],\n",
+" [0.15932804, 0.11895895, 0.10343507],\n",
+" [0.17135939, 0.1061406 , 0.09402546]], dtype=float32)"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"data": {
+"text/plain": [
+"array([[0.46813021, 0.24716181, 0.28470797],\n",
+" [0.46813021, 0.24716181, 0.28470797],\n",
+" [0.70401758, 0.05932203, 0.23666039],\n",
+" ...,\n",
+" [0.46813021, 0.24716181, 0.28470797],\n",
+" [0.46813021, 0.24716181, 0.28470797],\n",
+" [0.46813021, 0.24716181, 0.28470797]])"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"prediction variance: 0.0006294687\n",
+"teacher variance: 0.03341702104519965\n"
+]
+},
+{
+"data": {
+"text/html": [
+"<div>\n",
+"<style scoped>\n",
+" .dataframe tbody tr th:only-of-type {\n",
+" vertical-align: middle;\n",
+" }\n",
+"\n",
+" .dataframe tbody tr th {\n",
+" vertical-align: top;\n",
+" }\n",
+"\n",
+" .dataframe thead th {\n",
+" text-align: right;\n",
+" }\n",
+"</style>\n",
+"<table border=\"1\" class=\"dataframe\">\n",
+" <thead>\n",
+" <tr style=\"text-align: right;\">\n",
+" <th></th>\n",
+" <th>predict</th>\n",
+" <th>predicted_sentiment</th>\n",
+" <th>teacher</th>\n",
+" <th>teacher_sentiment</th>\n",
+" <th>text</th>\n",
+" </tr>\n",
+" </thead>\n",
+" <tbody>\n",
+" <tr>\n",
+" <th>35671</th>\n",
+" <td>😢</td>\n",
+" <td>[0.15801723301410675, 0.11859036982059479, 0.1...</td>\n",
+" <td>😂</td>\n",
+" <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
+" <td>i feel like i care so much more in everi situat</td>\n",
+" </tr>\n",
+" <tr>\n",
+" <th>25683</th>\n",
+" <td>😢</td>\n",
+" <td>[0.1703549474477768, 0.10913695394992828, 0.09...</td>\n",
+" <td>😂</td>\n",
+" <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
+" <td>i did not meat to add that 2 there ... hav see...</td>\n",
+" </tr>\n",
+" <tr>\n",
+" <th>8985</th>\n",
+" <td>😢</td>\n",
+" <td>[0.1177721843123436, 0.06569620966911316, 0.06...</td>\n",
+" <td>😊</td>\n",
+" <td>[0.7040175768989329, 0.059322033898305086, 0.2...</td>\n",
+" <td>never…</td>\n",
+" </tr>\n",
+" <tr>\n",
+" <th>5410</th>\n",
+" <td>😢</td>\n",
+" <td>[0.18182337284088135, 0.12382747232913971, 0.0...</td>\n",
+" <td>😂</td>\n",
+" <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
+" <td>lmao on me ! ! ! wtf wa he suppos to say</td>\n",
+" </tr>\n",
+" <tr>\n",
+" <th>62611</th>\n",
+" <td>😢</td>\n",
+" <td>[0.1786666363477707, 0.11502400785684586, 0.10...</td>\n",
+" <td>😊</td>\n",
+" <td>[0.7040175768989329, 0.059322033898305086, 0.2...</td>\n",
+" <td>this dude alway help me get through my school ...</td>\n",
+" </tr>\n",
+" </tbody>\n",
+"</table>\n",
+"</div>"
+],
+"text/plain": [
+" predict predicted_sentiment teacher \\\n",
+"35671 😢 [0.15801723301410675, 0.11859036982059479, 0.1... 😂 \n",
+"25683 😢 [0.1703549474477768, 0.10913695394992828, 0.09... 😂 \n",
+"8985 😢 [0.1177721843123436, 0.06569620966911316, 0.06... 😊 \n",
+"5410 😢 [0.18182337284088135, 0.12382747232913971, 0.0... 😂 \n",
+"62611 😢 [0.1786666363477707, 0.11502400785684586, 0.10... 😊 \n",
+"\n",
+" teacher_sentiment \\\n",
+"35671 [0.46813021474490496, 0.24716181096977158, 0.2... \n",
+"25683 [0.46813021474490496, 0.24716181096977158, 0.2... \n",
+"8985 [0.7040175768989329, 0.059322033898305086, 0.2... \n",
+"5410 [0.46813021474490496, 0.24716181096977158, 0.2... \n",
+"62611 [0.7040175768989329, 0.059322033898305086, 0.2... \n",
+"\n",
+" text \n",
+"35671 i feel like i care so much more in everi situat \n",
+"25683 i did not meat to add that 2 there ... hav see... \n",
+"8985 never… \n",
+"5410 lmao on me ! ! ! wtf wa he suppos to say \n",
+"62611 this dude alway help me get through my school ... "
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Mean Squared Error: [0.14140389 0.04240099 0.02944344]\n",
+"Variance teacher: [0.02183094 0.02513847 0.00285735]\n",
+"Variance prediction: [0.00053908 0.00024232 0.00021658]\n"
+]
+}
+],
+"source": [
+"import __main__ as main\n",
+"if not hasattr(main, '__file__'):\n",
+" pred, teacher = tr.test()\n",
+" \n",
+" display(pred)\n",
+" display(teacher)\n",
+" \n",
+" print('prediction variance: ', np.linalg.norm(np.var(pred, axis=0)))\n",
+" print('teacher variance: ', np.linalg.norm(np.var(teacher, axis=0)))\n",
+" \n",
+" # build a dataframe to visualize test results:\n",
+" testlist = pd.DataFrame({'text': sdm.Xt, \n",
+" 'teacher': sent2emoji(sdm.yt),\n",
+" 'teacher_sentiment': sdm.yt.tolist(),\n",
+" 'predict': sent2emoji(pred, custom_target_emojis=sdm.top_emojis),\n",
+" 'predicted_sentiment': pred.tolist()})\n",
+" # display:\n",
+" display(testlist.head())\n",
+" \n",
+" # mean squared error:\n",
+" teacher_sentiments = np.array([sample[1]['teacher_sentiment'] for sample in testlist.iterrows()])\n",
+" predicted_sentiments = np.array([sample[1]['predicted_sentiment'] for sample in testlist.iterrows()])\n",
+"\n",
+" mean_squared_error = ((teacher_sentiments - predicted_sentiments)**2).mean(axis=0)\n",
+" print(\"Mean Squared Error: \", mean_squared_error)\n",
+" print(\"Variance teacher: \", np.var(teacher_sentiments, axis=0))\n",
+" print(\"Variance prediction: \", np.var(predicted_sentiments, axis=0))\n",
+" \n",
+" # save to csv:\n",
+" testlist.to_csv('test.csv')"
+]
+},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"----\n",
+"## Load classifier"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"import __main__ as main\n",
+"if not hasattr(main, '__file__'):\n",
+" try:\n",
+" pm\n",
+" except NameError:\n",
+" pass\n",
+" else:\n",
+" del pm # delete existing pipeline manager if there is one\n",
+"\n",
+" pm = pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
+" lookup_emojis = [#'😂',\n",
+" '😭',\n",
+" '😍',\n",
+" '😩',\n",
+" '😊',\n",
+" '😘',\n",
+" '🙏',\n",
+" '🙌',\n",
+" '😉',\n",
+" '😁',\n",
+" '😅',\n",
+" '😎',\n",
+" '😢',\n",
+" '😒',\n",
+" '😏',\n",
+" '😌',\n",
+" '😔',\n",
+" '😋',\n",
+" '😀',\n",
+" '😤']\n",
+" out = widgets.Output()\n",
+"\n",
+" t = widgets.Text()\n",
+" b = widgets.Button(\n",
+" description='get emoji',\n",
+" disabled=False,\n",
+" button_style='', # 'success', 'info', 'warning', 'danger' or ''\n",
+" tooltip='Click me',\n",
+" icon='check'\n",
+" )\n",
+"\n",
+"\n",
+"\n",
+" def handle_submit(sender):\n",
+" with out:\n",
+" clear_output()\n",
+" with out:\n",
+" pred = pm.predict([t.value])\n",
+"\n",
+" display(Markdown(\"# Predicted Emoji \" + str(sent2emoji(pred, lookup_emojis)[0])))\n",
+" display(Markdown(\"# Sentiment Vector: $$ \\pmatrix{\" + str(pred[0,0]) +\n",
+" \"\\\\\\\\\" + str(pred[0,1]) + \"\\\\\\\\\" + str(pred[0,2]) + \"}$$\"))\n",
+"\n",
+" b.on_click(handle_submit)\n",
+"\n",
+" display(t)\n",
+" display(widgets.VBox([b, out])) "
 ]
 },
 {
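
The prediction section treats the task as a regression onto 3-dimensional sentiment vectors and reports per-dimension mean squared error together with the variances of the teacher labels and the predictions. Comparing those variances matters because a network that collapses to a near-constant output (as the low prediction variance in the output above suggests) can still achieve a deceptively small MSE. A minimal sketch of exactly these metrics, extracted from the test cell (the function name `report_metrics` is illustrative):

```python
# Sketch of the metrics printed in the test cell: per-dimension MSE and
# variances over (n_samples, 3) arrays of sentiment vectors.
import numpy as np

def report_metrics(pred: np.ndarray, teacher: np.ndarray) -> None:
    mse = ((teacher - pred) ** 2).mean(axis=0)   # per-dimension MSE
    print("Mean Squared Error: ", mse)
    print("Variance teacher:   ", np.var(teacher, axis=0))
    print("Variance prediction:", np.var(pred, axis=0))
```
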
|
Loading…
Reference in New Issue
Block a user