messy merge on carstens laptop

This commit is contained in:
Carsten 2018-06-20 18:37:33 +02:00
commit e4aab33cee
12 changed files with 5361 additions and 2664 deletions

BIN
Project/Images/Neg_Neu.png Normal file
BIN
Project/Images/Pos_Neg.png Normal file
BIN
Project/Images/Pos_Neu.png Normal file

(binary image previews not shown; the diff viewer reports image sizes of 23 KiB, 7.7 KiB, 22 KiB, 7.8 KiB, 22 KiB and 7.5 KiB)

View File

@@ -0,0 +1,206 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"import sys\n",
"import numpy as np\n",
"from sklearn.cluster import KMeans\n",
"sys.path.append(\"..\")\n",
"\n",
"from Tools.Emoji_Distance import sentiment_vector_to_emoji\n",
"from Tools.Emoji_Distance import emoji_to_sentiment_vector\n",
"from Tools.Emoji_Distance import dataframe_to_dictionary\n",
"\n",
"def emoji2sent(emoji_arr):\n",
" return np.array([emoji_to_sentiment_vector(e) for e in emoji_arr])\n",
"\n",
"def sent2emoji(sent_arr, custom_target_emojis=None):\n",
" return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis) for s in sent_arr]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"data , data_only_emoticons, list_sentiment_vectors , list_emojis , list_sentiment_emoticon_vectors , list_emoticon_emojis = dataframe_to_dictionary()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[0.46813021, 0.24716181, 0.28470797],\n",
" [0.72967448, 0.05173769, 0.21858783],\n",
" [0.34310532, 0.43648208, 0.2204126 ],\n",
" [0.75466009, 0.0529057 , 0.19243421],\n",
" [0.70401758, 0.05932203, 0.23666039],\n",
" [0.57697579, 0.12699863, 0.29602558],\n",
" [0.22289823, 0.59126106, 0.18584071],\n",
" [0.49837557, 0.0805718 , 0.42105263],\n",
" [0.44415243, 0.11169514, 0.44415243],\n",
" [0.5634451 , 0.09927679, 0.33727811]])"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"array_sentiment_vectors = np.array(list_sentiment_emoticon_vectors)\n",
"array_sentiment_vectors[:10]"
]
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [],
"source": [
"kmeans = KMeans(n_clusters=5, random_state=0).fit(array_sentiment_vectors)"
]
},
{
"cell_type": "code",
"execution_count": 43,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"array([[0.43555605, 0.2777192 , 0.28672476],\n",
" [0.21254481, 0.57576584, 0.21168936],\n",
" [0.56669216, 0.13017252, 0.30313532],\n",
" [0.33453667, 0.45309312, 0.21237021],\n",
" [0.71664806, 0.06648547, 0.21686647]])"
]
},
"execution_count": 43,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"centers = kmeans.cluster_centers_\n",
"centers"
]
},
{
"cell_type": "code",
"execution_count": 44,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"🙇\n",
"😿\n",
"😄\n",
"😭\n",
"😍\n"
]
}
],
"source": [
"for center in centers:\n",
" print(sentiment_vector_to_emoji(center))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* only most used emojis"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [],
"source": [
"top_emojis = [('😂', 10182),\n",
" ('😭', 3893),\n",
" ('😍', 2866),\n",
" ('😩', 1647),\n",
" ('😊', 1450),\n",
" ('😘', 1151),\n",
" ('🙏', 1089),\n",
" ('🙌', 1003),\n",
" ('😉', 752),\n",
" ('😁', 697),\n",
" ('😅', 651),\n",
" ('😎', 606),\n",
" ('😢', 544),\n",
" ('😒', 539),\n",
" ('😏', 478),\n",
" ('😌', 434),\n",
" ('😔', 415),\n",
" ('😋', 397),\n",
" ('😀', 392),\n",
" ('😤', 368)]"
]
},
{
"cell_type": "code",
"execution_count": 47,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"😂\n",
"😒\n",
"😁\n",
"😭\n",
"😍\n"
]
}
],
"source": [
"for center in centers:\n",
" print(sentiment_vector_to_emoji(center, custom_target_emojis=top_emojis))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
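The notebook above maps each KMeans cluster center back to an emoji via Tools.Emoji_Distance.sentiment_vector_to_emoji. As a rough illustration of that step (an assumption about the helper, not its actual implementation), the lookup can be thought of as a nearest-neighbour search in the three-dimensional (positive, neutral, negative) sentiment space:

import numpy as np

def nearest_emoji(sent_vec, emoji_vectors, emojis):
    # distance from the query sentiment vector to every candidate emoji's vector
    dists = np.linalg.norm(np.asarray(emoji_vectors) - np.asarray(sent_vec), axis=1)
    # return the emoji whose sentiment vector is closest
    return emojis[int(np.argmin(dists))]

# e.g. nearest_emoji(centers[0], array_sentiment_vectors, list_emoticon_emojis)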

File diff suppressed because it is too large

View File

@@ -0,0 +1,688 @@
# coding: utf-8
# In[1]:
import pandas as pd
from IPython.display import clear_output, Markdown, Math
import ipywidgets as widgets
import os
import glob
import json
import numpy as np
import itertools
import sklearn.utils as sku
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from sklearn.cluster import KMeans
import nltk
from keras.models import load_model
from sklearn.externals import joblib
import pickle
import operator
from sklearn.pipeline import Pipeline
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')
# In[2]:
import sys
sys.path.append("..")
import Tools.Emoji_Distance as edist
def emoji2sent(emoji_arr, only_emoticons=True):
return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])
def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]
# In[3]:
SINGLE_LABEL = True
# ----
# ## classes and functions we are using later:
# ----
# * functions for selecting items from a set / list
# In[4]:
def latest(lst):
return lst[-1] if len(lst) > 0 else 'X'
def most_common(lst):
# find the most commonly used emoji in the given lst
return max(set(lst), key=lst.count) if len(lst) > 0 else "X" # label is set to 'X' if the emoji list is empty
# * our emoji blacklist (skin and sex modifiers)
# In[5]:
# defining blacklist for modifier emojis:
emoji_blacklist = set([
chr(0x1F3FB),
chr(0x1F3FC),
chr(0x1F3FD),
chr(0x1F3FE),
chr(0x1F3FF),
chr(0x2642),
chr(0x2640)
])
# * lemmatization helper functions
# In[6]:
from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
from nltk import word_tokenize
from nltk.corpus import wordnet
def get_wordnet_pos(treebank_tag):
if treebank_tag.startswith('J'):
return wordnet.ADJ
elif treebank_tag.startswith('V'):
return wordnet.VERB
elif treebank_tag.startswith('N'):
return wordnet.NOUN
elif treebank_tag.startswith('R'):
return wordnet.ADV
else:
return wordnet.NOUN
# ### sample data manager
# the sample data manager loads and preprocesses data
# most common way to use:
#
#
# * `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)`
#
# * Generates a sample_data_manager object and preprocesses data in one step
#
# In[7]:
class sample_data_manager(object):
@staticmethod
def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):
"""
generate, read and process train data in one step.
@param path: folder containing json files to process
@param only_emoticons: if True, only messages containing emoticons (provided by Tools.Emoji_Distance) are used
@param apply_stemming: apply stemming and lemmatization on dataset
@param n_top_emojis: only use messages containing one of the <`n_top_emojis`> most frequent emojis. Set to `-1` to disable top-emoji filtering
@param file_range: range of file indices to read (e.g. `range(3)` to read the first three files). If `None`, all files are read
@param n_kmeans_cluster: generate multilabel (binarized) labels using kmeans with this number of clusters. Set to -1 to use the plain sentiment space as labels
@return: sample_data_manager object
"""
sdm = sample_data_manager(path)
sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)
if apply_stemming:
sdm.apply_stemming_and_lemmatization()
sdm.generate_emoji_count_and_weights()
if n_top_emojis > 0:
sdm.filter_by_top_emojis(n_top=n_top_emojis)
if n_kmeans_cluster > 0:
sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)
return sdm
def __init__(self, data_root_folder:str):
"""
constructor for manual initialization
@param data_root_folder: folder containing json files to process
"""
self.data_root_folder = data_root_folder
self.json_files = sorted(glob.glob(self.data_root_folder + "/*.json"))
self.n_files = len(self.json_files)
self.raw_data = None
self.emojis = None
self.plain_text = None
self.labels = None
self.emoji_count = None
self.emoji_weights = None
self.X = None
self.y = None
self.Xt = None
self.yt = None
self.top_emojis = None
self.binary_labels = None
self.use_binary_labels = False
self.kmeans_cluster = None
self.label_binarizer = None
def read_files(self, file_index_range:list, only_emoticons=True):
"""
read (multiple) files into one pandas table.
@param file_index_range: range of file indices to read (e.g. `range(3)` to read the first three files)
@param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance
"""
assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files
for i in file_index_range:
print("reading file: " + self.json_files[i] + "...")
if self.raw_data is None:
self.raw_data = pd.read_json(self.json_files[i], encoding="utf-8")
else:
self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding="utf-8"))
self.emojis = self.raw_data['EMOJI']
self.plain_text = self.raw_data['text']
# replacing keywords. TODO: maybe this information could be extracted and used
self.plain_text = self.plain_text.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","")
# so far filtering for the latest emoji. TODO: maybe there are also better approaches
self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons )
# and filter out all samples we have no label for:
wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))
self.labels = self.labels[np.invert(wrong_labels)]
self.plain_text = self.plain_text[np.invert(wrong_labels)]
self.emojis = self.emojis[np.invert(wrong_labels)]
print("imported " + str(len(self.labels)) + " samples")
def apply_stemming_and_lemmatization(self):
"""
apply stemming and lemmatization to plain text samples
"""
stemmer = SnowballStemmer("english")
for key in self.plain_text.keys():
stemmed_sent = []
for word in self.plain_text[key].split(" "):
word_stemmed = stemmer.stem(word)
stemmed_sent.append(word_stemmed)
stemmed_sent = (" ").join(stemmed_sent)
self.plain_text[key] = stemmed_sent
lemmatizer = WordNetLemmatizer()
for key in self.plain_text.keys():
lemmatized_sent = []
sent_pos = pos_tag(word_tokenize(self.plain_text[key]))
for word in sent_pos:
wordnet_pos = get_wordnet_pos(word[1].lower())
word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)
lemmatized_sent.append(word_lemmatized)
lemmatized_sent = (" ").join(lemmatized_sent)
self.plain_text[key] = lemmatized_sent
def generate_emoji_count_and_weights(self):
"""
count occurrences of emojis
"""
self.emoji_count = {}
for e_list in self.emojis:
for e in set(e_list):
if e not in self.emoji_count:
self.emoji_count[e] = 0
self.emoji_count[e] += 1
emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])
self.emoji_weights = {}
for e in self.emoji_count:
# idf-style weight for emojis (log of the inverse frequency)
self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))
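# (illustrative: an emoji accounting for half of all emoji occurrences gets a weight of log(2) ≈ 0.69 here, before the normalization below)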
weights_sum= sum([self.emoji_weights[x] for x in self.emoji_weights])
# normalize:
for e in self.emoji_weights:
self.emoji_weights[e] = self.emoji_weights[e] / weights_sum
self.emoji_weights['X'] = 0 # dummy values
self.emoji_count['X'] = 0
def get_emoji_count(self):
"""
@return: descending list of tuples in form (<emoji as character>, <emoji count>)
"""
assert self.emoji_count is not None
sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))
#display(sorted_emoji_count)
return sorted_emoji_count
def filter_by_top_emojis(self,n_top = 20):
"""
filter out messages not containing one of the `n_top` emojis
@param n_top: number of top emojis used for filtering
"""
assert self.labels is not None # ← messages are already read in
self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]
in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]
self.labels = self.labels[in_top]
self.plain_text = self.plain_text[in_top]
self.emojis = self.emojis[in_top]
print("remaining samples after top emoji filtering: ", len(self.labels))
def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):
"""
generate binary labels using kmeans.
@param only_emoticons: set whether we're using the full emoji set or only emoticons
@param n_clusters: number of clusters to generate in the emoji sentiment space
"""
assert self.labels is not None
array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors
array_sentiment_vectors = np.array(array_sentiment_vectors)
list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis
self.use_binary_labels = True
print("clustering following emojis: " + "".join(list_emojis) + "...")
self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)
print("clustering done")
self.label_binarizer = LabelBinarizer()
multiclass_labels = self.kmeans_cluster.predict(self.labels)
# FIXME: we have to guarantee that in every dataset all classes occur.
# otherwise batch fitting is not possible!
# (or we have to precompute the mlb fitting process somewhere...)
self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)
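# note: LabelBinarizer one-hot encodes the cluster ids, e.g. with n_clusters=5 a sample
# assigned to cluster 2 becomes the binary label [0, 0, 1, 0, 0]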
def create_train_test_split(self, split = 0.1, random_state = 4222):
assert self.plain_text is not None and self.labels is not None
if self.X is not None:
sys.stderr.write("WARNING: overwriting existing train/test split \n")
labels = self.binary_labels if self.use_binary_labels else self.labels
assert labels is not None
self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)
# * the pipeline manager saves and stores sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations
# In[8]:
class pipeline_manager(object):
@staticmethod
def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):
"""
load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'
@param file_prefix: basename of all files (without extension)
@param keras_models: list of keras models (keras model files, only extension name). Leave this list empty if this is not a keras pipeline
@param all_models: list of all models (including keras_models, only extension name).
@return a pipeline manager object
"""
pm = pipeline_manager(keras_models=keras_models)
pm.load(file_prefix, all_models)
return pm
@staticmethod
def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None):
'''
creates pipeline with vectorizer and keras classifier
@param vectorizer: Vectorizer object. will be fitted with data provided by sdm
@param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_func:str>)
@param sdm: sample data manager to get data for the vectorizer
@param loss: set the keras loss function. Depending on whether sdm uses multiclass labels, `categorical_crossentropy` or `mean_squared_error` is used as default
@param optimizer: set the keras optimizer. Depending on whether sdm uses multiclass labels, `sgd` or `adam` is used as default
@return: a pipeline manager object
'''
from keras.models import Sequential
from keras.layers import Dense
if sdm.X is None:
sdm.create_train_test_split()
vec_train = vectorizer.fit_transform(sdm.X)
vec_test = vectorizer.transform(sdm.Xt)
# creating keras model:
model=Sequential()
keras_layers = []
first_layer = True
for layer in layers:
if first_layer:
model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([" "])[0].shape[1]))
first_layer = False
else:
model.add(Dense(units=layer[0], activation=layer[1]))
if sdm.use_binary_labels:
loss_function = loss if loss is not None else 'categorical_crossentropy'
optimizer_function = optimizer if optimizer is not None else 'sgd'
model.compile(loss=loss_function,
optimizer=optimizer_function,
metrics=['accuracy'])
else:
loss_function = loss if loss is not None else 'mean_squared_error'
optimizer_function = optimizer if optimizer is not None else 'adam'
model.compile(loss=loss_function,
optimizer=optimizer_function)
pipeline = Pipeline([
('vectorizer',vectorizer),
('keras_model', model)
])
return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])
@staticmethod
def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm:sample_data_manager = None):
'''
creates pipeline with vectorizer and non-keras classifier
@param vectorizer: Vectorizer object. will be fitted with data provided by sdm
@param classifier: unfitted classifier object (should be compatible with all sklearn classifiers)
@param sdm: sample data manager to get data for the vectorizer
@return: a pipeline manager object
'''
if sdm is not None:
if sdm.X is None:
sdm.create_train_test_split()
vec_train = vectorizer.fit_transform(sdm.X)
vec_test = vectorizer.transform(sdm.Xt)
pipeline = Pipeline([
('vectorizer',vectorizer),
('classifier', classifier)
])
return pipeline_manager(pipeline=pipeline, keras_models=[])
def __init__(self, pipeline = None, keras_models = []):
"""
constructor
@param pipeline: a sklearn pipeline
@param keras_models: list of keras steps in the pipeline. Necessary because saving and loading keras models differs from scikit-learn models
"""
self.pipeline = pipeline
self.additional_objects = {}
self.keras_models = keras_models
def save(self, prefix:str):
"""
saving the pipeline. It generates one file per model in the form: '<prefix>.<model_name>'
@param prefix: file prefix for all models
"""
print(self.keras_models)
# doing this like explained here: https://stackoverflow.com/a/43415459
for step in self.pipeline.named_steps:
if step in self.keras_models:
self.pipeline.named_steps[step].model.save(prefix + "." + step)
else:
joblib.dump(self.pipeline.named_steps[step], prefix + "." + str(step))
load_command = "pipeline_manager.load_pipeline_from_files( '"
load_command += prefix + "', " + str(self.keras_models) + ", "
load_command += str(list(self.pipeline.named_steps.keys())) + ")"
import __main__ as main
if not hasattr(main, '__file__'):
display("saved pipeline. It can be loaded the following way:")
display(Markdown("> ```\n"+load_command+"\n```")) # ← if we're in jupyter, print the fancy way :)
else:
print("saved pipeline. It can be loaded the following way:")
print(load_command)
def load(self, prefix:str, models = []):
"""
load a pipeline. A pipeline should be represented by multiple model files in the form '<prefix>.<model_name>'
NOTE: keras model names (if there are some) have to be defined in self.keras_models first!
@param prefix: the prefix for all model files
@param models: model_names to load
"""
self.pipeline = None
model_list = []
for model in models:
if model in self.keras_models:
model_list.append((model, load_model(prefix + "." + model)))
else:
model_list.append((model, joblib.load(prefix+"." + model)))
self.pipeline = Pipeline(model_list)
def fit(self,X,y):
"""fitting the pipeline"""
self.pipeline.fit(X,y)
def predict(self,X):
"""predict"""
return self.pipeline.predict(X)
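# usage sketch for persisting and restoring a fitted pipeline (the 'custom_classifier' prefix and the
# model names below are exactly the ones used in the save / load sections further down):
#
#   pm.save('custom_classifier')
#   pm = pipeline_manager.load_pipeline_from_files('custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])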
# * the trainer class passes data from the sample manager to the pipeline manager
# In[9]:
class trainer(object):
def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):
"""constructor"""
self.sdm = sdm
self.pm = pm
def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):
"""
fit data in the pipeline. Because we don't want to refit the vectorizer, the pipeline steps containing a vectorizer have to be named explicitly
@param max_size: don't train more examples than that number
@param disabled_fit_steps: list of pipeline steps that we want to prevent from being refit. Normally all vectorizer steps
"""
# TODO: make batch fitting available here (e.g. continuously waiting for data and fitting it)
if self.sdm.X is None:
self.sdm.create_train_test_split()
disabled_fits = {}
disabled_fit_transforms = {}
named_steps = self.pm.pipeline.named_steps
for s in disabled_fit_steps:
# now it gets a little bit dirty:
# replace fit functions we don't want to call again (e.g. for vectorizers)
disabled_fits[s] = named_steps[s].fit
disabled_fit_transforms[s] = named_steps[s].fit_transform
# the replacement is assigned on the instance, so it is not bound and must not take `self`
named_steps[s].fit = lambda X, y=None, _estimator=named_steps[s]: _estimator
named_steps[s].fit_transform = named_steps[s].transform
self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
# restore replaced fit functions:
for s in disabled_fit_steps:
named_steps[s].fit = disabled_fits[s]
named_steps[s].fit_transform = disabled_fit_transforms[s]
def test(self):
'''
@return: prediction:list, teacher:list
'''
if self.sdm.X is None:
self.sdm.create_train_test_split()
return self.pm.predict(self.sdm.Xt), self.sdm.yt
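# condensed sketch of how the three classes interact (the interactive Train / Prediction sections
# below run essentially these steps when executed inside jupyter):
#
#   sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1))
#   sdm.create_train_test_split()
#   pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),
#                                                               layers=[(2500, 'relu'), (sdm.y.shape[1], None)], sdm=sdm)
#   tr = trainer(sdm=sdm, pm=pm)
#   tr.fit(100)
#   pred, teacher = tr.test()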
# ----
# ## Train
# * when in a notebook environment: run the code below:
# In[10]:
import __main__ as main
if not hasattr(main, '__file__'):
# we are in an interactive environment (probably in jupyter)
# load data:
# setting n_kmeans_cluster to a value > 0 activates binarized labeling automatically!
# set to -1 to disable kmeans clustering and generating labels in plain sentiment space
#n_kmeans_cluster = 5
n_kmeans_cluster = -1
sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1), n_kmeans_cluster=n_kmeans_cluster)
sdm.create_train_test_split()
#pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),
#                                                            layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)
pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),
layers=[(2500, 'relu'),(sdm.y.shape[1],None)], sdm=sdm)
tr = trainer(sdm=sdm, pm=pm)
tr.fit(100)
# ----
# ## save classifier
# In[11]:
import __main__ as main
if not hasattr(main, '__file__'):
pm.save('custom_classifier')
# ----
# ## Prediction
#
# * predict and save to `test.csv`
# In[12]:
import __main__ as main
if not hasattr(main, '__file__'):
pred, teacher = tr.test()
display(pred)
display(teacher)
print('prediction variance: ', np.linalg.norm(np.var(pred, axis=0)))
print('teacher variance: ', np.linalg.norm(np.var(teacher, axis=0)))
# build a dataframe to visualize test results:
testlist = pd.DataFrame({'text': sdm.Xt,
'teacher': sent2emoji(sdm.yt),
'teacher_sentiment': sdm.yt.tolist(),
'predict': sent2emoji(pred, custom_target_emojis=sdm.top_emojis),
'predicted_sentiment': pred.tolist()})
# display:
display(testlist.head())
# mean squared error:
teacher_sentiments = np.array([sample[1]['teacher_sentiment'] for sample in testlist.iterrows()])
predicted_sentiments = np.array([sample[1]['predicted_sentiment'] for sample in testlist.iterrows()])
mean_squared_error = ((teacher_sentiments - predicted_sentiments)**2).mean(axis=0)
print("Mean Squared Error: ", mean_squared_error)
print("Variance teacher: ", np.var(teacher_sentiments, axis=0))
print("Variance prediction: ", np.var(predicted_sentiments, axis=0))
# save to csv:
testlist.to_csv('test.csv')
# ----
# ## Load classifier
#
# * loading classifier and show a test widget
# In[13]:
import __main__ as main
if not hasattr(main, '__file__'):
try:
pm
except NameError:
pass
else:
del pm # delete the existing pipeline manager if there is one
pm = pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])
lookup_emojis = [#'😂',
'😭',
'😍',
'😩',
'😊',
'😘',
'🙏',
'🙌',
'😉',
'😁',
'😅',
'😎',
'😢',
'😒',
'😏',
'😌',
'😔',
'😋',
'😀',
'😤']
out = widgets.Output()
t = widgets.Text()
b = widgets.Button(
description='get emoji',
disabled=False,
button_style='', # 'success', 'info', 'warning', 'danger' or ''
tooltip='Click me',
icon='check'
)
def handle_submit(sender):
with out:
clear_output()
with out:
pred = pm.predict([t.value])
display(Markdown("# Predicted Emoji " + str(sent2emoji(pred, lookup_emojis)[0])))
display(Markdown("# Sentiment Vector: $$ \pmatrix{" + str(pred[0,0]) +
"\\\\" + str(pred[0,1]) + "\\\\" + str(pred[0,2]) + "}$$"))
b.on_click(handle_submit)
display(t)
display(widgets.VBox([b, out]))

File diff suppressed because it is too large