# coding: utf-8

# In[1]:

import pandas as pd
from IPython.display import clear_output, Markdown, Math
import ipywidgets as widgets
import os
import glob
import json
import numpy as np
import itertools
import sklearn.utils as sku
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
from sklearn.cluster import KMeans
import nltk
from keras.models import load_model
from sklearn.externals import joblib
import pickle
import operator
from sklearn.pipeline import Pipeline

nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('wordnet')


# In[2]:

import sys
sys.path.append("..")

import Tools.Emoji_Distance as edist

def emoji2sent(emoji_arr, only_emoticons=True):
    return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])

def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
    return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]


# In[3]:

SINGLE_LABEL = True
# ----
# ## classes and functions we are using later:
# ----
# * functions for selecting items from a set / list (a short sanity check follows the definitions)

# In[4]:

def latest(lst):
    return lst[-1] if len(lst) > 0 else 'X'

def most_common(lst):
    # find the most commonly used emoji in the given list
    return max(set(lst), key=lst.count) if len(lst) > 0 else "X"  # setting the label to 'X' if the emoji list is empty
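
# For example (arbitrary emojis, only to illustrate the fallback behaviour):

print(latest(['🙂', '😂']))              # → '😂', the last emoji of a message
print(most_common(['😂', '🙂', '😂']))  # → '😂'
print(most_common([]))                   # → 'X', the dummy label for messages without emojis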
# * our emoji blacklist (skin tone and gender modifiers)

# In[5]:

# defining the blacklist for modifier emojis:
emoji_blacklist = set([
    chr(0x1F3FB),
    chr(0x1F3FC),
    chr(0x1F3FD),
    chr(0x1F3FE),
    chr(0x1F3FF),
    chr(0x2642),
    chr(0x2640)
])
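
# Illustration (made-up string): stripping a skin tone modifier from a message.
# `read_files` below does the same thing with a regex character class built from this set.

sample = "👍" + chr(0x1F3FB) + " thanks"
print("".join(c for c in sample if c not in emoji_blacklist))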
# * lemmatization helper functions (a small demo follows the definition)

# In[6]:

from nltk.stem.snowball import SnowballStemmer
from nltk.stem import WordNetLemmatizer
from nltk import pos_tag
from nltk import word_tokenize
from nltk.corpus import wordnet

def get_wordnet_pos(treebank_tag):
    # map a Penn Treebank POS tag to the wordnet POS constant expected by the lemmatizer
    if treebank_tag.startswith('J'):
        return wordnet.ADJ
    elif treebank_tag.startswith('V'):
        return wordnet.VERB
    elif treebank_tag.startswith('N'):
        return wordnet.NOUN
    elif treebank_tag.startswith('R'):
        return wordnet.ADV
    else:
        return wordnet.NOUN
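
# Small demo of the POS mapping (example sentence is arbitrary): treebank tags are reduced
# to the four wordnet classes so the lemmatizer gets a usable `pos` argument.

demo_tags = pos_tag(word_tokenize("the cats are running quickly"))
print([(word, get_wordnet_pos(tag)) for word, tag in demo_tags])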
# ### sample data manager
# the sample data manager loads and preprocesses the data.
# the most common way to use it:
#
#
# * `sdm = sample_data_manager.generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None)`
#
# * generates a sample_data_manager object and preprocesses the data in one step
#
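# for example (hypothetical data folder, parameters as documented in the class below):
#
#     sdm = sample_data_manager.generate_and_read("./data", only_emoticons=True, n_top_emojis=20, file_range=range(1))
#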
# In[7]:
class sample_data_manager(object):
    @staticmethod
    def generate_and_read(path: str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):
        """
        generate, read and process train data in one step.

        @param path: folder containing the json files to process
        @param only_emoticons: if True, only messages containing emoticons (as provided by Tools.Emoji_Distance) are used
        @param apply_stemming: apply stemming and lemmatization to the dataset
        @param n_top_emojis: only use messages containing one of the `n_top_emojis` most frequent emojis. Set to `-1` to disable top-emoji filtering
        @param file_range: range of file indices to read (e.g. `range(3)` to read the first three files). If `None`, all files are read
        @param n_kmeans_cluster: generate multilabel targets with kmeans using this number of clusters. Set to `-1` to use the plain sentiment space as label
        @param progress_callback: optional function that is called after each processed file
        @return: sample_data_manager object
        """
        sdm = sample_data_manager(path)
        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)
        if apply_stemming:
            sdm.apply_stemming_and_lemmatization()

        sdm.generate_emoji_count_and_weights()

        if n_top_emojis > 0:
            sdm.filter_by_top_emojis(n_top=n_top_emojis)

        if n_kmeans_cluster > 0:
            sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)

        return sdm

    def __init__(self, data_root_folder: str):
        """
        constructor for manual initialization

        @param data_root_folder: folder containing the json files to process
        """
        self.data_root_folder = data_root_folder
        self.json_files = sorted(glob.glob(self.data_root_folder + "/*.json"))
        self.n_files = len(self.json_files)
        self.raw_data = None
        self.emojis = None
        self.plain_text = None
        self.labels = None
        self.emoji_count = None
        self.emoji_weights = None
        self.X = None
        self.y = None
        self.Xt = None
        self.yt = None
        self.top_emojis = None
        self.binary_labels = None
        self.use_binary_labels = False
        self.kmeans_cluster = None
        self.label_binarizer = None

    def read_files(self, file_index_range: list, only_emoticons=True, progress_callback=None):
        """
        read (multiple) files into one pandas table.

        @param file_index_range: range of file indices to read (e.g. `range(3)` to read the first three files)
        @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance
        @param progress_callback: optional function that is called after each file has been read
        """
        assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files

        for i in file_index_range:
            print("reading file: " + self.json_files[i] + " ...")
            if self.raw_data is None:
                self.raw_data = pd.read_json(self.json_files[i], encoding="utf-8")
            else:
                self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding="utf-8"))
            if progress_callback is not None:
                progress_callback()

        self.emojis = self.raw_data['EMOJI']
        self.plain_text = self.raw_data['text']

        # replacing keywords. TODO: maybe this information can be extracted and used
        self.plain_text = self.plain_text.str.replace("(<EMOJI>|<USER>|<HASHTAG>)", "").str.replace("[" + "".join(list(emoji_blacklist)) + "]", "")

        # so far we filter for the latest emoji in a message. TODO: maybe there are better approaches
        self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons)

        # and filter out all samples we have no label for:
        wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))

        self.labels = self.labels[np.invert(wrong_labels)]
        self.plain_text = self.plain_text[np.invert(wrong_labels)]
        self.emojis = self.emojis[np.invert(wrong_labels)]

        print("imported " + str(len(self.labels)) + " samples")

    def apply_stemming_and_lemmatization(self):
        """
        apply stemming and lemmatization to the plain text samples
        """
        stemmer = SnowballStemmer("english")
        for key in self.plain_text.keys():
            stemmed_sent = []
            for word in self.plain_text[key].split(" "):
                word_stemmed = stemmer.stem(word)
                stemmed_sent.append(word_stemmed)
            stemmed_sent = (" ").join(stemmed_sent)
            self.plain_text[key] = stemmed_sent

        lemmatizer = WordNetLemmatizer()
        for key in self.plain_text.keys():
            lemmatized_sent = []
            sent_pos = pos_tag(word_tokenize(self.plain_text[key]))
            for word in sent_pos:
                wordnet_pos = get_wordnet_pos(word[1])
                word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)
                lemmatized_sent.append(word_lemmatized)
            lemmatized_sent = (" ").join(lemmatized_sent)
            self.plain_text[key] = lemmatized_sent

    def generate_emoji_count_and_weights(self):
        """
        count the occurrences of emojis and derive an inverse-frequency weight per emoji
        """
        self.emoji_count = {}
        for e_list in self.emojis:
            for e in set(e_list):
                if e not in self.emoji_count:
                    self.emoji_count[e] = 0
                self.emoji_count[e] += 1

        emoji_sum = sum([self.emoji_count[e] for e in self.emoji_count])

        self.emoji_weights = {}
        for e in self.emoji_count:
            # idf-style weighting: rare emojis get higher weights
            self.emoji_weights[e] = np.log((emoji_sum / self.emoji_count[e]))

        weights_sum = sum([self.emoji_weights[x] for x in self.emoji_weights])

        # normalize:
        for e in self.emoji_weights:
            self.emoji_weights[e] = self.emoji_weights[e] / weights_sum

        self.emoji_weights['X'] = 0  # dummy values
        self.emoji_count['X'] = 0

    def get_emoji_count(self):
        """
        @return: list of tuples of the form (<emoji as character>, <emoji count>), sorted by count in descending order
        """
        assert self.emoji_count is not None

        sorted_emoji_count = list(reversed(sorted(self.emoji_count.items(), key=operator.itemgetter(1))))
        #display(sorted_emoji_count)
        return sorted_emoji_count

    def filter_by_top_emojis(self, n_top=20):
        """
        filter out messages not containing one of the `n_top` most frequent emojis

        @param n_top: number of top emojis used for filtering
        """
        assert self.labels is not None  # ← messages are already read in
        self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]
        in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]
        self.labels = self.labels[in_top]
        self.plain_text = self.plain_text[in_top]
        self.emojis = self.emojis[in_top]
        print("remaining samples after top emoji filtering: ", len(self.labels))

    def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):
        """
        generate binary labels using kmeans.

        @param only_emoticons: set whether we're using the full emoji set or only emoticons
        @param n_clusters: number of clusters we're generating in the emoji's sentiment space
        """
        assert self.labels is not None

        array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors
        array_sentiment_vectors = np.array(array_sentiment_vectors)

        list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis
        self.use_binary_labels = True

        print("clustering following emojis: " + " ".join(list_emojis) + " ...")
        self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)
        print("clustering done")

        self.label_binarizer = LabelBinarizer()

        multiclass_labels = self.kmeans_cluster.predict(self.labels)

        # FIXME: we have to guarantee that all classes occur in every dataset.
        # otherwise batch fitting is not possible!
        # (or we have to precompute the mlb fitting process somewhere...)
        self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)

    def create_train_test_split(self, split=0.1, random_state=4222):
        """
        create a train/test split of the current samples

        @param split: fraction of samples used as test set
        @param random_state: random seed for reproducibility
        """
        assert self.plain_text is not None and self.labels is not None
        if self.X is not None:
            sys.stderr.write("WARNING: overwriting existing train/test split\n")

        labels = self.binary_labels if self.use_binary_labels else self.labels
        assert labels is not None
        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)

# * the pipeline manager saves and loads sklearn pipelines. Keras models are handled differently, so they have to be named explicitly during save and load operations (see the usage sketch below)
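#
# for example, saving a fitted pipeline under a prefix creates one file per pipeline step (`<prefix>.<step_name>`), and loading works as follows (prefix and step names are hypothetical):
#
#     pm.save('custom_classifier')
#     pm = pipeline_manager.load_pipeline_from_files('custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])
#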
# In[8]:
class pipeline_manager(object):
    @staticmethod
    def load_pipeline_from_files(file_prefix: str, keras_models=[], all_models=[]):
        """
        load a pipeline from files. A pipeline is represented by multiple model files of the form '<file_prefix>.<model_name>'

        @param file_prefix: basename of all files (without extension)
        @param keras_models: list of keras models (keras model files, extension name only). Leave this list empty if this is not a keras pipeline
        @param all_models: list of all models (including keras_models, extension name only)
        @return: a pipeline_manager object
        """
        pm = pipeline_manager(keras_models=keras_models)
        pm.load(file_prefix, all_models)
        return pm

    @staticmethod
    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm: sample_data_manager, loss=None, optimizer=None):
        '''
        creates a pipeline with vectorizer and keras classifier

        @param vectorizer: Vectorizer object. Will be fitted with the data provided by sdm
        @param layers: list of keras layers. One keras layer is a tuple of the form: (<#neurons: int>, <activation_func: str>)
        @param sdm: sample data manager to get the data for the vectorizer
        @param loss: set the keras loss function. Depending on whether sdm uses multiclass labels, `categorical_crossentropy` or `mean_squared_error` is used as default
        @param optimizer: set the keras optimizer. Depending on whether sdm uses multiclass labels, `sgd` or `adam` is used as default
        @return: a pipeline_manager object
        '''
        from keras.models import Sequential
        from keras.layers import Dense

        if sdm.X is None:
            sdm.create_train_test_split()

        vec_train = vectorizer.fit_transform(sdm.X)
        vec_test = vectorizer.transform(sdm.Xt)

        # creating keras model:
        model = Sequential()
        keras_layers = []
        first_layer = True
        for layer in layers:
            if first_layer:
                model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([" "])[0]._shape[1]))
                first_layer = False
            else:
                model.add(Dense(units=layer[0], activation=layer[1]))

        if sdm.use_binary_labels:
            loss_function = loss if loss is not None else 'categorical_crossentropy'
            optimizer_function = optimizer if optimizer is not None else 'sgd'
            model.compile(loss=loss_function,
                          optimizer=optimizer_function,
                          metrics=['accuracy'])
        else:
            loss_function = loss if loss is not None else 'mean_squared_error'
            optimizer_function = optimizer if optimizer is not None else 'adam'
            model.compile(loss=loss_function,
                          optimizer=optimizer_function)

        pipeline = Pipeline([
            ('vectorizer', vectorizer),
            ('keras_model', model)
        ])

        return pipeline_manager(pipeline=pipeline, keras_models=['keras_model'])

    @staticmethod
    def create_pipeline_with_classifier_and_vectorizer(vectorizer, classifier, sdm: sample_data_manager = None):
        '''
        creates a pipeline with vectorizer and non-keras classifier

        @param vectorizer: Vectorizer object. Will be fitted with the data provided by sdm
        @param classifier: unfitted classifier object (should be compatible with all sklearn classifiers)
        @param sdm: sample data manager to get the data for the vectorizer
        @return: a pipeline_manager object
        '''
        if sdm is not None:
            if sdm.X is None:
                sdm.create_train_test_split()

            vec_train = vectorizer.fit_transform(sdm.X)
            vec_test = vectorizer.transform(sdm.Xt)

        pipeline = Pipeline([
            ('vectorizer', vectorizer),
            ('classifier', classifier)
        ])

        return pipeline_manager(pipeline=pipeline, keras_models=[])

    def __init__(self, pipeline=None, keras_models=[]):
        """
        constructor

        @param pipeline: a sklearn pipeline
        @param keras_models: list of keras steps in the pipeline. Necessary because saving and loading keras models differs from the scikit-learn ones
        """
        self.pipeline = pipeline
        self.additional_objects = {}
        self.keras_models = keras_models

    def save(self, prefix: str):
        """
        save the pipeline. Generates one file per model of the form '<prefix>.<model_name>'

        @param prefix: file prefix for all models
        """
        print(self.keras_models)
        # doing this as explained here: https://stackoverflow.com/a/43415459
        for step in self.pipeline.named_steps:
            if step in self.keras_models:
                self.pipeline.named_steps[step].model.save(prefix + "." + step)
            else:
                joblib.dump(self.pipeline.named_steps[step], prefix + "." + str(step))

        load_command = "pipeline_manager.load_pipeline_from_files('"
        load_command += prefix + "', " + str(self.keras_models) + ", "
        load_command += str(list(self.pipeline.named_steps.keys())) + ")"

        import __main__ as main
        if not hasattr(main, '__file__'):
            display("saved pipeline. It can be loaded the following way:")
            display(Markdown("> ```\n" + load_command + "\n```"))  # ← if we're in jupyter, print it the fancy way :)
        else:
            print("saved pipeline. It can be loaded the following way:")
            print(load_command)

    def load(self, prefix: str, models=[]):
        """
        load a pipeline. A pipeline is represented by multiple model files of the form '<prefix>.<model_name>'
        NOTE: keras model names (if there are any) have to be defined in self.keras_models first!

        @param prefix: the prefix for all model files
        @param models: model names to load
        """
        self.pipeline = None
        model_list = []
        for model in models:
            if model in self.keras_models:
                model_list.append((model, load_model(prefix + "." + model)))
            else:
                model_list.append((model, joblib.load(prefix + "." + model)))
        self.pipeline = Pipeline(model_list)

    def fit(self, X, y):
        """fit the pipeline"""
        self.pipeline.fit(X, y)

    def predict(self, X):
        """predict"""
        return self.pipeline.predict(X)

# * the trainer class passes data from the sample manager to the pipeline manager (a usage sketch follows)
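#
# typical usage (assuming `sdm` and `pm` were created as shown above):
#
#     tr = trainer(sdm=sdm, pm=pm)
#     tr.fit(max_size=100000)
#     pred, yt = tr.test()
#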
# In[9]:
class trainer(object):
    def __init__(self, sdm: sample_data_manager, pm: pipeline_manager):
        """constructor"""
        self.sdm = sdm
        self.pm = pm

    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):
        """
        fit the training data in the pipeline. Because we don't want to refit the vectorizer, the pipeline steps containing the vectorizer have to be named explicitly

        @param max_size: don't train on more examples than this number
        @param disabled_fit_steps: list of pipeline steps that we want to prevent from being refitted. Normally all vectorizer steps
        @param keras_batch_fitting_layer: list of keras steps that are fitted batch-wise
        @param batch_size: if not None, fit in batches of this size
        @param n_epochs: number of passes over the training data when batch fitting
        @param progress_callback: optional function that is called after each fitted batch
        """
        # TODO: make batch fitting available here (e.g. continuous waiting for data and fitting it)
        if self.sdm.X is None:
            self.sdm.create_train_test_split()

        disabled_fits = {}
        disabled_fit_transforms = {}
        disabled_keras_fits = {}

        named_steps = self.pm.pipeline.named_steps

        for s in disabled_fit_steps:
            # now it gets really dirty:
            # replace fit functions we don't want to call again (e.g. for vectorizers)
            disabled_fits[s] = named_steps[s].fit
            disabled_fit_transforms[s] = named_steps[s].fit_transform
            named_steps[s].fit = lambda self, X, y=None: self
            named_steps[s].fit_transform = named_steps[s].transform

        for k in keras_batch_fitting_layer:
            # forcing batch fitting on keras
            disabled_keras_fits[k] = named_steps[k].fit
            named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(X.todense(), y)  # ← why does keras have no sparse support for batch processing!?!?!

        if batch_size is None:
            self.pm.fit(X=self.sdm.X[:max_size], y=self.sdm.y[:max_size])
        else:
            n = len(self.sdm.X) // batch_size
            for i in range(n_epochs):
                for j in range(n):
                    self.pm.fit(X=np.array(self.sdm.X[j * batch_size:(j + 1) * batch_size]), y=np.array(self.sdm.y[j * batch_size:(j + 1) * batch_size]))
                    if progress_callback is not None:
                        progress_callback()
                    pred, yt = self.test()
                    mean_squared_error = ((pred - yt) ** 2).mean(axis=0)
                    print("#" + str(j) + ": loss: ", mean_squared_error)

        # restore the replaced fit functions:
        for s in disabled_fit_steps:
            named_steps[s].fit = disabled_fits[s]
            named_steps[s].fit_transform = disabled_fit_transforms[s]

        for k in keras_batch_fitting_layer:
            named_steps[k].fit = disabled_keras_fits[k]

    def test(self):
        '''
        @return: prediction: list, teacher: list
        '''
        if self.sdm.X is None:
            self.sdm.create_train_test_split()
        return self.pm.predict(self.sdm.Xt), self.sdm.yt