Merge branch 'master' of ssh://gogs@the-cake-is-a-lie.net:20022/jonas/NLP-LAB.git
169  Project/Tools/emoji_plotting.ipynb  (Normal file)
     File diff suppressed because one or more lines are too long
@@ -15,6 +15,7 @@ from nltk.corpus import wordnet
 import math
 import pprint
 
+from gensim.models import Word2Vec, KeyedVectors
 
 # # Naive Approach
 table = pd.read_csv('../Tools/emoji_descriptions.csv')
@@ -29,25 +30,25 @@ for index, row in table.iterrows():
 # Helper functions
 #######################
 
-def stemming(messages):
-    stemmed_messages = []
+def stemming(message):
     ps = PorterStemmer()
-    for m in messages:
-        words = word_tokenize(m)
-        sm = []
-        for w in words:
-            sm.append(ps.stem(w))
-        m = (" ").join(sm)
-        stemmed_messages.append(m)
-    return stemmed_messages
+    words = word_tokenize(message)
+    sm = []
+    for w in words:
+        sm.append(ps.stem(w))
+    stemmed_message = (" ").join(sm)
+    return stemmed_message
 
 
 # * compare words to emoji descriptions
-def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all"):
+def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all", stem=True):
+    # assumes there is a trained w2v model stored in the same directory!
+    wv = KeyedVectors.load("word2vec.model", mmap='r')
+    
+    if (stem):
+        sentence = stemming(sentence)
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     l = table.shape[0]
     matrix_list = []
     
     for index in tableDict.keys():
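The hunk above turns stemming() from a batch helper into a per-message one. A minimal usage sketch, assuming NLTK is installed and its punkt tokenizer data is downloaded (the sample messages are illustrative, not from the repo):

    from nltk.stem import PorterStemmer
    from nltk.tokenize import word_tokenize

    def stemming(message):
        # per-message variant, as in the new version above
        ps = PorterStemmer()
        return " ".join(ps.stem(w) for w in word_tokenize(message))

    # a batch is now stemmed with a comprehension instead of one call:
    stemmed = [stemming(m) for m in ["dogs are barking", "cats meowed"]]
    # PorterStemmer maps e.g. "barking" -> "bark", "meowed" -> "meow"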
@@ -57,20 +58,11 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
-                syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
-                if len(syn1) == 0:
-                    continue
-                w1 = syn1[0]
-                #print(j, tokenized_sentence)
-                syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
-                if len(syn2) == 0:
-                    continue
-                w2 = syn2[0]
-                val = w1.wup_similarity(w2)
-                if val is None:
+                try:
+                    val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
+                except KeyError:
                     continue
                 mat[i,j] = val
         #print(row['character'], mat)
         matrix_list.append(mat)
             
     return matrix_list
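The replaced block swaps WordNet's path-based wup_similarity for cosine similarity from the word2vec model loaded at the top of the function. A standalone sketch of the same lookup pattern, assuming a gensim model saved locally as word2vec.model:

    from gensim.models import KeyedVectors

    wv = KeyedVectors.load("word2vec.model", mmap='r')

    def similarity_or_none(a, b):
        # gensim raises KeyError for tokens missing from the vocabulary,
        # which the new code above handles by leaving the matrix cell at 0
        try:
            return wv.similarity(a, b)
        except KeyError:
            return None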
@@ -83,10 +75,13 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
 
 # load and preprocess data
 # emojis_to_consider can be either a list or "all"
-def prepareData(stemming=False):
-    if(stemming):
+def prepareData(stem=True, lower=True):
+    if(stem):
         for index in tableDict.keys():
             tableDict[index][1] = stemming(tableDict[index][1])
+    if(lower):
+        for index in tableDict.keys():
+            tableDict[index][1] = tableDict[index][1].lower()
     
     #collect the emojis
     lookup = {}
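For reference, a sketch of what the new lower flag in prepareData does to a description entry; the tableDict layout (index -> [emoji, description]) is inferred from the surrounding code and the sample value is made up:

    tableDict = {0: ['😀', 'Grinning Face']}
    for index in tableDict.keys():
        tableDict[index][1] = tableDict[index][1].lower()
    # tableDict is now {0: ['😀', 'grinning face']}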
BIN  Project/naive_approach/word2vec.model  (Normal file)
     Binary file not shown.
@@ -40,7 +40,8 @@
     "import simple_twitter_learning as stl\n",
     "import glob\n",
     "import sys\n",
-    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer"
+    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
+    "import pickle"
    ]
   },
   {
@@ -144,7 +145,7 @@
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "d304cda50752491da1637b292a9367e8",
+      "model_id": "d00ff918ad4d473499b1e91d4dcb8702",
       "version_major": 2,
       "version_minor": 0
      },
@@ -173,7 +174,8 @@
     "               ],\n",
     "               [\n",
     "                   (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
-    "                   (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\")\n",
+    "                   (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n",
+    "                   (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\")\n",
     "               ],\n",
     "               [\n",
     "                   (widgets.Button(disabled=True),\"load_data\")\n",
@@ -205,6 +207,7 @@
     "               ],\n",
     "               [\n",
     "                   (widgets.Checkbox(value=True),\"use_doc2vec\"),\n",
+    "                   (widgets.Checkbox(value=True),\"d2v_use_pretrained\"),\n",
     "                   (widgets.IntText(value=100),\"d2v_size\"),\n",
     "                   (widgets.IntText(value=8), \"d2v_window\"),\n",
     "                   (widgets.IntSlider(value=5, min=0, max=32), \"d2v_min_count\")\n",
@@ -444,13 +447,16 @@
     "        if lemm_and_stemm:\n",
     "            p_s = progress_indicator(\"stemming progress\")\n",
     "        \n",
+    "        emoji_mean = shown_widgets[\"label_criteria\"].value == \"mean\"\n",
+    "        \n",
     "        sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
     "                                                    n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
     "                                                    file_range=range(r[0], r[1]),\n",
     "                                                    n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
     "                                                    read_progress_callback=p_r.update,\n",
     "                                                    stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
-    "                                                    apply_stemming = lemm_and_stemm)\n",
+    "                                                    apply_stemming = lemm_and_stemm,\n",
+    "                                                    emoji_mean=emoji_mean)\n",
     "        shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
     "        \n",
     "        \n",
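The new label_criteria dropdown chooses between labeling a tweet with its latest emoji's sentiment and the mean over all of its emojis. A small numpy sketch of the "mean" case, with made-up 3-dimensional sentiment vectors:

    import numpy as np

    # hypothetical sentiment vectors for two emojis in one tweet:
    sents = np.array([[0.7, 0.1, 0.2],
                      [0.5, 0.3, 0.2]])
    label = np.mean(sents, axis=0)  # -> array([0.6, 0.2, 0.2])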
@@ -558,9 +564,12 @@
     "        # creating the vectorizer\n",
     "        vectorizer = None\n",
     "        if shown_widgets[\"use_doc2vec\"].value:\n",
-    "            vectorizer = stl.skd2v.Doc2VecTransformer(size=shown_widgets[\"d2v_size\"].value,\n",
-    "                                                     window=shown_widgets[\"d2v_window\"].value,\n",
-    "                                                     min_count=shown_widgets[\"d2v_min_count\"].value)\n",
+    "            if shown_widgets[\"d2v_use_pretrained\"].value:\n",
+    "                vectorizer = pickle.load( open( \"doc2VecModel.p\", \"rb\" ) )\n",
+    "            else:\n",
+    "                vectorizer = stl.skd2v.Doc2VecTransformer(size=shown_widgets[\"d2v_size\"].value,\n",
+    "                                                         window=shown_widgets[\"d2v_window\"].value,\n",
+    "                                                         min_count=shown_widgets[\"d2v_min_count\"].value)\n",
     "        else:\n",
     "            vectorizer=TfidfVectorizer(stop_words='english')\n",
     "        \n",
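The d2v_use_pretrained branch unpickles an already fitted transformer instead of training a new one. A hedged sketch of the assumed counterpart that would have produced doc2VecModel.p (train_texts is a placeholder, and the fit call assumes the sklearn-style interface the transformer's name suggests):

    import pickle

    # assumption: a Doc2VecTransformer fitted offline and dumped once
    vectorizer = stl.skd2v.Doc2VecTransformer(size=100, window=8, min_count=5)
    vectorizer.fit(train_texts)
    pickle.dump(vectorizer, open("doc2VecModel.p", "wb"))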
BIN  Project/simple_approach/doc2VecModel.p  (Normal file)
     Binary file not shown.
@@ -28,6 +28,8 @@ nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
 
+from keras import losses
+
 # check whether the display function exists:
 try:
     display
@@ -52,7 +54,6 @@ def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
 
 # In[3]:
 
-
 SINGLE_LABEL = True
 
 
@@ -161,7 +162,7 @@ def batch_lemm(sentences):
 
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None):
+    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None, emoji_mean=False):
         """
         generate, read and process train data in one step.
         
@@ -175,7 +176,7 @@ class sample_data_manager(object):
         @return: sample_data_manager object
         """
         sdm = sample_data_manager(path)
-        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback)
+        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback, emoji_mean=emoji_mean)
         if apply_stemming:
             sdm.apply_stemming_and_lemmatization(progress_callback=stem_progress_callback)
         
@@ -239,7 +240,7 @@ class sample_data_manager(object):
                 # so far filtering for the latest emoji. TODO: maybe there are also better approaches
                 labels_i = emoji2sent([latest(e) for e in emojis_i], only_emoticons=only_emoticons )
             else:
-                labels_i = np.array([np.mean(emoji2sent(e, only_emoticons=only_emoticons), axis=0) for e in emojis_i])
+                labels_i = np.array([np.mean(emoji2sent(e, only_emoticons=only_emoticons), axis=0).tolist() for e in emojis_i])
 
             # and filter out all samples we have no label for:
             wrong_labels = np.isnan(np.linalg.norm(labels_i, axis=1))
@@ -431,7 +432,7 @@ class pipeline_manager(object):
         return pm
     
     @staticmethod
-    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None):
+    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None, fit_vectorizer=True):
         '''
         creates pipeline with vectorizer and keras classifier
         
@@ -447,11 +448,12 @@ class pipeline_manager(object):
         from keras.models import Sequential
         from keras.layers import Dense
         
-        if sdm.X is None:
-            sdm.create_train_test_split()
-        
-        vec_train = vectorizer.fit_transform(sdm.X)
-        vec_test = vectorizer.transform(sdm.Xt)
+        if fit_vectorizer:
+            if sdm.X is None:
+                sdm.create_train_test_split()
+
+            vec_train = vectorizer.fit_transform(sdm.X)
+            vec_test = vectorizer.transform(sdm.Xt)
         # creating keras model:
         model=Sequential()
         
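With the new fit_vectorizer flag, a pretrained vectorizer can be passed through without being refitted. A usage sketch (the layers value is a placeholder; its exact format is defined elsewhere in this file):

    vectorizer = pickle.load(open("doc2VecModel.p", "rb"))  # already fitted
    pm = pipeline_manager.create_keras_pipeline_with_vectorizer(
        vectorizer, layers=layers, sdm=sdm, fit_vectorizer=False)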
@@ -578,7 +580,7 @@ class pipeline_manager(object):
         """fitting the pipeline"""
         self.pipeline.fit(X,y)
     
-    def predict(self,X, use_stemming=True, use_lemmatization=True):
+    def predict(self,X, use_stemming=False, use_lemmatization=False):
         """predict"""
         if use_stemming:
             X = np.array(batch_stem(X))
@@ -608,7 +610,7 @@ class trainer(object):
         self.sdm = sdm
         self.pm = pm
     
-    def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):
+    def fit(self, max_size=1000000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):
         """
         fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline models containing the vectorizer have to be named explicitly
         
@@ -641,7 +643,12 @@ class trainer(object):
                 named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(to_dense_if_sparse(X), y) # ← why has keras no sparse support on batch progressing!?!?!
             
         if batch_size is None:
-            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
+            for e in range(n_epochs):
+                print("epoch", e)
+                self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
+                pred, yt = self.test()
+                mean_squared_error = ((pred - yt)**2).mean(axis=0)
+                print("#" + str(e) + ": validation loss: ", mean_squared_error, "scalar: ", np.mean(mean_squared_error))
         else:
             n = len(self.sdm.X) // batch_size
             for i in range(n_epochs):
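The rewritten non-batch branch reports a per-dimension validation MSE and its scalar mean after every epoch. A numpy sketch of that metric on made-up predictions and targets:

    import numpy as np

    pred = np.array([[0.5, 0.2, 0.3], [0.6, 0.2, 0.2]])
    yt   = np.array([[0.7, 0.1, 0.2], [0.4, 0.4, 0.2]])
    mse = ((pred - yt)**2).mean(axis=0)  # -> array([0.04 , 0.025, 0.005])
    print("validation loss:", mse, "scalar:", np.mean(mse))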