From 7f6c9791ae98d2c48bc6b6da82e44983d7b0cba2 Mon Sep 17 00:00:00 2001
From: Jonas Weinz
Date: Tue, 10 Jul 2018 15:59:28 +0200
Subject: [PATCH] add emoji label criteria option and per-epoch validation loss

Add a "label_criteria" dropdown ("latest" or "mean") to the continuous
learner notebook and thread it through generate_and_read to read_files as
an emoji_mean flag. When training without batching, report the validation
MSE after every epoch.
---
 Project/simple_approach/Continous_Learner.ipynb    | 10 +++++++---
 Project/simple_approach/simple_twitter_learning.py | 15 ++++++++++++---
 2 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/Project/simple_approach/Continous_Learner.ipynb b/Project/simple_approach/Continous_Learner.ipynb
index 2cafe29..454d176 100644
--- a/Project/simple_approach/Continous_Learner.ipynb
+++ b/Project/simple_approach/Continous_Learner.ipynb
@@ -145,7 +145,7 @@
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
-      "model_id": "5a488abefd074719adb15425714a076f",
+      "model_id": "d00ff918ad4d473499b1e91d4dcb8702",
       "version_major": 2,
       "version_minor": 0
      },
@@ -174,7 +174,8 @@
    "    ],\n",
    "    [\n",
    "        (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
-   "        (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\")\n",
+   "        (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n",
+   "        (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\")\n",
    "    ],\n",
    "    [\n",
    "        (widgets.Button(disabled=True),\"load_data\")\n",
@@ -446,13 +447,16 @@
    "    if lemm_and_stemm:\n",
    "        p_s = progress_indicator(\"stemming progress\")\n",
    "    \n",
+   "    emoji_mean = shown_widgets[\"label_criteria\"].value == \"mean\"\n",
+   "    \n",
    "    sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
    "                                                    n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
    "                                                    file_range=range(r[0], r[1]),\n",
    "                                                    n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
    "                                                    read_progress_callback=p_r.update,\n",
    "                                                    stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
-   "                                                    apply_stemming = lemm_and_stemm)\n",
+   "                                                    apply_stemming=lemm_and_stemm,\n",
+   "                                                    emoji_mean=emoji_mean)\n",
    "    shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
    "    \n",
    "    \n",
diff --git a/Project/simple_approach/simple_twitter_learning.py b/Project/simple_approach/simple_twitter_learning.py
index c9b82c7..a193819 100644
--- a/Project/simple_approach/simple_twitter_learning.py
+++ b/Project/simple_approach/simple_twitter_learning.py
@@ -28,6 +28,8 @@ nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
 
+from keras import losses
+
 # check whether the display function exists:
 try:
     display
@@ -160,7 +162,7 @@ def batch_lemm(sentences):
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None):
+    def generate_and_read(path: str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None, emoji_mean=False):
         """
         generate, read and process train data in one step. 
@@ -174,7 +176,8 @@ class sample_data_manager(object):
+        @param emoji_mean: if True, label a sample with the mean over all of its emojis instead of only the latest one
         @return: sample_data_manager object
         """
         sdm = sample_data_manager(path)
-        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback)
+        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback, emoji_mean=emoji_mean)
         if apply_stemming:
             sdm.apply_stemming_and_lemmatization(progress_callback=stem_progress_callback)
@@ -641,7 +644,13 @@ class trainer(object):
             named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(to_dense_if_sparse(X), y) # ← why has keras no sparse support on batch progressing!?!?!
 
         if batch_size is None:
-            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
+            for e in range(n_epochs):
+                print("epoch", e)
+                self.pm.fit(X=self.sdm.X[:max_size], y=self.sdm.y[:max_size])
+                # evaluate on the test split after every epoch to track the validation loss
+                pred, yt = self.test()
+                val_mse = ((pred - yt)**2).mean(axis=0)
+                print("#" + str(e) + ": validation loss:", val_mse, "scalar:", np.mean(val_mse))
         else:
             n = len(self.sdm.X) // batch_size
             for i in range(n_epochs):
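
Notes:

The notebook's new "label_criteria" dropdown is mapped to the boolean
emoji_mean flag, which generate_and_read forwards to read_files. read_files
itself is not part of the hunks above, so the following is only a sketch of
the two labeling strategies the dropdown names; label_for_tweet and the
per-emoji sentiment-vector representation are assumptions for illustration,
not code from the repository:

    import numpy as np

    def label_for_tweet(emoji_sentiments, emoji_mean=False):
        # emoji_sentiments: (n_emojis, dim) array with one sentiment vector
        # per emoji, in order of appearance (assumed representation)
        if emoji_mean:
            # "mean" criterion: average over every emoji in the tweet
            return emoji_sentiments.mean(axis=0)
        # "latest" criterion: keep only the last emoji's sentiment vector
        return emoji_sentiments[-1]

    print(label_for_tweet(np.array([[0.7, 0.1], [0.1, 0.8]]), emoji_mean=True))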
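
From the caller's side the new keyword is used exactly as in the notebook
cell above. A minimal stand-alone call, where the path and the other argument
values are placeholders rather than settings taken from the repository:

    import simple_twitter_learning as stl

    # "./data" is a placeholder corpus directory
    sdm = stl.sample_data_manager.generate_and_read(path="./data",
                                                    n_top_emojis=20,
                                                    file_range=range(0, 1),
                                                    apply_stemming=False,
                                                    emoji_mean=True)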
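
The epoch loop reports ((pred - yt)**2).mean(axis=0), i.e. one mean squared
error per output dimension, and np.mean of that vector as a scalar summary.
A toy check of this arithmetic with made-up numbers:

    import numpy as np

    pred = np.array([[0.2, 0.9], [0.4, 0.1]])   # predictions on the test split
    yt   = np.array([[0.0, 1.0], [0.5, 0.0]])   # matching validation targets

    per_dim_mse = ((pred - yt) ** 2).mean(axis=0)  # MSE per output dimension
    scalar_mse = np.mean(per_dim_mse)              # single scalar summary
    print("validation loss:", per_dim_mse, "scalar:", scalar_mse)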