simple improvements
commit 7f6c9791ae
parent b4ae0b033e
@@ -145,7 +145,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "5a488abefd074719adb15425714a076f",
+       "model_id": "d00ff918ad4d473499b1e91d4dcb8702",
        "version_major": 2,
        "version_minor": 0
       },
@@ -174,7 +174,8 @@
     " ],\n",
     " [\n",
     " (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
-    " (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\")\n",
+    " (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\"),\n",
+    " (widgets.Dropdown(options=[\"latest\", \"mean\"], value=\"latest\"), \"label_criteria\")\n",
     " ],\n",
     " [\n",
     " (widgets.Button(disabled=True),\"load_data\")\n",
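The new "label_criteria" entry is a plain ipywidgets Dropdown with the two options "latest" and "mean". As a rough, standalone illustration (outside the notebook's own shown_widgets plumbing, which this diff does not show), such a widget can be created, displayed and observed like this:

# Minimal sketch, not code from the notebook: creating and reading the kind of
# Dropdown added above.
import ipywidgets as widgets
from IPython.display import display

label_criteria = widgets.Dropdown(options=["latest", "mean"], value="latest",
                                  description="label_criteria")
display(label_criteria)

def on_change(change):
    # change["new"] holds the newly selected option ("latest" or "mean")
    print("label criteria switched to:", change["new"])

label_criteria.observe(on_change, names="value")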
@@ -446,13 +447,16 @@
     " if lemm_and_stemm:\n",
     " p_s = progress_indicator(\"stemming progress\")\n",
     " \n",
+    " emoji_mean = shown_widgets[\"label_criteria\"].value == \"mean\"\n",
+    " \n",
     " sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
     " n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
     " file_range=range(r[0], r[1]),\n",
     " n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
     " read_progress_callback=p_r.update,\n",
     " stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
-    " apply_stemming = lemm_and_stemm)\n",
+    " apply_stemming = lemm_and_stemm,\n",
+    " emoji_mean=emoji_mean)\n",
     " shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
     " \n",
     " \n",
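The notebook now maps the dropdown selection to a boolean emoji_mean flag and forwards it to generate_and_read. What "mean" versus "latest" does to the labels is not spelled out in this diff; a purely hypothetical sketch, assuming each message accumulates one label vector per emoji occurrence:

import numpy as np

# Hypothetical illustration of the two label criteria; the numbers are made up.
label_history = np.array([
    [0.1, 0.9],   # oldest per-emoji label vector
    [0.4, 0.6],
    [0.8, 0.2],   # latest per-emoji label vector
])

label_latest = label_history[-1]          # "latest": keep only the most recent label
label_mean = label_history.mean(axis=0)   # "mean": average over all observed labels

print(label_latest)  # [0.8 0.2]
print(label_mean)    # [0.43333333 0.56666667]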
@@ -28,6 +28,8 @@ nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
 
+from keras import losses
+
 # check whether the display function exists:
 try:
     display
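The only functional change in this hunk is the new `from keras import losses` import; where the module is used later is outside this excerpt. As a generic example of what it provides (not code from this repository), a loss from keras.losses can be handed to compile() by reference:

# Generic keras.losses usage sketch; model shape and names are placeholders.
from keras.models import Sequential
from keras.layers import Dense
from keras import losses

model = Sequential([Dense(2, input_dim=10, activation="sigmoid")])
model.compile(optimizer="adam", loss=losses.mean_squared_error)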
@@ -160,7 +162,7 @@ def batch_lemm(sentences):
 
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None):
+    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None, emoji_mean=False):
         """
         generate, read and process train data in one step.
 
@@ -174,7 +176,7 @@ class sample_data_manager(object):
         @return: sample_data_manager object
         """
         sdm = sample_data_manager(path)
-        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback)
+        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback, emoji_mean=emoji_mean)
         if apply_stemming:
             sdm.apply_stemming_and_lemmatization(progress_callback=stem_progress_callback)
 
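These two hunks only thread the new emoji_mean keyword from the static factory through to read_files; how the flag is interpreted inside read_files is not part of this diff. A stripped-down sketch of that pass-through pattern (class and attribute names below are placeholders, not the project's):

class DataManager:
    @staticmethod
    def generate_and_read(path, emoji_mean=False):
        # the factory only forwards the flag; the instance method decides what it means
        dm = DataManager()
        dm.read_files(path, emoji_mean=emoji_mean)
        return dm

    def read_files(self, path, emoji_mean=False):
        self.use_mean_labels = emoji_mean
        # ... actual file reading would happen here ...

dm = DataManager.generate_and_read("./data", emoji_mean=True)
print(dm.use_mean_labels)  # True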
@@ -641,7 +643,12 @@ class trainer(object):
                 named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(to_dense_if_sparse(X), y) # ← why has keras no sparse support on batch progressing!?!?!
 
         if batch_size is None:
-            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
+            for e in range(n_epochs):
+                print("epoch", e)
+                self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
+                pred, yt = self.test()
+                mean_squared_error = ((pred - yt)**2).mean(axis=0)
+                print("#" + str(e) + ": validation loss: ", mean_squared_error, "scalar: ", np.mean(mean_squared_error))
         else:
             n = len(self.sdm.X) // batch_size
             for i in range(n_epochs):
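The rewritten batch_size-is-None branch now refits once per epoch and reports a per-output plus a scalar validation MSE after every pass. A small self-contained sketch of that metric computation, with random arrays standing in for the predictions and targets returned by self.test():

import numpy as np

rng = np.random.RandomState(0)
pred = rng.rand(100, 2)   # stand-in for predictions from self.test()
yt = rng.rand(100, 2)     # stand-in for the matching ground-truth labels

# per-output validation loss, as computed in the diff above:
mean_squared_error = ((pred - yt) ** 2).mean(axis=0)
# single scalar summary across outputs:
print("validation loss:", mean_squared_error, "scalar:", np.mean(mean_squared_error))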