diff --git a/Project/simple_approach/Continous_Learner.ipynb b/Project/simple_approach/Continous_Learner.ipynb
index 8514c2c..59996ec 100644
--- a/Project/simple_approach/Continous_Learner.ipynb
+++ b/Project/simple_approach/Continous_Learner.ipynb
@@ -144,7 +144,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "a4899ee1720f4db4a136a96657f3283a",
+       "model_id": "d304cda50752491da1637b292a9367e8",
        "version_major": 2,
        "version_minor": 0
       },
diff --git a/Project/simple_approach/simple_twitter_learning.py b/Project/simple_approach/simple_twitter_learning.py
index 73ebd5e..88267b3 100644
--- a/Project/simple_approach/simple_twitter_learning.py
+++ b/Project/simple_approach/simple_twitter_learning.py
@@ -641,7 +641,7 @@ class trainer(object):
             named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(to_dense_if_sparse(X), y)  # ← why has keras no sparse support on batch progressing!?!?!
 
         if batch_size is None:
-            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size], validation_split=0.1, epochs=n_epochs)
+            self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
else:
            n = len(self.sdm.X) // batch_size
            for i in range(n_epochs):
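
Note: the patched fit above routes batches through to_dense_if_sparse because Keras' train_on_batch does not accept scipy sparse matrices. The helper's body is not shown in this hunk; the following is a minimal sketch of what such a helper could look like, assuming it only needs to densify scipy sparse inputs and pass everything else through unchanged:

    import scipy.sparse as sp

    def to_dense_if_sparse(X):
        # train_on_batch cannot consume scipy sparse matrices directly,
        # so densify only when needed; dense arrays pass through as-is.
        return X.toarray() if sp.issparse(X) else X

The second hunk drops validation_split and epochs from the non-batched fit call, so that branch now trains with the pipeline's defaults (a single epoch, no held-out validation split).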