Merge branch 'master' of ssh://gogs@the-cake-is-a-lie.net:20022/jonas/NLP-LAB.git
@@ -144,7 +144,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "4fd5552e6a024dcaa0f35a594c77ae99",
+       "model_id": "d018a59d95fe45f2ae7be013a49b5900",
        "version_major": 2,
        "version_minor": 0
       },
@@ -168,7 +168,8 @@
     "               ],\n",
     "               [\n",
     "                   (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n",
-    "                   (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\")\n",
+    "                   (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\"),\n",
+    "                   (widgets.Checkbox(value=False,disabled=True), \"apply_lemmatization_and_stemming\")\n",
     "               ],\n",
     "               [\n",
     "                   (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
@@ -203,6 +204,12 @@
     "                   (classifier_tab, \"classifier_tab\")\n",
     "               ],\n",
     "               [\n",
+    "                   (widgets.Checkbox(value=True),\"use_doc2vec\"),\n",
+    "                   (widgets.IntText(value=100),\"d2v_size\"),\n",
+    "                   (widgets.IntText(value=8), \"d2v_window\"),\n",
+    "                   (widgets.IntSlider(value=5, min=0, max=32), \"d2v_min_count\")\n",
+    "               ],\n",
+    "               [\n",
     "                   (widgets.Button(), \"create_classifier\")\n",
     "               ],\n",
     "               [\n",
@@ -406,6 +413,7 @@
     "                               \"only_emoticons\",\n",
     "                               \"k_means_cluster\",\n",
     "                               \"n_top_emojis\",\n",
+    "                               \"apply_lemmatization_and_stemming\",\n",
     "                               \"load_data\"], False)\n",
     "            return\n",
     "            \n",
@@ -415,6 +423,7 @@
     "                               \"only_emoticons\",\n",
     "                               \"k_means_cluster\",\n",
     "                               \"n_top_emojis\",\n",
+    "                               \"apply_lemmatization_and_stemming\",\n",
     "                               \"load_data\"], True)\n",
     "        shown_widgets[\"file_range\"].min=0\n",
     "        shown_widgets[\"file_range\"].max=len(files) -1\n",
@@ -429,6 +438,10 @@
     "        r = (r[0], r[1] + 1) # range has to be exclusive according to the last element!\n",
     "        \n",
     "        p_r = progress_indicator(\"reading progress\")\n",
     "        \n",
+    "        lemm_and_stemm = shown_widgets[\"apply_lemmatization_and_stemming\"].value\n",
+    "        \n",
+    "        if lemm_and_stemm:\n",
+    "            p_s = progress_indicator(\"stemming progress\")\n",
     "        \n",
     "        sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
@@ -436,7 +449,8 @@
     "                                                    file_range=range(r[0], r[1]),\n",
     "                                                    n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
     "                                                    read_progress_callback=p_r.update,\n",
-    "                                                    stem_progress_callback=p_s.update)\n",
+    "                                                    stem_progress_callback=p_s.update if lemm_and_stemm else None,\n",
+    "                                                    apply_stemming = lemm_and_stemm)\n",
     "        shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
     "        \n",
     "        \n",
@@ -541,6 +555,15 @@
     "        \n",
     "        mp(\"**chosen classifier**: `\" + chosen_classifier + \"`\")\n",
     "        \n",
+    "        # creating the vectorizer\n",
+    "        vectorizer = None\n",
+    "        if shown_widgets[\"use_doc2vec\"].value:\n",
+    "            vectorizer = stl.skd2v.Doc2VecTransformer(size=shown_widgets[\"d2v_size\"].value,\n",
+    "                                                     window=shown_widgets[\"d2v_window\"].value,\n",
+    "                                                     min_count=shown_widgets[\"d2v_min_count\"].value)\n",
+    "        else:\n",
+    "            vectorizer=TfidfVectorizer(stop_words='english')\n",
+    "        \n",
     "        # TODO: add more classifier options here:\n",
     "        if chosen_classifier is 'keras':\n",
     "            sdm.create_train_test_split()\n",
@@ -562,8 +585,7 @@
     "            mp(\"**layers:** \")\n",
     "            jp(layers, headers=['#neurons', 'activation_func'])\n",
     "\n",
-    "            pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
-    "                                                           layers=layers, sdm=sdm)\n",
+    "            pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer, layers=layers, sdm=sdm)\n",
     "\n",
     "def save_classifier(b):\n",
     "    global sdm\n",
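Note: the notebook code above constructs stl.skd2v.Doc2VecTransformer(size=..., window=..., min_count=...), but the new Tools/sklearn_doc2vec module itself is not included in this diff. Below is a minimal sketch of what such an sklearn-compatible wrapper could look like, assuming it is built on gensim; everything beyond the names visible in the calls above (the class name, the size/window/min_count parameters) is a guess, and note that gensim >= 4 calls the dimension parameter vector_size while older versions called it size:

from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np

class Doc2VecTransformer(BaseEstimator, TransformerMixin):
    def __init__(self, size=100, window=8, min_count=5):
        # `size` is kept as a public attribute on purpose: the pipeline code
        # in the second file below duck-types on it ("size" in dir(vectorizer))
        # to determine the input_dim of the first Dense layer
        self.size = size
        self.window = window
        self.min_count = min_count

    def fit(self, X, y=None):
        # X: iterable of raw documents; gensim expects tagged token lists
        tagged = [TaggedDocument(doc.split(), [i]) for i, doc in enumerate(X)]
        self.model_ = Doc2Vec(tagged, vector_size=self.size,
                              window=self.window, min_count=self.min_count)
        return self

    def transform(self, X):
        # one dense vector per document -> array of shape (n_documents, size)
        return np.array([self.model_.infer_vector(doc.split()) for doc in X])

Unlike TfidfVectorizer, a transformer of this shape returns dense numpy arrays, which is why the to_dense_if_sparse helper introduced further down must leave its output untouched.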
@@ -42,6 +42,7 @@ import sys
 sys.path.append("..")
 
 import Tools.Emoji_Distance as edist
+import Tools.sklearn_doc2vec as skd2v
 
 def emoji2sent(emoji_arr, only_emoticons=True):
     return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])
@@ -49,7 +50,6 @@ def emoji2sent(emoji_arr, only_emoticons=True):
 def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
     return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]
 
-
 # In[3]:
 
 
@@ -459,7 +459,12 @@ class pipeline_manager(object):
         first_layer = True
         for layer in layers:
             if first_layer:
-                model.add(Dense(units=layer[0], activation=layer[1], input_dim=vectorizer.transform([" "])[0]._shape[1]))
+                size = None
+                if "size" in dir(vectorizer):
+                    size = vectorizer.size
+                else:
+                    size = vectorizer.transform([" "])[0]._shape[1]
+                model.add(Dense(units=layer[0], activation=layer[1], input_dim=size))
                 first_layer = False
             else:
                 model.add(Dense(units=layer[0], activation=layer[1]))
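The hunk above duck-types the vectorizer to find the width of the first Dense layer. As a standalone illustration (not code from this commit, and using the public .shape attribute rather than the private ._shape accessed above):

def output_dim(vectorizer):
    # Doc2Vec-style wrappers advertise their dimension via a `size` attribute
    if "size" in dir(vectorizer):
        return vectorizer.size
    # sparse vectorizers (e.g. a fitted TfidfVectorizer) only reveal it
    # through the shape of a transformed dummy document
    return vectorizer.transform([" "])[0].shape[1]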
@@ -587,6 +592,15 @@ class pipeline_manager(object):
 
 # In[9]:
 
+def to_dense_if_sparse(X):
+    """
+    little helper function to make data dense (if it is sparse).
+    is used in trainer.fit function
+    """
+    if "todense" in dir(X):
+        return X.todense()
+    return X
+
 
 class trainer(object):
     def __init__(self, sdm:sample_data_manager, pm:pipeline_manager):
@@ -622,7 +636,8 @@ class trainer(object):
         for k in keras_batch_fitting_layer:
             # forcing batch fitting on keras
             disabled_keras_fits[k]=named_steps[k].fit
-            named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch progressing!?!?!
+
+            named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(to_dense_if_sparse(X), y) # ← why has keras no sparse support on batch progressing!?!?!
         
         if batch_size is None:
             self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])
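A short illustration (not part of the commit) of why the batch-fitting workaround now routes through to_dense_if_sparse: TfidfVectorizer emits scipy sparse matrices, which Keras' train_on_batch cannot consume directly, while the Doc2Vec path already yields dense numpy arrays that must pass through unchanged:

import numpy as np
from scipy.sparse import csr_matrix

def to_dense_if_sparse(X):
    # same duck-typed check as in the commit above
    if "todense" in dir(X):
        return X.todense()
    return X

sparse_X = csr_matrix(np.eye(3))  # TF-IDF-style output: gets densified
dense_X = np.ones((3, 3))         # Doc2Vec-style output: returned as-is
print(type(to_dense_if_sparse(sparse_X)))      # <class 'numpy.matrix'>
print(to_dense_if_sparse(dense_X) is dense_X)  # True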