merged kmeans approach

parent 0bedb6060d
commit b067c789a7
@@ -51,7 +51,8 @@
 "import sklearn.utils as sku\n",
 "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
 "from sklearn.model_selection import train_test_split\n",
-"from sklearn.preprocessing import MultiLabelBinarizer\n",
+"from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer\n",
+"from sklearn.cluster import KMeans\n",
 "import nltk\n",
 "from keras.models import load_model\n",
 "from sklearn.externals import joblib\n",
@@ -72,14 +73,13 @@
 "import sys\n",
 "sys.path.append(\"..\")\n",
 "\n",
-"from Tools.Emoji_Distance import sentiment_vector_to_emoji\n",
-"from Tools.Emoji_Distance import emoji_to_sentiment_vector\n",
+"import Tools.Emoji_Distance as edist\n",
 "\n",
 "def emoji2sent(emoji_arr, only_emoticons=True):\n",
-"    return np.array([emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])\n",
+"    return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])\n",
 "\n",
 "def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):\n",
-"    return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]"
+"    return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]"
 ]
 },
 {
@@ -200,7 +200,7 @@
 "source": [
 "class sample_data_manager(object):\n",
 "    @staticmethod\n",
-"    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None):\n",
+"    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):\n",
 "        \"\"\"\n",
 "        generate, read and process train data in one step.\n",
 "        \n",
@@ -209,6 +209,8 @@
 "        @param apply_stemming: apply stemming and lemmatization on dataset\n",
 "        @param n_top_emojis: only use messages containing one of <`n_top_emojis`>-top emojis. set to `-1` to prevent top emoji filtering\n",
 "        @param file_range: range of file's indices to read (eg `range(3)` to read the first three files). If `None`: all files are read\n",
+"        @param n_kmeans_cluster: generating multilabeled labels with kmeans with these number of clusters. Set to -1 to use the plain sentiment space as label\n",
+"        \n",
 "        @return: sample_data_manager object\n",
 "        \"\"\"\n",
 "        sdm = sample_data_manager(path)\n",
@@ -221,6 +223,9 @@
 "        if n_top_emojis > 0:\n",
 "            sdm.filter_by_top_emojis(n_top=n_top_emojis)\n",
 "        \n",
+"        if n_kmeans_cluster > 0:\n",
+"            sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)\n",
+"        \n",
 "        return sdm\n",
 "    \n",
 "    \n",
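Note: with this change the label mode is selected at read time. A minimal usage sketch, assuming the module is importable and `./data_en/` holds the generated data; `n_kmeans_cluster=5` is an illustrative value, not taken from the commit:

    # n_kmeans_cluster=-1 keeps plain sentiment vectors as labels;
    # any value > 0 switches to one-hot kmeans cluster labels
    sdm = sample_data_manager.generate_and_read(path="./data_en/",
                                                n_top_emojis=20,
                                                file_range=range(1),
                                                n_kmeans_cluster=5)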
@@ -244,6 +249,10 @@
 "        self.Xt = None\n",
 "        self.yt = None\n",
 "        self.top_emojis = None\n",
+"        self.binary_labels = None\n",
+"        self.use_binary_labels = False\n",
+"        self.kmeans_cluster = None\n",
+"        self.label_binarizer = None\n",
 "    \n",
 "    def read_files(self, file_index_range:list, only_emoticons=True):\n",
 "        \"\"\"\n",
@@ -348,16 +357,46 @@
 "        assert self.labels is not None # ← messages are already read in\n",
 "        \n",
 "        self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]\n",
-"        in_top = [sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n",
+"        in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]\n",
 "        self.labels = self.labels[in_top]\n",
 "        self.plain_text = self.plain_text[in_top]\n",
 "        self.emojis = self.emojis[in_top]\n",
 "        print(\"remaining samples after top emoji filtering: \", len(self.labels))\n",
 "    \n",
+"    def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):\n",
+"        \"\"\"\n",
+"        generate binary labels using kmeans.\n",
+"        \n",
+"        @param only_emoticons: set whether we're using the full emoji set or only emoticons\n",
+"        @param n_clusters: number of cluster we're generating in emoji's sentiment space\n",
+"        \"\"\"\n",
+"        assert self.labels is not None\n",
+"        array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors\n",
+"        array_sentiment_vectors = np.array(array_sentiment_vectors)\n",
+"        \n",
+"        list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis\n",
+"        self.use_binary_labels = True\n",
+"        print(\"clustering following emojis: \" + \"\".join(list_emojis) + \"...\")\n",
+"        self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)\n",
+"        print(\"clustering done\")\n",
+"        self.label_binarizer = LabelBinarizer()\n",
+"        \n",
+"        multiclass_labels = self.kmeans_cluster.predict(self.labels)\n",
+"        \n",
+"        # FIXME: we have to guarantee that in every dataset all classes occur.\n",
+"        # otherwise batch fitting is not possible!\n",
+"        # (or we have to precompute the mlb fitting process somewhere...)\n",
+"        self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)\n",
+"        \n",
+"    \n",
 "    def create_train_test_split(self, split = 0.1, random_state = 4222):\n",
+"        assert self.plain_text is not None and self.labels is not None\n",
 "        if self.X is not None:\n",
 "            sys.stderr.write(\"WARNING: overwriting existing train/test split \\n\")\n",
-"        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state)\n",
+"        \n",
+"        labels = self.binary_labels if self.use_binary_labels else self.labels\n",
+"        assert labels is not None\n",
+"        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n",
 "\n"
 ]
 },
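Note: the core of the new `generate_kmeans_binary_label` is KMeans over the emoji sentiment space followed by one-hot encoding of the cluster ids. A self-contained sketch of that technique, using random stand-in data (the real vectors come from `Tools.Emoji_Distance`):

    import numpy as np
    from sklearn.cluster import KMeans
    from sklearn.preprocessing import LabelBinarizer

    # stand-in for the emoji sentiment space: rows are 3-d sentiment vectors
    sentiment_space = np.random.RandomState(42).rand(100, 3)

    # cluster the sentiment space once
    kmeans = KMeans(n_clusters=5).fit(sentiment_space)

    # map each sample's sentiment label to its cluster id ...
    sample_labels = np.random.RandomState(0).rand(10, 3)  # stand-in for sdm.labels
    cluster_ids = kmeans.predict(sample_labels)

    # ... and one-hot encode the cluster ids for the keras classifier
    binary_labels = LabelBinarizer().fit_transform(cluster_ids)
    print(binary_labels.shape)  # (10, 5) only if all 5 clusters occur in cluster_ids,
                                # which is exactly the FIXME noted in the commit

`create_train_test_split` then splits these one-hot labels instead of the raw sentiment vectors whenever `use_binary_labels` is set.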
@@ -392,13 +431,15 @@
 "        return pm\n",
 "    \n",
 "    @staticmethod\n",
-"    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager):\n",
+"    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None):\n",
 "        '''\n",
 "        creates pipeline with vectorizer and keras classifier\n",
 "        \n",
 "        @param vectorizer: Vectorizer object. will be fitted with data provided by sdm\n",
 "        @param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_func:str>)\n",
 "        @param sdm: sample data manager to get data for the vectorizer\n",
+"        @param loss: set keras loss function. Depending whether sdm use multiclass labels `categorical_crossentropy` or `mean_squared_error` is used as default\n",
+"        @param optimizer: set keras optimizer. Depending whether sdm use multiclass labels `sgd` or `adam` is used as default\n",
 "        \n",
 "        @return: a pipeline manager object\n",
 "        \n",
@@ -423,8 +464,17 @@
 "            else:\n",
 "                model.add(Dense(units=layer[0], activation=layer[1]))\n",
 "        \n",
-"        model.compile(loss='mean_squared_error',\n",
-"                      optimizer='adam')\n",
+"        if sdm.use_binary_labels: \n",
+"            loss_function = loss if loss is not None else 'categorical_crossentropy'\n",
+"            optimizer_function = optimizer if optimizer is not None else 'sgd'\n",
+"            model.compile(loss=loss_function,\n",
+"                          optimizer=optimizer_function,\n",
+"                          metrics=['accuracy'])\n",
+"        else:\n",
+"            loss_function = loss if loss is not None else 'mean_squared_error'\n",
+"            optimizer_function = optimizer if optimizer is not None else 'adam'\n",
+"            model.compile(loss=loss_function,\n",
+"                          optimizer=optimizer_function)\n",
 "        \n",
 "        pipeline = Pipeline([\n",
 "            ('vectorizer',vectorizer),\n",
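Note: compile settings now follow the label mode. A minimal standalone sketch of the same branch; layer sizes are illustrative, and the softmax here is this sketch's choice (categorical_crossentropy expects a probability distribution), whereas the commit itself uses whatever activation was passed in `layers`:

    from keras.models import Sequential
    from keras.layers import Dense

    use_binary_labels = True  # stand-in for sdm.use_binary_labels

    model = Sequential()
    model.add(Dense(units=2500, activation='relu', input_dim=10000))
    model.add(Dense(units=5, activation='softmax' if use_binary_labels else None))

    if use_binary_labels:
        # one-hot cluster targets -> classification defaults
        model.compile(loss='categorical_crossentropy', optimizer='sgd',
                      metrics=['accuracy'])
    else:
        # plain 3-d sentiment targets -> regression defaults
        model.compile(loss='mean_squared_error', optimizer='adam')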
@@ -613,7 +663,7 @@
 "imported 33368 samples\n",
 "remaining samples after top emoji filtering:  26197\n",
 "Epoch 1/1\n",
-"100/100 [==============================] - 3s 27ms/step - loss: 0.1227\n"
+"100/100 [==============================] - 3s 28ms/step - loss: 0.1230\n"
 ]
 }
 ],
@@ -622,11 +672,18 @@
 "if not hasattr(main, '__file__'):\n",
 "    # we are in an interactive environment (probably in jupyter)\n",
 "    # load data:\n",
-"    sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1))\n",
+"    \n",
+"    # setting n_kmeans_clusters to a value > 0 activates binarized labeling automatically! \n",
+"    # set to -1 to disable kmeans clustering and generating labels in plain sentiment space\n",
+"    \n",
+"    #n_kmeans_cluster = 5\n",
+"    n_kmeans_cluster = -1\n",
+"    sdm = sample_data_manager.generate_and_read(path=\"./data_en/\", n_top_emojis=20, file_range=range(1), n_kmeans_cluster=n_kmeans_cluster)\n",
+"    sdm.create_train_test_split()\n",
 "    #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\\n\",\n",
 "    #                                                            layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\\n\",\n",
 "    pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
-"                                                                layers=[(2500, 'relu'),(3,None)], sdm=sdm)\n",
+"                                                                layers=[(2500, 'relu'),(sdm.y.shape[1],None)], sdm=sdm)\n",
 "    tr = trainer(sdm=sdm, pm=pm)\n",
 "    tr.fit(100)"
 ]
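Note: the output layer is now sized from the labels themselves (`sdm.y.shape[1]`), so the same driver works in both label modes: 3 units for plain sentiment vectors, `n_clusters` units for one-hot kmeans labels. A tiny sketch with arrays standing in for `sdm.y`:

    import numpy as np

    for y in (np.zeros((8, 3)), np.zeros((8, 5))):     # sentiment vs. 5-cluster one-hot
        layers = [(2500, 'relu'), (y.shape[1], None)]  # output layer sized from labels
        print(layers[-1])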
@@ -641,7 +698,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": 11,
 "metadata": {},
 "outputs": [
 {
@@ -651,14 +708,6 @@
 "['keras_model']\n"
 ]
 },
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/jonas/.local/lib/python3.6/site-packages/keras/engine/sequential.py:109: UserWarning: `Sequential.model` is deprecated. `Sequential` is a subclass of `Model`, you can just use your `Sequential` instance directly.\n",
-"  warnings.warn('`Sequential.model` is deprecated. '\n"
-]
-},
 {
 "data": {
 "text/plain": [
@@ -701,19 +750,19 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
+"execution_count": 12,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
-"array([[0.4423941 , 0.22976081, 0.26076168],\n",
-"       [0.75167173, 0.2919423 , 0.3423372 ],\n",
-"       [0.48943695, 0.21931192, 0.22773138],\n",
+"array([[0.16062996, 0.08324276, 0.09433182],\n",
+"       [0.16413   , 0.09421383, 0.07578427],\n",
+"       [0.11994962, 0.05705731, 0.06310127],\n",
 "       ...,\n",
-"       [0.51003224, 0.26002786, 0.25588542],\n",
-"       [0.5808168 , 0.30632192, 0.2964917 ],\n",
-"       [0.39000767, 0.31723523, 0.24713083]], dtype=float32)"
+"       [0.13887292, 0.08502828, 0.08176519],\n",
+"       [0.18185864, 0.09223703, 0.10704609],\n",
+"       [0.17687687, 0.09147045, 0.10650696]], dtype=float32)"
 ]
 },
 "metadata": {},
@@ -738,7 +787,7 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"prediction variance:  0.009700283\n",
+"prediction variance:  0.0005431187\n",
 "teacher variance:  0.03341702104519965\n"
 ]
 },
@@ -763,79 +812,79 @@
 "  <thead>\n",
 "    <tr style=\"text-align: right;\">\n",
 "      <th></th>\n",
-"      <th>text</th>\n",
-"      <th>teacher</th>\n",
-"      <th>teacher_sentiment</th>\n",
 "      <th>predict</th>\n",
 "      <th>predicted_sentiment</th>\n",
+"      <th>teacher</th>\n",
+"      <th>teacher_sentiment</th>\n",
+"      <th>text</th>\n",
 "    </tr>\n",
 "  </thead>\n",
 "  <tbody>\n",
 "    <tr>\n",
 "      <th>35671</th>\n",
-"      <td>i feel like i care so much more in everi situat</td>\n",
+"      <td>😂</td>\n",
+"      <td>[0.16062995791435242, 0.0832427591085434, 0.09...</td>\n",
 "      <td>😂</td>\n",
 "      <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
-"      <td>😂</td>\n",
-"      <td>[0.44239410758018494, 0.2297608107328415, 0.26...</td>\n",
+"      <td>i feel like i care so much more in everi situat</td>\n",
 "    </tr>\n",
 "    <tr>\n",
 "      <th>25683</th>\n",
-"      <td>i did not meat to add that 2 there ... hav see...</td>\n",
+"      <td>😢</td>\n",
+"      <td>[0.16413000226020813, 0.0942138284444809, 0.07...</td>\n",
 "      <td>😂</td>\n",
 "      <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
-"      <td>😌</td>\n",
-"      <td>[0.7516717314720154, 0.291942298412323, 0.3423...</td>\n",
+"      <td>i did not meat to add that 2 there ... hav see...</td>\n",
 "    </tr>\n",
 "    <tr>\n",
 "      <th>8985</th>\n",
-"      <td>never…</td>\n",
+"      <td>😂</td>\n",
+"      <td>[0.11994962394237518, 0.05705730617046356, 0.0...</td>\n",
 "      <td>😊</td>\n",
 "      <td>[0.7040175768989329, 0.059322033898305086, 0.2...</td>\n",
-"      <td>😂</td>\n",
-"      <td>[0.48943695425987244, 0.21931192278862, 0.2277...</td>\n",
+"      <td>never…</td>\n",
 "    </tr>\n",
 "    <tr>\n",
 "      <th>5410</th>\n",
-"      <td>lmao on me ! ! ! wtf wa he suppos to say</td>\n",
+"      <td>😂</td>\n",
+"      <td>[0.18114930391311646, 0.10199417173862457, 0.1...</td>\n",
 "      <td>😂</td>\n",
 "      <td>[0.46813021474490496, 0.24716181096977158, 0.2...</td>\n",
-"      <td>😢</td>\n",
-"      <td>[0.3661550283432007, 0.32579296827316284, 0.23...</td>\n",
+"      <td>lmao on me ! ! ! wtf wa he suppos to say</td>\n",
 "    </tr>\n",
 "    <tr>\n",
 "      <th>62611</th>\n",
-"      <td>this dude alway help me get through my school ...</td>\n",
+"      <td>😂</td>\n",
+"      <td>[0.16997836530208588, 0.08633847534656525, 0.0...</td>\n",
 "      <td>😊</td>\n",
 "      <td>[0.7040175768989329, 0.059322033898305086, 0.2...</td>\n",
-"      <td>😂</td>\n",
-"      <td>[0.48689204454421997, 0.20729433000087738, 0.2...</td>\n",
+"      <td>this dude alway help me get through my school ...</td>\n",
 "    </tr>\n",
 "  </tbody>\n",
 "</table>\n",
 "</div>"
 ],
 "text/plain": [
-" text teacher \\\n",
-"35671 i feel like i care so much more in everi situat 😂 \n",
-"25683 i did not meat to add that 2 there ... hav see... 😂 \n",
-"8985 never… 😊 \n",
-"5410 lmao on me ! ! ! wtf wa he suppos to say 😂 \n",
-"62611 this dude alway help me get through my school ... 😊 \n",
+" predict predicted_sentiment teacher \\\n",
+"35671 😂 [0.16062995791435242, 0.0832427591085434, 0.09... 😂 \n",
+"25683 😢 [0.16413000226020813, 0.0942138284444809, 0.07... 😂 \n",
+"8985 😂 [0.11994962394237518, 0.05705730617046356, 0.0... 😊 \n",
+"5410 😂 [0.18114930391311646, 0.10199417173862457, 0.1... 😂 \n",
+"62611 😂 [0.16997836530208588, 0.08633847534656525, 0.0... 😊 \n",
 "\n",
-" teacher_sentiment predict \\\n",
-"35671 [0.46813021474490496, 0.24716181096977158, 0.2... 😂 \n",
-"25683 [0.46813021474490496, 0.24716181096977158, 0.2... 😌 \n",
-"8985 [0.7040175768989329, 0.059322033898305086, 0.2... 😂 \n",
-"5410 [0.46813021474490496, 0.24716181096977158, 0.2... 😢 \n",
-"62611 [0.7040175768989329, 0.059322033898305086, 0.2... 😂 \n",
+" teacher_sentiment \\\n",
+"35671 [0.46813021474490496, 0.24716181096977158, 0.2... \n",
+"25683 [0.46813021474490496, 0.24716181096977158, 0.2... \n",
+"8985 [0.7040175768989329, 0.059322033898305086, 0.2... \n",
+"5410 [0.46813021474490496, 0.24716181096977158, 0.2... \n",
+"62611 [0.7040175768989329, 0.059322033898305086, 0.2... \n",
 "\n",
-" predicted_sentiment \n",
-"35671 [0.44239410758018494, 0.2297608107328415, 0.26... \n",
-"25683 [0.7516717314720154, 0.291942298412323, 0.3423... \n",
-"8985 [0.48943695425987244, 0.21931192278862, 0.2277... \n",
-"5410 [0.3661550283432007, 0.32579296827316284, 0.23... \n",
-"62611 [0.48689204454421997, 0.20729433000087738, 0.2... "
+" text \n",
+"35671 i feel like i care so much more in everi situat \n",
+"25683 i did not meat to add that 2 there ... hav see... \n",
+"8985 never… \n",
+"5410 lmao on me ! ! ! wtf wa he suppos to say \n",
+"62611 this dude alway help me get through my school ... "
 ]
 },
 "metadata": {},
@@ -845,9 +894,9 @@
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"Mean Squared Error: [0.02340565 0.02344435 0.00374819]\n",
+"Mean Squared Error: [0.13877691 0.04682433 0.02937794]\n",
 "Variance teacher: [0.02183094 0.02513847 0.00285735]\n",
-"Variance prediction: [0.0083875  0.00473354 0.00115709]\n"
+"Variance prediction: [0.00046378 0.00019441 0.00020516]\n"
 ]
 }
 ],
@@ -896,16 +945,31 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
+"execution_count": 13,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "2ca4e06fcd4f41c2bfd161f9f16ca594",
+"model_id": "003ae16760b04c25bdc9f2fe2193747a",
 "version_major": 2,
 "version_minor": 0
 },
+"text/html": [
+"<p>Failed to display Jupyter Widget of type <code>Text</code>.</p>\n",
+"<p>\n",
+"  If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
+"  that the widgets JavaScript is still loading. If this message persists, it\n",
+"  likely means that the widgets JavaScript library is either not installed or\n",
+"  not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
+"  Widgets Documentation</a> for setup instructions.\n",
+"</p>\n",
+"<p>\n",
+"  If you're reading this message in another frontend (for example, a static\n",
+"  rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
+"  it may mean that your frontend doesn't currently support widgets.\n",
+"</p>\n"
+],
 "text/plain": [
 "Text(value='')"
 ]
@@ -916,12 +980,27 @@
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "a39abb79d70e4ae1952b2d928cfab174",
+"model_id": "4580af82b30545f197a41e4359010556",
 "version_major": 2,
 "version_minor": 0
 },
+"text/html": [
+"<p>Failed to display Jupyter Widget of type <code>VBox</code>.</p>\n",
+"<p>\n",
+"  If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
+"  that the widgets JavaScript is still loading. If this message persists, it\n",
+"  likely means that the widgets JavaScript library is either not installed or\n",
+"  not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
+"  Widgets Documentation</a> for setup instructions.\n",
+"</p>\n",
+"<p>\n",
+"  If you're reading this message in another frontend (for example, a static\n",
+"  rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
+"  it may mean that your frontend doesn't currently support widgets.\n",
+"</p>\n"
+],
 "text/plain": [
-"VBox(children=(Button(description='get emoji', icon='check', style=ButtonStyle(), tooltip='Click me'), Output(…"
+"VBox(children=(Button(description='get emoji', icon='check', style=ButtonStyle(), tooltip='Click me'), Output()))"
 ]
 },
 "metadata": {},
@@ -987,13 +1066,6 @@
 "    display(t)\n",
 "    display(widgets.VBox([b, out])) "
 ]
-},
-{
-"cell_type": "code",
-"execution_count": null,
-"metadata": {},
-"outputs": [],
-"source": []
 }
 ],
 "metadata": {
@@ -15,7 +15,8 @@ import itertools
 import sklearn.utils as sku
 from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
 from sklearn.model_selection import train_test_split
-from sklearn.preprocessing import MultiLabelBinarizer
+from sklearn.preprocessing import MultiLabelBinarizer, LabelBinarizer
+from sklearn.cluster import KMeans
 import nltk
 from keras.models import load_model
 from sklearn.externals import joblib
@@ -33,14 +34,13 @@ nltk.download('wordnet')
 import sys
 sys.path.append("..")
 
-from Tools.Emoji_Distance import sentiment_vector_to_emoji
-from Tools.Emoji_Distance import emoji_to_sentiment_vector
+import Tools.Emoji_Distance as edist
 
 def emoji2sent(emoji_arr, only_emoticons=True):
-    return np.array([emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])
+    return np.array([edist.emoji_to_sentiment_vector(e, only_emoticons=only_emoticons) for e in emoji_arr])
 
 def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
-    return [sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]
+    return [edist.sentiment_vector_to_emoji(s, custom_target_emojis=custom_target_emojis, only_emoticons=only_emoticons) for s in sent_arr]
 
 
 # In[3]:
@@ -122,7 +122,7 @@ def get_wordnet_pos(treebank_tag):
 
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None):
+    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):
         """
         generate, read and process train data in one step.
         
@@ -131,6 +131,8 @@ class sample_data_manager(object):
         @param apply_stemming: apply stemming and lemmatization on dataset
         @param n_top_emojis: only use messages containing one of <`n_top_emojis`>-top emojis. set to `-1` to prevent top emoji filtering
         @param file_range: range of file's indices to read (eg `range(3)` to read the first three files). If `None`: all files are read
+        @param n_kmeans_cluster: generating multilabeled labels with kmeans with these number of clusters. Set to -1 to use the plain sentiment space as label
+        
         @return: sample_data_manager object
         """
         sdm = sample_data_manager(path)
@@ -143,6 +145,9 @@ class sample_data_manager(object):
         if n_top_emojis > 0:
             sdm.filter_by_top_emojis(n_top=n_top_emojis)
         
+        if n_kmeans_cluster > 0:
+            sdm.generate_kmeans_binary_label(only_emoticons=only_emoticons, n_clusters=n_kmeans_cluster)
+        
         return sdm
     
     
@@ -166,6 +171,10 @@ class sample_data_manager(object):
         self.Xt = None
         self.yt = None
         self.top_emojis = None
+        self.binary_labels = None
+        self.use_binary_labels = False
+        self.kmeans_cluster = None
+        self.label_binarizer = None
     
     def read_files(self, file_index_range:list, only_emoticons=True):
         """
@@ -270,16 +279,46 @@ class sample_data_manager(object):
         assert self.labels is not None # ← messages are already read in
         
         self.top_emojis = [x[0] for x in self.get_emoji_count()[:n_top]]
-        in_top = [sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]
+        in_top = [edist.sentiment_vector_to_emoji(x) in self.top_emojis for x in self.labels]
         self.labels = self.labels[in_top]
         self.plain_text = self.plain_text[in_top]
         self.emojis = self.emojis[in_top]
         print("remaining samples after top emoji filtering: ", len(self.labels))
     
+    def generate_kmeans_binary_label(self, only_emoticons=True, n_clusters=5):
+        """
+        generate binary labels using kmeans.
+        
+        @param only_emoticons: set whether we're using the full emoji set or only emoticons
+        @param n_clusters: number of cluster we're generating in emoji's sentiment space
+        """
+        assert self.labels is not None
+        array_sentiment_vectors = edist.list_sentiment_emoticon_vectors if only_emoticons else edist.list_sentiment_vectors
+        array_sentiment_vectors = np.array(array_sentiment_vectors)
+        
+        list_emojis = edist.list_emoticon_emojis if only_emoticons else edist.list_emojis
+        self.use_binary_labels = True
+        print("clustering following emojis: " + "".join(list_emojis) + "...")
+        self.kmeans_cluster = KMeans(n_clusters=n_clusters).fit(array_sentiment_vectors)
+        print("clustering done")
+        self.label_binarizer = LabelBinarizer()
+        
+        multiclass_labels = self.kmeans_cluster.predict(self.labels)
+        
+        # FIXME: we have to guarantee that in every dataset all classes occur.
+        # otherwise batch fitting is not possible!
+        # (or we have to precompute the mlb fitting process somewhere...)
+        self.binary_labels = self.label_binarizer.fit_transform(multiclass_labels)
+    
+    
     def create_train_test_split(self, split = 0.1, random_state = 4222):
+        assert self.plain_text is not None and self.labels is not None
         if self.X is not None:
             sys.stderr.write("WARNING: overwriting existing train/test split \n")
-        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, self.labels, test_size=split, random_state=random_state)
+        
+        labels = self.binary_labels if self.use_binary_labels else self.labels
+        assert labels is not None
+        self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)
 
 
 
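Note: one possible fix for the FIXME above (not part of this commit) is to fit the binarizer once on the full range of cluster ids, so every transform yields `n_clusters` columns even when a batch misses some classes:

    from sklearn.preprocessing import LabelBinarizer

    n_clusters = 5
    label_binarizer = LabelBinarizer()
    label_binarizer.fit(list(range(n_clusters)))   # fit on all possible cluster ids

    batch = [0, 2, 2, 4]                           # classes 1 and 3 absent in this batch
    print(label_binarizer.transform(batch).shape)  # (4, 5) regardless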
@@ -306,13 +345,15 @@ class pipeline_manager(object):
         return pm
     
     @staticmethod
-    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager):
+    def create_keras_pipeline_with_vectorizer(vectorizer, layers, sdm:sample_data_manager, loss=None, optimizer=None):
         '''
         creates pipeline with vectorizer and keras classifier
         
         @param vectorizer: Vectorizer object. will be fitted with data provided by sdm
         @param layers: list of keras layers. One keras layer is a tuple in form: (<#neurons:int>, <activation_func:str>)
         @param sdm: sample data manager to get data for the vectorizer
+        @param loss: set keras loss function. Depending whether sdm use multiclass labels `categorical_crossentropy` or `mean_squared_error` is used as default
+        @param optimizer: set keras optimizer. Depending whether sdm use multiclass labels `sgd` or `adam` is used as default
         
         @return: a pipeline manager object
         
@@ -337,8 +378,17 @@ class pipeline_manager(object):
             else:
                 model.add(Dense(units=layer[0], activation=layer[1]))
         
-        model.compile(loss='mean_squared_error',
-                      optimizer='adam')
+        if sdm.use_binary_labels:
+            loss_function = loss if loss is not None else 'categorical_crossentropy'
+            optimizer_function = optimizer if optimizer is not None else 'sgd'
+            model.compile(loss=loss_function,
+                          optimizer=optimizer_function,
+                          metrics=['accuracy'])
+        else:
+            loss_function = loss if loss is not None else 'mean_squared_error'
+            optimizer_function = optimizer if optimizer is not None else 'adam'
+            model.compile(loss=loss_function,
+                          optimizer=optimizer_function)
         
         pipeline = Pipeline([
             ('vectorizer',vectorizer),
@@ -503,11 +553,18 @@ import __main__ as main
 if not hasattr(main, '__file__'):
     # we are in an interactive environment (probably in jupyter)
     # load data:
-    sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1))
+    
+    # setting n_kmeans_clusters to a value > 0 activates binarized labeling automatically!
+    # set to -1 to disable kmeans clustering and generating labels in plain sentiment space
+    
+    #n_kmeans_cluster = 5
+    n_kmeans_cluster = -1
+    sdm = sample_data_manager.generate_and_read(path="./data_en/", n_top_emojis=20, file_range=range(1), n_kmeans_cluster=n_kmeans_cluster)
+    sdm.create_train_test_split()
     #pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
     #                                                            layers=[(10000, 'relu'),(5000, 'relu'),(2500, 'relu'),(y1[0].shape[0],None)], sdm=sdm)\n",
     pm = pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),
-                                                                layers=[(2500, 'relu'),(3,None)], sdm=sdm)
+                                                                layers=[(2500, 'relu'),(sdm.y.shape[1],None)], sdm=sdm)
     tr = trainer(sdm=sdm, pm=pm)
     tr.fit(100)
 
@@ -515,7 +572,7 @@ if not hasattr(main, '__file__'):
 # ----
 # ## save classifier
 
-# In[13]:
+# In[11]:
 
 
 import __main__ as main
@@ -528,7 +585,7 @@ if not hasattr(main, '__file__'):
 #
 # * predict and save to `test.csv`
 
-# In[14]:
+# In[12]:
 
 
 import __main__ as main
@@ -568,7 +625,7 @@ if not hasattr(main, '__file__'):
 #
 # * loading classifier and show a test widget
 
-# In[15]:
+# In[13]:
 
 
 import __main__ as main