Merge branch 'master' of ssh://gogs@the-cake-is-a-lie.net:20022/jonas/NLP-LAB.git
This commit is contained in: commit 160821b1e3

Project/naive_approach/naive_approach.py (new file, 128 lines added)
@ -0,0 +1,128 @@
# coding: utf-8

# In[1]:


import pandas as pd
from IPython.display import clear_output, Markdown, Math
import ipywidgets as widgets
import os
import unicodedata as uni
import numpy as np
from nltk.stem import PorterStemmer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import wordnet
import math
import pprint


# # Naive Approach
table = pd.read_csv('../Tools/emoji_descriptions.csv')


#######################
# Helper functions
#######################

def stemming(messages):
    stemmed_messages = []
    ps = PorterStemmer()
    for m in messages:
        words = word_tokenize(m)
        sm = []
        for w in words:
            sm.append(ps.stem(w))
        m = (" ").join(sm)
        stemmed_messages.append(m)
    return stemmed_messages


# * compare words to emoji descriptions
def evaluate_sentence(sentence, table, description_key = 'description', lang = 'eng'):

    tokenized_sentence = word_tokenize(sentence)
    n = len(tokenized_sentence)
    l = table.shape[0]
    matrix_list = []

    for index, row in table.iterrows():
        emoji_tokens = word_tokenize(row[description_key])
        m = len(emoji_tokens)

        mat = np.zeros(shape=(m,n))
        for i in range(len(emoji_tokens)):
            for j in range(len(tokenized_sentence)):
                syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
                if len(syn1) == 0:
                    continue
                w1 = syn1[0]
                #print(j, tokenized_sentence)
                syn2 = wordnet.synsets(tokenized_sentence[j], lang=lang)
                if len(syn2) == 0:
                    continue
                w2 = syn2[0]
                val = w1.wup_similarity(w2)
                if val is None:
                    continue
                mat[i,j] = val
        #print(row['character'], mat)
        matrix_list.append(mat)

    return matrix_list


###########################
# Functions to be called from main script
###########################


# load and preprocess data
# emojis_to_consider can be either a list or "all"
def prepareData(stemming=False, emojis_to_consider="all"):

    table.head()

    if(stemming):
        table['description'] = stemming(table['description'])

    #collect the emojis
    lookup = {}
    emoji_set = []
    for index, row in table.iterrows():
        if(emojis_to_consider=="all" or (type(emojis_to_consider)==list and row['character'] in emojis_to_consider)):
            lookup[index] = row['character']
            emoji_set.append(row['character'])

    emoji_set = set(emoji_set)

    return lookup


# make a prediction for an input sentence
def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", description_key='description', lang = 'eng', n=10, t=0.9):

    result = evaluate_sentence(sentence, table, description_key, lang)

    if(criteria=="summed"):
        indexes = np.argsort([-np.sum(x) for x in result])[0:n]
    elif(criteria=="max_val"):
        indexes = np.argsort([-np.max(x) for x in result])[0:n]
    elif(criteria=="avg"):
        indexes = np.argsort([-np.mean(x) for x in result])[0:n]
    else:
        indexes = np.argsort([-len(np.where(x>t)[0]) / (x.shape[0] * x.shape[1]) for x in result])[0:n]

    if(emojis_to_consider!="all"):
        for i in indexes:
            if (i not in lookup):
                indexes = np.delete(indexes, [i])

    # build a result table
    table_array = [[lookup[indexes[i]], str(table.iloc[indexes[i]][description_key])] for i in range(n)]

    table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])

    #display(table_frame)

    return list(table_frame[criteria])


#predict("I like to travel by train", description_key='description' , lang='eng')
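A minimal usage sketch of this new module (not part of the commit): it assumes the working directory is Project/naive_approach so that '../Tools/emoji_descriptions.csv' resolves at import time, and that the NLTK corpora (punkt, wordnet) are already downloaded.

# Hypothetical usage of the functions added above.
import naive_approach as na

lookup = na.prepareData(stemming=False, emojis_to_consider="all")
top_emojis = na.predict("I like to travel by train", lookup,
                        criteria="threshold", n=10, t=0.9)
print(top_emojis)  # list of the n highest-ranked emoji characters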
@ -144,7 +144,7 @@
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "3c11801d12b643d9b059ba1058d66d5e",
+"model_id": "5ac970d7d7cf4849b4f5adfb80a820c0",
 "version_major": 2,
 "version_minor": 0
 },
@ -168,11 +168,11 @@
 " ],\n",
 " [\n",
 " (widgets.IntRangeSlider(disabled=True, min=0, max=0), \"file_range\"),\n",
-" (widgets.Checkbox(disabled=True), \"only_emoticons\")\n",
+" (widgets.Checkbox(value=True,disabled=True), \"only_emoticons\")\n",
 " ],\n",
 " [\n",
-" (widgets.BoundedIntText(disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
+" (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
-" (widgets.BoundedIntText(disabled=True,min=-1, max=10), \"n_top_emojis\")\n",
+" (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=10), \"n_top_emojis\")\n",
 " ],\n",
 " [\n",
 " (widgets.Button(disabled=True),\"load_data\")\n",
@ -197,7 +197,7 @@
 " None,\n",
 " classifier_tab)\n",
 "\n",
-"create_area(\"create classifier\",\n",
+"create_area(\"create/save/load classifier\",\n",
 " [\n",
 " [\n",
 " (classifier_tab, \"classifier_tab\")\n",
@ -206,8 +206,19 @@
 " (widgets.Button(), \"create_classifier\")\n",
 " ],\n",
 " [\n",
-" (widgets.Text(), \"classifier name\"),\n",
-" (widgets.Button(), \"save classifier\")\n",
+" (widgets.Label(\"save_area:\"), \"save_area:\")\n",
+" ],\n",
+" [\n",
+" (widgets.Text(), \"classifier_name\"),\n",
+" (widgets.Button(), \"save_classifier\")\n",
+" ],\n",
+" [\n",
+" (widgets.Label(\"load_area:\"), \"load_area:\")\n",
+" ],\n",
+" [\n",
+" (widgets.Select(options=sorted(glob.glob(\"./*.pipeline\"))), \"clf_file_selector\"),\n",
+" (widgets.Text(), \"clf_file\"),\n",
+" (widgets.Button(), \"load_classifier\")\n",
 " ]\n",
 " ],\n",
 " \"create\")\n",
@ -541,9 +552,54 @@
 " pm = stl.pipeline_manager.create_keras_pipeline_with_vectorizer(vectorizer=TfidfVectorizer(stop_words='english'),\n",
 " layers=layers, sdm=sdm)\n",
 "\n",
+"def save_classifier(b):\n",
+" global sdm\n",
+" global pm\n",
+" global tr\n",
+" with out_areas[\"create\"]:\n",
+" clear_output()\n",
+" mp(\"----\")\n",
+" if pm is None:\n",
+" sys.stderr.write(\"ERROR: create classifier first\")\n",
+" return\n",
+" \n",
+" pm.save(shown_widgets[\"classifier_name\"].value)\n",
+"\n",
+"def load_classifier(b):\n",
+" global sdm\n",
+" global pm\n",
+" global tr\n",
+" with out_areas[\"create\"]:\n",
+" clear_output()\n",
+" mp(\"----\")\n",
+"\n",
+"def update_file_selector(b):\n",
+" shown_widgets[\"clf_file_selector\"].options = sorted(glob.glob(\"./*.pipeline\"))\n",
+"\n",
+"def clf_file_selector(b):\n",
+" shown_widgets[\"clf_file\"].value = shown_widgets[\"clf_file_selector\"].value\n",
+" update_file_selector(b)\n",
+"\n",
+"def load_classifier(b):\n",
+" global sdm\n",
+" global pm\n",
+" global tr\n",
+" with out_areas[\"create\"]:\n",
+" clear_output()\n",
+" mp(\"----\")\n",
+" clf_file = shown_widgets[\"clf_file\"].value\n",
+" pm = stl.pipeline_manager.load_from_pipeline_file(clf_file)\n",
+" \n",
+"\n",
 "# link\n",
 "shown_widgets[\"n_keras_layer\"].observe(populate_keras_options)\n",
-"shown_widgets[\"create_classifier\"].on_click(create_classifier)"
+"shown_widgets[\"create_classifier\"].on_click(create_classifier)\n",
+"shown_widgets[\"save_classifier\"].on_click(save_classifier)\n",
+"shown_widgets[\"load_classifier\"].on_click(load_classifier)\n",
+"shown_widgets[\"clf_file_selector\"].observe(clf_file_selector)\n",
+"\n",
+"\n",
+"\n"
 ]
 }
 ],
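For orientation, the cells above follow the usual ipywidgets callback pattern: a handler receives the Button instance, writes into an Output area, and is registered with on_click. The snippet below is a self-contained sketch of that pattern only (shown_widgets, out_areas, mp and pm are notebook helpers and are not reproduced here).

# Sketch of the button-wiring pattern used by the save/load cells (assumption,
# not the notebook's code).
import ipywidgets as widgets
from IPython.display import display, clear_output

out = widgets.Output()
save_button = widgets.Button(description="save classifier")

def on_save(b):
    with out:
        clear_output()
        print("saving classifier ...")  # the notebook calls pm.save(<classifier_name>) here

save_button.on_click(on_save)
display(widgets.VBox([save_button, out]))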
@ -2,43 +2,9 @@
 "cells": [
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/home/jonas/.local/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n",
-" from ._conv import register_converters as _register_converters\n",
-"Using TensorFlow backend.\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
-"[nltk_data] Package punkt is already up-to-date!\n",
-"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
-"[nltk_data] /home/jonas/nltk_data...\n",
-"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
-"[nltk_data] date!\n",
-"[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
-"[nltk_data] Package wordnet is already up-to-date!\n"
-]
-},
-{
-"data": {
-"text/plain": [
-"True"
-]
-},
-"execution_count": 1,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
+"outputs": [],
 "source": [
 "import pandas as pd\n",
 "from IPython.display import clear_output, Markdown, Math\n",
@ -59,6 +25,7 @@
 "import pickle\n",
 "import operator\n",
 "from sklearn.pipeline import Pipeline\n",
+"import json\n",
 "nltk.download('punkt')\n",
 "nltk.download('averaged_perceptron_tagger')\n",
 "nltk.download('wordnet')"
@ -66,7 +33,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@ -84,7 +51,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@ -109,7 +76,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@ -129,7 +96,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@ -154,7 +121,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@ -194,13 +161,13 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "class sample_data_manager(object):\n",
 " @staticmethod\n",
-" def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1):\n",
+" def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):\n",
 " \"\"\"\n",
 " generate, read and process train data in one step.\n",
 " \n",
@ -214,7 +181,7 @@
 " @return: sample_data_manager object\n",
 " \"\"\"\n",
 " sdm = sample_data_manager(path)\n",
-" sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons)\n",
+" sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)\n",
 " if apply_stemming:\n",
 " sdm.apply_stemming_and_lemmatization()\n",
 " \n",
@ -254,7 +221,7 @@
 " self.kmeans_cluster = None\n",
 " self.label_binarizer = None\n",
 " \n",
-" def read_files(self, file_index_range:list, only_emoticons=True):\n",
+" def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):\n",
 " \"\"\"\n",
 " reading (multiple) files to one panda table.\n",
 " \n",
@ -268,7 +235,8 @@
 " self.raw_data = pd.read_json(self.json_files[i], encoding=\"utf-8\")\n",
 " else:\n",
 " self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding=\"utf-8\"))\n",
-" \n",
+" if progress_callback is not None:\n",
+" progress_callback()\n",
 " self.emojis = self.raw_data['EMOJI']\n",
 " self.plain_text = self.raw_data['text']\n",
 " \n",
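The progress_callback hook threaded through generate_and_read and read_files above is invoked once per processed JSON file; how it is consumed is left to the caller. A small sketch (an assumption, not code from this commit) of driving an ipywidgets progress bar with it:

# Sketch: feed the per-file progress_callback into a progress bar.
# sample_data_manager is the class from the diff above; the widget wiring
# and the "./data_en/" path are illustrative.
import ipywidgets as widgets
from IPython.display import display

progress = widgets.IntProgress(min=0, max=10, description="files")
display(progress)

def tick():
    progress.value += 1  # called once after each json file is appended

# sdm = sample_data_manager.generate_and_read("./data_en/", file_range=range(10),
#                                             progress_callback=tick)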
@ -396,8 +364,7 @@
 " \n",
 " labels = self.binary_labels if self.use_binary_labels else self.labels\n",
 " assert labels is not None\n",
-" self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n",
-"\n"
+" self.X, self.Xt, self.y, self.yt = train_test_split(self.plain_text, labels, test_size=split, random_state=random_state)\n"
 ]
 },
 {
@ -409,12 +376,26 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "class pipeline_manager(object):\n",
 " @staticmethod\n",
+" def load_from_pipeline_file(pipeline_file:str):\n",
+" \"\"\"\n",
+" loading a json configuration file and using it's paramters to call 'load_pipeline_from_files'\n",
+" \"\"\"\n",
+" with open(pipeline_file, 'r') as f:\n",
+" d = json.load(f)\n",
+" \n",
+" keras_models = d['keras_models']\n",
+" all_models = d['all_models']\n",
+" \n",
+" return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)\n",
+"\n",
+"\n",
+" @staticmethod\n",
 " def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):\n",
 " \"\"\"\n",
 " load a pipeline from files. A pipeline should be represented by multiple model files in the form '<file_prefix>.<model_name>'\n",
@ -527,6 +508,7 @@
 " @param prefix: file prefix for all models\n",
 " \"\"\"\n",
 " \n",
+"\n",
 " print(self.keras_models)\n",
 " # doing this like explained here: https://stackoverflow.com/a/43415459\n",
 " for step in self.pipeline.named_steps:\n",
@ -539,6 +521,9 @@
 " load_command += prefix + \"', \" + str(self.keras_models) + \", \"\n",
 " load_command += str(list(self.pipeline.named_steps.keys())) + \")\"\n",
 "\n",
+" with open(prefix + '.pipeline', 'w') as outfile:\n",
+" json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)\n",
+" \n",
 " import __main__ as main\n",
 " if not hasattr(main, '__file__'):\n",
 " display(\"saved pipeline. It can be loaded the following way:\")\n",
@ -584,7 +569,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@ -594,7 +579,7 @@
 " self.sdm = sdm\n",
 " self.pm = pm\n",
 " \n",
-" def fit(self, max_size=10000, disabled_fit_steps=['vectorizer']):\n",
+" def fit(self, max_size=10000, disabled_fit_steps=['vectorizer'], keras_batch_fitting_layer=['keras_model'], batch_size=None, n_epochs=1, progress_callback=None):\n",
 " \"\"\"\n",
 " fitting data in the pipeline. Because we don't want to refit the vectorizer, the pipeline models containing the vectorizer have to be named explicitly\n",
 " \n",
@ -607,32 +592,52 @@
 " disabled_fits = {}\n",
 " disabled_fit_transforms = {}\n",
 " \n",
+" disabled_keras_fits = {}\n",
+" \n",
 " named_steps = self.pm.pipeline.named_steps\n",
 " \n",
 " for s in disabled_fit_steps:\n",
-" # now it gets a little bit dirty:\n",
+" # now it gets really dirty:\n",
 " # replace fit functions we don't want to call again (e.g. for vectorizers)\n",
 " disabled_fits[s] = named_steps[s].fit\n",
 " disabled_fit_transforms[s] = named_steps[s].fit_transform\n",
 " named_steps[s].fit = lambda self, X, y=None: self\n",
 " named_steps[s].fit_transform = named_steps[s].transform\n",
 " \n",
+" for k in keras_batch_fitting_layer:\n",
+" # forcing batch fitting on keras\n",
+" disabled_keras_fits[k]=named_steps[k].fit\n",
+" named_steps[k].fit = lambda X, y: named_steps[k].train_on_batch(X.todense(), y) # ← why has keras no sparse support on batch progressing!?!?!\n",
+" \n",
+" if batch_size is None:\n",
 " self.pm.fit(X = self.sdm.X[:max_size], y = self.sdm.y[:max_size])\n",
+" else:\n",
+" n = len(self.sdm.X) // batch_size\n",
+" for i in range(n_epochs):\n",
+" for j in range(n):\n",
+" self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))\n",
+" if progress_callback is not None:\n",
+" progress_callback()\n",
+" pred, yt = self.test()\n",
+" mean_squared_error = ((pred - yt)**2).mean(axis=0)\n",
+" print(\"#\" + str(j) + \": loss: \", mean_squared_error)\n",
+"\n",
 " \n",
 " # restore replaced fit functions:\n",
 " for s in disabled_fit_steps:\n",
 " named_steps[s].fit = disabled_fits[s]\n",
 " named_steps[s].fit_transform = disabled_fit_transforms[s]\n",
 " \n",
+" for k in keras_batch_fitting_layer:\n",
+" named_steps[k].fit = disabled_keras_fits[k]\n",
+" \n",
 " def test(self):\n",
 " '''\n",
 " @return: prediction:list, teacher:list\n",
 " '''\n",
 " if self.sdm.X is None:\n",
 " self.sdm.create_train_test_split()\n",
-" return self.pm.predict(self.sdm.Xt), self.sdm.yt\n",
-"\n",
-" "
+" return self.pm.predict(self.sdm.Xt), self.sdm.yt\n"
 ]
 },
 {
@ -652,21 +657,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 10,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"reading file: ./data_en/2017-11-01.json...\n",
-"imported 33368 samples\n",
-"remaining samples after top emoji filtering: 26197\n",
-"Epoch 1/1\n",
-"100/100 [==============================] - 3s 28ms/step - loss: 0.1230\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "import __main__ as main\n",
 "if not hasattr(main, '__file__'):\n",
@ -698,40 +691,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 11,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"['keras_model']\n"
-]
-},
-{
-"data": {
-"text/plain": [
-"'saved pipeline. It can be loaded the following way:'"
-]
-},
-"metadata": {},
-"output_type": "display_data"
-},
-{
-"data": {
-"text/markdown": [
-"> ```\n",
-"pipeline_manager.load_pipeline_from_files( 'custom_classifier', ['keras_model'], ['vectorizer', 'keras_model'])\n",
-"```"
-],
-"text/plain": [
-"<IPython.core.display.Markdown object>"
-]
-},
-"metadata": {},
-"output_type": "display_data"
-}
-],
+"outputs": [],
 "source": [
 "import __main__ as main\n",
 "if not hasattr(main, '__file__'):\n",
@ -750,156 +712,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-[... stored cell output deleted: predicted and teacher sentiment arrays, a five-row prediction-vs-teacher sample dataframe (HTML and plain text), and the printed statistics below ...]
-"prediction variance: 0.0005431187\n",
-"teacher variance: 0.03341702104519965\n",
-"Mean Squared Error: [0.13877691 0.04682433 0.02937794]\n",
-"Variance teacher: [0.02183094 0.02513847 0.00285735]\n",
-"Variance prediction: [0.00046378 0.00019441 0.00020516]\n"
-],
+"outputs": [],
 "source": [
 "import __main__ as main\n",
 "if not hasattr(main, '__file__'):\n",
@ -945,68 +760,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-[... stored cell output deleted: two widget views (a Text widget and a VBox with a "get emoji" Button and an Output area), kept only as "Failed to display Jupyter Widget" HTML fallbacks ...]
-],
+"outputs": [],
 "source": [
 "import __main__ as main\n",
 "if not hasattr(main, '__file__'):\n",
@ -23,6 +23,7 @@ from sklearn.externals import joblib
 import pickle
 import operator
 from sklearn.pipeline import Pipeline
+import json
 nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
@ -329,6 +330,20 @@ class sample_data_manager(object):
 
 
 class pipeline_manager(object):
+    @staticmethod
+    def load_from_pipeline_file(pipeline_file:str):
+        """
+        loading a json configuration file and using it's paramters to call 'load_pipeline_from_files'
+        """
+        with open(pipeline_file, 'r') as f:
+            d = json.load(f)
+        
+        keras_models = d['keras_models']
+        all_models = d['all_models']
+        
+        return pipeline_manager.load_pipeline_from_files(pipeline_file.rsplit('.',1)[0], keras_models, all_models)
+
+
     @staticmethod
     def load_pipeline_from_files(file_prefix:str, keras_models = [], all_models = []):
         """
@ -442,6 +457,7 @@ class pipeline_manager(object):
         @param prefix: file prefix for all models
         """
         
+
         print(self.keras_models)
         # doing this like explained here: https://stackoverflow.com/a/43415459
         for step in self.pipeline.named_steps:
@ -454,6 +470,9 @@ class pipeline_manager(object):
         load_command += prefix + "', " + str(self.keras_models) + ", "
         load_command += str(list(self.pipeline.named_steps.keys())) + ")"
 
+        with open(prefix + '.pipeline', 'w') as outfile:
+            json.dump({'keras_models': self.keras_models, 'all_models': [step for step in self.pipeline.named_steps]}, outfile)
+        
         import __main__ as main
         if not hasattr(main, '__file__'):
             display("saved pipeline. It can be loaded the following way:")
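For reference (not part of the diff): the ".pipeline" file written by the new save path is a small JSON manifest, and load_from_pipeline_file simply reads it back and delegates to load_pipeline_from_files. A sketch of the round trip, using the example step names that appear in the notebook output above; the concrete file name is illustrative.

# Sketch of the "<prefix>.pipeline" round trip introduced by this commit.
import json

# what pipeline_manager.save(prefix) now writes next to the model files:
with open("custom_classifier.pipeline", "w") as outfile:
    json.dump({"keras_models": ["keras_model"],
               "all_models": ["vectorizer", "keras_model"]}, outfile)

# what pipeline_manager.load_from_pipeline_file(pipeline_file) does with it:
with open("custom_classifier.pipeline", "r") as f:
    d = json.load(f)
prefix = "custom_classifier.pipeline".rsplit(".", 1)[0]
# pm = pipeline_manager.load_pipeline_from_files(prefix, d["keras_models"], d["all_models"])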