bugfixes and improvements in sample handling, working on sentiment mean as labels
parent 10fd5817e7
commit 2a6a29b88b
@@ -11,31 +11,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 1,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"Using TensorFlow backend.\n"
-]
-},
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
-"[nltk_data] Package punkt is already up-to-date!\n",
-"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
-"[nltk_data] /home/jonas/nltk_data...\n",
-"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
-"[nltk_data] date!\n",
-"[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
-"[nltk_data] Package wordnet is already up-to-date!\n"
-]
-}
-],
+"outputs": [],
 "source": [
 "import simple_twitter_learning as stl\n",
 "import glob\n",
@@ -59,7 +37,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 2,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -114,48 +92,9 @@
 },
 {
 "cell_type": "code",
-"execution_count": 3,
+"execution_count": null,
 "metadata": {},
-"outputs": [
-{
-"data": {
-"text/markdown": [
-"----"
-],
-"text/plain": [
-"<IPython.core.display.Markdown object>"
-]
-},
-"metadata": {},
-"output_type": "display_data"
-},
-{
-"data": {
-"text/markdown": [
-"## User Interface"
-],
-"text/plain": [
-"<IPython.core.display.Markdown object>"
-]
-},
-"metadata": {},
-"output_type": "display_data"
-},
-{
-"data": {
-"application/vnd.jupyter.widget-view+json": {
-"model_id": "5ac970d7d7cf4849b4f5adfb80a820c0",
-"version_major": 2,
-"version_minor": 0
-},
-"text/plain": [
-"Tab(children=(VBox(children=(HBox(children=(Text(value='./data_en/', description='root_path'), Button(descript…"
-]
-},
-"metadata": {},
-"output_type": "display_data"
-}
-],
+"outputs": [],
 "source": [
 "mp(\"----\")\n",
 "mp(\"## User Interface\")\n",
@@ -172,7 +111,7 @@
 " ],\n",
 " [\n",
 " (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
-" (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=10), \"n_top_emojis\")\n",
+" (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\")\n",
 " ],\n",
 " [\n",
 " (widgets.Button(disabled=True),\"load_data\")\n",
@@ -248,7 +187,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 4,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -266,7 +205,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 5,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -354,17 +293,17 @@
 },
 {
 "cell_type": "code",
-"execution_count": 6,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
 "class progress_indicator(object):\n",
 " \n",
-" def __init__(self, n, description=\"progress\"):\n",
-" self.w = widgets.IntProgress(value=0, min=0,max=n, description = description)\n",
+" def __init__(self, description=\"progress\"):\n",
+" self.w = widgets.FloatProgress(value=0, min=0,max=1, description = description)\n",
 " display(self.w)\n",
-" def update(self, dn=1):\n",
-" self.w.value += dn\n",
+" def update(self, val):\n",
+" self.w.value = val\n",
 " "
 ]
 },
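The reworked progress_indicator above moves from an ipywidgets.IntProgress that counts steps to a FloatProgress over [0, 1], so callers now report an absolute completed fraction rather than an increment. A minimal standalone sketch of that pattern (the loop and item count below are illustrative only, not part of the commit):

import ipywidgets as widgets
from IPython.display import display

class progress_indicator(object):

    def __init__(self, description="progress"):
        # FloatProgress spans [0, 1]; callers pass the completed fraction
        self.w = widgets.FloatProgress(value=0, min=0, max=1, description=description)
        display(self.w)

    def update(self, val):
        self.w.value = val

# illustrative usage: report the fraction of processed items
p = progress_indicator("reading progress")
items = list(range(200))
for i, item in enumerate(items):
    # ... do the actual work on item here ...
    p.update((i + 1) / len(items))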
@@ -378,7 +317,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 7,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -416,13 +355,15 @@
 " r = shown_widgets[\"file_range\"].value\n",
 " r = (r[0], r[1] + 1) # range has to be exclusive according to the last element!\n",
 " \n",
-" p = progress_indicator(r[1] - r[0], \"reading progress\")\n",
+" p_r = progress_indicator(\"reading progress\")\n",
+" p_s = progress_indicator(\"stemming progress\")\n",
 " \n",
 " sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
 " n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
 " file_range=range(r[0], r[1]),\n",
 " n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
-" progress_callback=p.update)\n",
+" read_progress_callback=p_r.update,\n",
+" stem_progress_callback=p_s.update)\n",
 " shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
 " \n",
 " \n",
@@ -440,7 +381,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -461,11 +402,10 @@
 " \n",
 " print(\"update train test split:\")\n",
 " sdm.create_train_test_split(split=val_split)\n",
-" batch_n = len(sdm.X) // batch_size\n",
 " \n",
 " print(\"fit\")\n",
 " \n",
-" p = progress_indicator(batch_n)\n",
+" p = progress_indicator()\n",
 " \n",
 " tr = stl.trainer(sdm=sdm, pm=pm)\n",
 " tr.fit(progress_callback=p.update, batch_size=batch_size, n_epochs=n_epochs)\n",
@@ -485,7 +425,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [

@@ -123,7 +123,7 @@ def get_wordnet_pos(treebank_tag):
 
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):
+    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None):
        """
        generate, read and process train data in one step.
 
@@ -137,9 +137,9 @@ class sample_data_manager(object):
        @return: sample_data_manager object
        """
        sdm = sample_data_manager(path)
-       sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)
+       sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback)
        if apply_stemming:
-           sdm.apply_stemming_and_lemmatization()
+           sdm.apply_stemming_and_lemmatization(progress_callback=stem_progress_callback)
 
        sdm.generate_emoji_count_and_weights()
 
@@ -161,7 +161,6 @@ class sample_data_manager(object):
        self.data_root_folder = data_root_folder
        self.json_files = sorted(glob.glob(self.data_root_folder + "/*.json"))
        self.n_files = len(self.json_files)
-       self.raw_data = None
        self.emojis = None
        self.plain_text = None
        self.labels = None
@@ -177,45 +176,65 @@ class sample_data_manager(object):
        self.kmeans_cluster = None
        self.label_binarizer = None
 
-   def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):
+   def read_files(self, file_index_range:list, only_emoticons=True, emoji_mean=False ,progress_callback=None):
        """
        reading (multiple) files to one panda table.
 
        @param file_index_range: range of file's indices to read (eg `range(3)` to read the first three files)
        @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance
+       @param emoji_mean: if True, using mean of all emojis instead of the last one
        """
        assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files
+       n = len(file_index_range)
        for i in file_index_range:
            print("reading file: " + self.json_files[i] + "...")
-           if self.raw_data is None:
-               self.raw_data = pd.read_json(self.json_files[i], encoding="utf-8")
-           else:
-               self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding="utf-8"))
-           if progress_callback is not None:
-               progress_callback()
-       self.emojis = self.raw_data['EMOJI']
-       self.plain_text = self.raw_data['text']
-
-       # replacing keywords. TODO: maybe these information can be extracted and used
-       self.plain_text = self.plain_text.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","")
-
-       # so far filtering for the latest emoji. TODO: maybe there are also better approaches
-       self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons )
-
-       # and filter out all samples we have no label for:
-       wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))
-
-       self.labels = self.labels[np.invert(wrong_labels)]
-       self.plain_text = self.plain_text[np.invert(wrong_labels)]
-       self.emojis = self.emojis[np.invert(wrong_labels)]
-
-       print("imported " + str(len(self.labels)) + " samples")
+           raw_data_i = pd.read_json(self.json_files[i], encoding="utf-8")
+           emojis_i = raw_data_i['EMOJI']
+           plain_text_i = raw_data_i['text']
+
+           # replacing keywords. TODO: maybe these information can be extracted and used
+           plain_text_i = plain_text_i.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","")
+
+           if not emoji_mean:
+               # so far filtering for the latest emoji. TODO: maybe there are also better approaches
+               labels_i = emoji2sent([latest(e) for e in emojis_i], only_emoticons=only_emoticons )
+           else:
+               labels_i = np.array([np.mean(emoji2sent(e, only_emoticons=only_emoticons), axis=0) for e in emojis_i])
+
+           # and filter out all samples we have no label for:
+           wrong_labels = np.isnan(np.linalg.norm(labels_i, axis=1))
+           labels_i = labels_i[np.invert(wrong_labels)]
+           plain_text_i = plain_text_i[np.invert(wrong_labels)]
+           emojis_i = emojis_i[np.invert(wrong_labels)]
+           print("imported " + str(len(labels_i)) + " samples")
+
+           if self.labels is None:
+               self.labels = labels_i
+           else:
+               self.labels = np.append(self.labels, labels_i, axis=0)
+
+           if self.emojis is None:
+               self.emojis = emojis_i
+           else:
+               self.emojis = pd.concat([self.emojis,emojis_i],ignore_index=True)
+
+           if self.plain_text is None:
+               self.plain_text = plain_text_i
+           else:
+               self.plain_text = pd.concat([self.plain_text,plain_text_i],ignore_index=True)
+
+           if progress_callback is not None:
+               progress_callback((i+1)/n)
 
-   def apply_stemming_and_lemmatization(self):
+   def apply_stemming_and_lemmatization(self, progress_callback = None):
        """
        apply stemming and lemmatization to plain text samples
        """
+       print("apply stemming and lemmatization...")
        stemmer = SnowballStemmer("english")
+       n = self.plain_text.shape[0] * 2 # 2 for loops
+       i = 0
        for key in self.plain_text.keys():
            stemmed_sent = []
            for word in self.plain_text[key].split(" "):
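The emoji_mean flag introduced above changes how labels are built: instead of taking the sentiment of the latest emoji in a message, the label becomes the element-wise mean of the sentiment vectors of all emojis in the message, and samples whose resulting vector contains NaN are dropped. A rough sketch of that idea with a stand-in emoji2sent lookup (the real mapping comes from Tools.Emoji_Distance; the sentiment dimensions and values below are made up for illustration):

import numpy as np

# stand-in sentiment lookup; unknown emojis map to NaN so they can be filtered out
_SENT = {
    "😂": np.array([0.8, 0.1, 0.1]),
    "😢": np.array([0.1, 0.8, 0.1]),
}

def emoji2sent(emojis):
    return np.array([_SENT.get(e, np.full(3, np.nan)) for e in emojis])

messages_emojis = [["😂", "😂", "😢"], ["😢"], ["🤔"]]

# emoji_mean=True: one label per message, the mean over all its emoji vectors
labels = np.array([np.mean(emoji2sent(e), axis=0) for e in messages_emojis])

# filter out samples without a usable label (NaN anywhere in the vector)
wrong_labels = np.isnan(np.linalg.norm(labels, axis=1))
labels = labels[np.invert(wrong_labels)]
print(labels)  # only the two labelled samples remain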
@@ -223,6 +242,11 @@ class sample_data_manager(object):
                stemmed_sent.append(word_stemmed)
            stemmed_sent = (" ").join(stemmed_sent)
            self.plain_text[key] = stemmed_sent
+           i += 1
+           if progress_callback is not None and i % 1024 == 0:
+               progress_callback(i / n)
+
+
 
        lemmatizer = WordNetLemmatizer()
        for key in self.plain_text.keys():
@@ -234,6 +258,10 @@ class sample_data_manager(object):
                lemmatized_sent.append(word_lemmatized)
            lemmatized_sent = (" ").join(lemmatized_sent)
            self.plain_text[key] = lemmatized_sent
+           i += 1
+           if progress_callback is not None and i % 1024 == 0:
+               progress_callback(i / n)
+       print("stemming and lemmatization done")
 
    def generate_emoji_count_and_weights(self):
        """
@@ -558,7 +586,7 @@ class trainer(object):
        for j in range(n):
            self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))
            if progress_callback is not None:
-               progress_callback()
+               progress_callback(j / n)
            pred, yt = self.test()
            mean_squared_error = ((pred - yt)**2).mean(axis=0)
            print("#" + str(j) + ": loss: ", mean_squared_error)
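With these changes every progress hook follows the same convention: the callback receives a completed fraction in [0, 1] (j / n per training batch above, (i+1)/n per file in read_files, and a throttled i / n during stemming, where updates fire only every 1024 items to limit widget traffic). A small illustrative sketch of that convention, independent of the classes in this commit (the names and dummy data below are made up):

def fit_in_batches(X, y, batch_size, fit_fn, progress_callback=None):
    # illustrative batch loop; fit_fn stands in for the predictor's fit call
    n = len(X) // batch_size
    for j in range(n):
        fit_fn(X[j*batch_size:(j+1)*batch_size], y[j*batch_size:(j+1)*batch_size])
        if progress_callback is not None:
            progress_callback(j / n)

# any callable taking a single float in [0, 1] works, e.g. progress_indicator.update
fit_in_batches(list(range(100)), list(range(100)), batch_size=25,
               fit_fn=lambda xb, yb: None,
               progress_callback=lambda frac: print("progress: {:.0%}".format(frac)))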