Merge branch 'master' of ssh://gogs@the-cake-is-a-lie.net:20022/jonas/NLP-LAB.git

Carsten 2018-07-23 10:22:42 +02:00
commit b99b2006f7
5 changed files with 565 additions and 38 deletions

Binary file not shown.


@@ -42,14 +42,18 @@ def stemming(message):
 # * compare words to emoji descriptions
 def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all",\
-                      stem=True, use_wordnet=True):
+                      stem=True, embeddings="wordnet"):
     # assumes there is a trained w2v model stored in the same directory!
     __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
-    if use_wordnet==False:
-        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    if embeddings=="word2Vec":
+        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    elif embeddings=="fastText":
+        wv = KeyedVectors.load("/fastTextVectors.kv", mmap='r')

     if (stem):
         sentence = stemming(sentence)

     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     matrix_list = []
@@ -61,7 +65,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
-                if use_wordnet:
+                if embeddings=="wordnet":
                     syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
                     if len(syn1) == 0:
                         continue
@@ -74,7 +78,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
                             val = w1.wup_similarity(w2)
                             if val is None:
                                 continue
-                else:
+                elif (embeddings == "word2Vec" or embeddings == "fastText"):
                     try:
                         val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
                     except KeyError:
@@ -112,11 +116,11 @@ def prepareData(stem=True, lower=True):
     return lookup

 # make a prediction for an input sentence
-# use_wordnet=True --> use wordnet similarities, otherwise use Word2Vec
+# embeddings = ["wordnet", "word2Vec", "fastText"]
 def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng',\
-            use_wordnet=True, n=10, t=0.9):
+            embeddings="wordnet", n=10, t=0.9):

-    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, use_wordnet=use_wordnet)
+    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings)

     try:
        if(criteria=="summed"):
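
Note: a minimal usage sketch of the new embeddings switch introduced above (hedged: prepareData and predict are the functions shown in this diff, but the example sentence is invented and the model files word2vec.model / fastTextVectors.kv referenced by evaluate_sentence are assumed to exist):

    # assumes the module above is imported and the pre-trained models are in place
    lookup = prepareData(stem=True, lower=True)

    # WordNet path-based similarity (the default):
    print(predict("I am so happy today", lookup, embeddings="wordnet"))

    # distributional similarity via pre-trained gensim KeyedVectors:
    print(predict("I am so happy today", lookup, embeddings="word2Vec"))
    print(predict("I am so happy today", lookup, embeddings="fastText"))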


@@ -43,19 +43,6 @@
     "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
     "[nltk_data] Package wordnet is already up-to-date!\n"
    ]
-  },
-  {
-   "ename": "NameError",
-   "evalue": "name 'min_words' is not defined",
-   "output_type": "error",
-   "traceback": [
-    "---------------------------------------------------------------------------",
-    "NameError                                 Traceback (most recent call last)",
-    "<ipython-input-2-ce00b6a80bda> in <module>()\n----> 1 import simple_twitter_learning as stl\n      2 import glob\n      3 import sys\n      4 from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n      5 import pickle\n",
-    "~/Dokumente/gitRepos/NLP-LAB/Project/simple_approach/simple_twitter_learning.py in <module>()\n    164 \n    165 \n--> 166 class sample_data_manager(object):\n    167     @staticmethod\n    168     def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None, emoji_mean=False, custom_target_emojis = None, min_words=0):\n",
-    "~/Dokumente/gitRepos/NLP-LAB/Project/simple_approach/simple_twitter_learning.py in sample_data_manager()\n    412             print(\"remaining samples after custom emoji filtering: \", len(self.labels))\n    413 \n--> 414     def filter_by_sentence_length(self, min_words=min_words):\n    415         assert self.plain_text is not None\n    416 \n",
-    "NameError: name 'min_words' is not defined"
-   ]
   }
  ],
 "source": [
@@ -63,7 +50,10 @@
    "import glob\n",
    "import sys\n",
    "from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer\n",
-   "import pickle"
+   "import pickle\n",
+   "import matplotlib.pyplot as plt\n",
+   "import matplotlib\n",
+   "import numpy as np"
]
},
{
@@ -82,7 +72,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -137,9 +127,48 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/markdown": [
+       "----"
+      ],
+      "text/plain": [
+       "<IPython.core.display.Markdown object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/markdown": [
+       "## User Interface"
+      ],
+      "text/plain": [
+       "<IPython.core.display.Markdown object>"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "application/vnd.jupyter.widget-view+json": {
+       "model_id": "233f744d595f4b81a362faef6b148fe7",
+       "version_major": 2,
+       "version_minor": 0
+      },
+      "text/plain": [
+       "Tab(children=(VBox(children=(HBox(children=(Text(value='./data_en/', description='root_path'), Button(descript…"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
    "source": [
    "mp(\"----\")\n",
    "mp(\"## User Interface\")\n",
@@ -237,7 +266,12 @@
    "     (widgets.HTML(),\"prediction\")\n",
    "    ],\n",
    "    [\n",
-   "     (widgets.Checkbox(),\"show_sorted_list\")\n",
+   "     (widgets.Checkbox(),\"show_sorted_list\"),\n",
+   "     (widgets.Button(),\"show_plot\")\n",
+   "    ],\n",
+   "    [\n",
+   "     (widgets.Text(), \"validation_emojis\"),\n",
+   "     (widgets.Button(),\"show_validation_plot\")\n",
+   "    ]\n",
    "   ],\n",
    "   \"playground\")\n",
@@ -255,7 +289,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -273,7 +307,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -361,7 +395,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -385,7 +419,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -465,7 +499,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -509,7 +543,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 10,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -637,6 +671,64 @@
    "\n"
   ]
  },
+ {
+  "cell_type": "markdown",
+  "metadata": {},
+  "source": [
+   "## plotting stuff for testing area"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 11,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "def sentiment_score(s):\n",
+   "    # (pos, neg, neu)^T\n",
+   "    return s[0] - s[1]\n",
+   "\n",
+   "def plot_sentiment_space(predicted_sentiment_vectors, top_sentiments, top_emojis):\n",
+   "    # sentiment score axis\n",
+   "    top_X = np.array([sentiment_score(x) for x in top_sentiments])\n",
+   "    pred_X = np.array([sentiment_score(x) for x in predicted_sentiment_vectors])\n",
+   "    \n",
+   "    # neutral axis:\n",
+   "    top_Y = np.array([x[2] for x in top_sentiments])\n",
+   "    pred_Y = np.array([x[2] for x in predicted_sentiment_vectors])\n",
+   "    \n",
+   "    fig_1, ax_1 = plt.subplots()  # figsize=(15,10)\n",
+   "    plt.title(\"sentiment-score-plot\")\n",
+   "    plt.xlabel(\"sentiment score\")\n",
+   "    plt.ylabel(\"neutrality\")\n",
+   "    plt.xlim([-1,1])\n",
+   "    plt.ylim([0,1])\n",
+   "    for i in range(len(top_X)):\n",
+   "        plt.text(top_X[i], top_Y[i], top_emojis[i])\n",
+   "    plt.plot(pred_X, pred_Y, 'bo')\n",
+   "    #plt.savefig(title + \" -- sentiment-plot.png\", bbox_inches='tight')\n",
+   "    \n",
+   "    # positive axis:\n",
+   "    top_X = np.array([x[0] for x in top_sentiments])\n",
+   "    pred_X = np.array([x[0] for x in predicted_sentiment_vectors])\n",
+   "    \n",
+   "    # negative axis:\n",
+   "    top_Y = np.array([x[1] for x in top_sentiments])\n",
+   "    pred_Y = np.array([x[1] for x in predicted_sentiment_vectors])\n",
+   "    \n",
+   "    fig_2, ax_2 = plt.subplots()  # figsize=(15,10)\n",
+   "    plt.title(\"positive-negative-plot\")\n",
+   "    plt.xlabel(\"positive\")\n",
+   "    plt.ylabel(\"negative\")\n",
+   "    plt.xlim([0,1])\n",
+   "    plt.ylim([0,1])\n",
+   "    for i in range(len(top_X)):\n",
+   "        plt.text(top_X[i], top_Y[i], top_emojis[i])\n",
+   "    plt.plot(pred_X, pred_Y, 'bo')\n",
+   "    #plt.savefig(title + \" -- positive-negative-plot.png\", bbox_inches='tight')\n",
+   "    plt.show()"
+  ]
+ },
{
"cell_type": "markdown",
"metadata": {},
@@ -646,14 +738,20 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 12,
    "metadata": {},
    "outputs": [],
    "source": [
+   "top_20 = list(\"😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂\")\n",
+   "top_20_sents = stl.emoji2sent(top_20)\n",
+   "\n",
+   "pred = None\n",
+   "\n",
    "def test_input(b):\n",
    "    global sdm\n",
    "    global pm\n",
    "    global tr\n",
+   "    global pred\n",
    "    with out_areas[\"playground\"]:\n",
    "        clear_output()\n",
    "        mp(\"----\")\n",
@@ -665,9 +763,41 @@
    "        shown_widgets[\"prediction\"].value = \"<h1> \" + str(stl.sent2emoji(pred)[0]) + \"</h1>\"\n",
    "        if shown_widgets[\"show_sorted_list\"].value:\n",
    "            mp(\"## \" + \"\".join(stl.edist.sentiment_vector_to_emoji(pred, only_emoticons=True, n_results=100)))\n",
    "    \n",
+   "\n",
+   "\n",
+   "def plot_pred(b):\n",
+   "    global sdm\n",
+   "    global pm\n",
+   "    global tr\n",
+   "    global pred\n",
+   "    with out_areas[\"playground\"]:\n",
+   "        plot_sentiment_space(pred, top_20_sents, top_20)\n",
+   "    \n",
+   "    \n",
+   "def plot_subset_pred(b):\n",
+   "    global sdm\n",
+   "    global pm\n",
+   "    global tr\n",
+   "    global pred\n",
+   "    with out_areas[\"playground\"]:\n",
+   "        clear_output()\n",
+   "        \n",
+   "        if sdm is None or pm is None:\n",
+   "            sys.stderr.write(\"ERROR: sample data and/or classifier missing!\\n\")\n",
+   "            return\n",
+   "        \n",
+   "        if tr is None:\n",
+   "            tr = stl.trainer(sdm=sdm, pm=pm)\n",
+   "        \n",
+   "        pred, y = tr.test(emoji_subset=list(shown_widgets[\"validation_emojis\"].value))\n",
+   "        print(len(pred))\n",
+   "        plot_sentiment_space(pred, top_20_sents, top_20)\n",
    "\n",
    "#link\n",
-   "shown_widgets[\"test_input\"].observe(test_input)"
+   "shown_widgets[\"test_input\"].observe(test_input)\n",
+   "shown_widgets[\"show_plot\"].on_click(plot_pred)\n",
+   "shown_widgets[\"show_validation_plot\"].on_click(plot_subset_pred)"
    ]
   },
{

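Note: the new notebook cells wire Button widgets to click handlers. A self-contained sketch of that ipywidgets pattern (hedged: the widget and callback names here are invented for illustration; shown_widgets and out_areas come from notebook cells not visible in this diff):

    import ipywidgets as widgets
    from IPython.display import display

    out = widgets.Output()
    button = widgets.Button(description="show_plot")

    def on_show_plot(b):
        # b is the clicked Button instance; render into the output area
        with out:
            print("plot would be drawn here")

    button.on_click(on_show_plot)  # same hookup as show_plot / show_validation_plot above
    display(button, out)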
File diff suppressed because one or more lines are too long


@@ -59,6 +59,54 @@ def sent2emoji(sent_arr, custom_target_emojis=None, only_emoticons=True):
 SINGLE_LABEL = True

 # top 20 emojis:
 top_20 = list("😳😋😀😌😏😔😒😎😢😅😁😉🙌🙏😘😊😩😍😭😂")
+top_20_sents = emoji2sent(top_20)
+
+# plotting function to evaluate stuff:
+def sentiment_score(s):
+    # (pos, neg, neu)^T
+    return s[0] - s[1]
+
+def plot_sentiment_space(predicted_sentiment_vectors, top_sentiments, top_emojis, style='bo'):
+    # sentiment score axis
+    top_X = np.array([sentiment_score(x) for x in top_sentiments])
+    pred_X = np.array([sentiment_score(x) for x in predicted_sentiment_vectors])
+
+    # neutral axis:
+    top_Y = np.array([x[2] for x in top_sentiments])
+    pred_Y = np.array([x[2] for x in predicted_sentiment_vectors])
+
+    fig_1, ax_1 = plt.subplots()  # figsize=(15,10)
+    plt.title("sentiment-score-plot")
+    plt.xlabel("sentiment score")
+    plt.ylabel("neutrality")
+    plt.xlim([-1,1])
+    plt.ylim([0,1])
+    for i in range(len(top_X)):
+        plt.text(top_X[i], top_Y[i], top_emojis[i])
+    plt.plot(pred_X, pred_Y, style)
+    plt.savefig("val-error_sentiment-plot" + str(datetime.datetime.now()) + ".png", bbox_inches='tight')
+
+    # positive axis:
+    top_X = np.array([x[0] for x in top_sentiments])
+    pred_X = np.array([x[0] for x in predicted_sentiment_vectors])
+
+    # negative axis:
+    top_Y = np.array([x[1] for x in top_sentiments])
+    pred_Y = np.array([x[1] for x in predicted_sentiment_vectors])
+
+    fig_2, ax_2 = plt.subplots()  # figsize=(15,10)
+    plt.title("positive-negative-plot")
+    plt.xlabel("positive")
+    plt.ylabel("negative")
+    plt.xlim([0,1])
+    plt.ylim([0,1])
+    for i in range(len(top_X)):
+        plt.text(top_X[i], top_Y[i], top_emojis[i])
+    plt.plot(pred_X, pred_Y, style)
+    plt.savefig("val-error_positive-negative-plot" + str(datetime.datetime.now()) + ".png", bbox_inches='tight')
+    plt.show()
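
Note: each sentiment vector here is (pos, neg, neu), so sentiment_score collapses it to pos - neg in [-1, 1], and the neutral component supplies the second axis of the first plot. A toy call (hedged: the vectors and emojis below are invented for illustration and assume this module's numpy/matplotlib/datetime imports):

    # two made-up (pos, neg, neu) predictions and two reference points
    predicted = np.array([[0.7, 0.1, 0.2], [0.2, 0.6, 0.2]])
    top = np.array([[0.9, 0.0, 0.1], [0.1, 0.8, 0.1]])

    # sentiment_score([0.7, 0.1, 0.2]) == 0.6  -> clearly positive
    # sentiment_score([0.2, 0.6, 0.2]) == -0.4 -> leaning negative
    plot_sentiment_space(predicted, top, ["😂", "😭"])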
# ----
# ## classes and functions we are using later:
@@ -515,9 +563,11 @@ class pipeline_manager(object):
         if fit_vectorizer:
             if sdm.X is None:
                 sdm.create_train_test_split()
+            print("fit vectorizer...")
             vec_train = vectorizer.fit_transform(sdm.X)
             vec_test = vectorizer.transform(sdm.Xt)
+            print("fitting done")

         # creating keras model:
         model=Sequential()
@@ -718,10 +768,12 @@ class trainer(object):
                     mean_squared_error = ((pred - yt)**2).mean(axis=0)
                     print("#" + str(e) + ": validation loss: ", mean_squared_error, "scalar: ", np.mean(mean_squared_error))
                     self.val.append(np.mean(mean_squared_error))
+                    plot_sentiment_space(pred, top_20_sents, top_20)
                 plt.figure(figsize=(10,5))
                 plt.plot(self.val)
+                plt.savefig("val_error" + str(datetime.datetime.now()) + ".png", bbox_inches='tight')
                 plt.show()
             else:
                 n = len(self.sdm.X) // batch_size
                 for i in range(n_epochs):
@@ -743,12 +795,32 @@ class trainer(object):
             for k in keras_batch_fitting_layer:
                 named_steps[k].fit = disabled_keras_fits[k]

-    def test(self):
+    def test(self, use_lemmatization=False, use_stemming=False, emoji_subset=None, only_test_on_valid_set=True):
+        '''
+        @param use_lemmatization: boolean
+        @param use_stemming: boolean
+        @param emoji_subset: list. If given, only make predictions on samples containing one of these emojis as teacher value
+
+        @return: prediction:list, teacher:list
+        '''
         if self.sdm.X is None:
             self.sdm.create_train_test_split()
-        return self.pm.predict(self.sdm.Xt, use_lemmatization=False, use_stemming=False), self.sdm.yt
+
+        Xt = self.sdm.Xt
+        yt = self.sdm.yt
+
+        print("original validation size: " + str(len(yt)))
+
+        if emoji_subset is not None:
+            has_emoji = np.array([edist.sentiment_vector_to_emoji(y) in emoji_subset for y in yt])
+            Xt = Xt[has_emoji]
+            yt = yt[has_emoji]
+            print("filtered validation size: " + str(len(yt)))
+
+        return self.pm.predict(Xt, use_lemmatization=use_lemmatization, use_stemming=use_stemming), yt
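
Note: a hedged example of the extended test signature (assumes a populated sample_data_manager sdm and pipeline_manager pm, obtained as in the notebook cells above):

    tr = trainer(sdm=sdm, pm=pm)

    # evaluate only on validation samples whose teacher emoji is 😂 or 😭:
    pred, teacher = tr.test(emoji_subset=["😂", "😭"])

    # full validation set, with stemming applied at prediction time:
    pred_all, teacher_all = tr.test(use_stemming=True)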