Merge branch 'master' of ssh://gogs@the-cake-is-a-lie.net:20022/jonas/NLP-LAB.git
commit 33b1e26a2a

Project/naive_approach/naiveApproachTest.ipynb (new file, 130 lines added)

@@ -0,0 +1,130 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import naive_approach"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "top_emojis = ['😂',\n",
    "              '😭',\n",
    "              '😍',\n",
    "              '😩',\n",
    "              '😊',\n",
    "              '😘',\n",
    "              '🙏',\n",
    "              '🙌',\n",
    "              '😉',\n",
    "              '😁',\n",
    "              '😅',\n",
    "              '😎',\n",
    "              '😢',\n",
    "              '😒',\n",
    "              '😏',\n",
    "              '😌',\n",
    "              '😔',\n",
    "              '😋',\n",
    "              '😀',\n",
    "              '😤']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "lookup = naive_approach.prepareData(emojis_to_consider=top_emojis)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "sentence=\"I am very happy today\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyError",
     "evalue": "357",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyError\u001b[0m Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-7-a7b8b0832a7d>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpred\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnaive_approach\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0msentence\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlookup\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0memojis_to_consider\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mtop_emojis\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m3\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m~\\Desktop\\NLP-LAB\\Project\\naive_approach\\naive_approach.py\u001b[0m in \u001b[0;36mpredict\u001b[1;34m(sentence, lookup, emojis_to_consider, criteria, description_key, lang, n, t)\u001b[0m\n\u001b[0;32m 117\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 118\u001b[0m \u001b[1;31m# build a result table\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 119\u001b[1;33m \u001b[0mtable_array\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlookup\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mdescription_key\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mn\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 120\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 121\u001b[0m \u001b[0mtable_frame\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable_array\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mcriteria\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'description'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\Desktop\\NLP-LAB\\Project\\naive_approach\\naive_approach.py\u001b[0m in \u001b[0;36m<listcomp>\u001b[1;34m(.0)\u001b[0m\n\u001b[0;32m 117\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 118\u001b[0m \u001b[1;31m# build a result table\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 119\u001b[1;33m \u001b[0mtable_array\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mlookup\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0miloc\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mindexes\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mdescription_key\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mn\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 120\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 121\u001b[0m \u001b[0mtable_frame\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mpd\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDataFrame\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtable_array\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcolumns\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mcriteria\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;34m'description'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyError\u001b[0m: 357"
     ]
    }
   ],
   "source": [
    "pred = naive_approach.predict(sentence, lookup, emojis_to_consider=top_emojis, n=3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['🎁', '🙋', '\\U0001f91f']"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
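
Note: the test notebook still passes emojis_to_consider to prepareData, while the diff below moves that filtering into predict. Against the updated API the cells boil down to roughly this sketch (it assumes the naive_approach module from this repo is importable; predict now returns emojis plus scores):

    import naive_approach

    top_emojis = ['😂', '😭', '😍', '😩', '😊', '😘', '🙏', '🙌', '😉', '😁',
                  '😅', '😎', '😢', '😒', '😏', '😌', '😔', '😋', '😀', '😤']

    lookup = naive_approach.prepareData()
    sentence = "I am very happy today"

    # top-3 emoji predictions for the sentence, restricted to top_emojis
    pred, scores = naive_approach.predict(sentence, lookup,
                                          emojis_to_consider=top_emojis, n=3)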
@@ -37,7 +37,7 @@ def stemming(messages):
 
 
 # * compare words to emoji descriptions
-def evaluate_sentence(sentence, table, description_key = 'description', lang = 'eng'):
+def evaluate_sentence(sentence, table, description_key = 'description', lang = 'eng', emojis_to_consider="all"):
 
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
@@ -77,10 +77,7 @@ def evaluate_sentence(sentence, table, description_key = 'description', lang = 'eng'):
 
 # load and preprocess data
 # emojis_to_consider can be either a list or "all"
-def prepareData(stemming=False, emojis_to_consider="all"):
+def prepareData(stemming=False):
 
-    table.head()
-
     if(stemming):
         table['description'] = stemming(table['description'])
 
@@ -88,9 +85,8 @@ def prepareData(stemming=False, emojis_to_consider="all"):
     lookup = {}
     emoji_set = []
     for index, row in table.iterrows():
-        if(emojis_to_consider=="all" or (type(emojis_to_consider)==list and row['character'] in emojis_to_consider)):
-            lookup[index] = row['character']
-            emoji_set.append(row['character'])
+        lookup[index] = row['character']
+        emoji_set.append(row['character'])
 
     emoji_set = set(emoji_set)
 
@@ -99,30 +95,44 @@ def prepareData(stemming=False, emojis_to_consider="all"):
 
 # make a prediction for an input sentence
 def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", description_key='description', lang = 'eng', n=10, t=0.9):
 
-    result = evaluate_sentence(sentence, table, description_key, lang)
+    result = evaluate_sentence(sentence, table, description_key, lang, emojis_to_consider=emojis_to_consider)
 
-    if(criteria=="summed"):
-        indexes = np.argsort([-np.sum(x) for x in result])[0:n]
-    elif (criteria=="max_val"):
-        indexes = np.argsort([-np.max(x) for x in result])[0:n]
-    elif(criteria=="avg"):
-        indexes = np.argsort([-np.mean(x) for x in result])[0:n]
-    else:
-        indexes= np.argsort([-len(np.where(x>t)[0]) / (x.shape[0] * x.shape[1]) for x in result])[0:n]
-
-    if(emojis_to_consider!="all"):
-        for i in indexes:
-            if (i not in lookup):
-                indexes = np.delete(indexes, [i])
-
-    # build a result table
-    table_array = [[lookup[indexes[i]], str(table.iloc[indexes[i]][description_key])] for i in range(n) ]
-
-    table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])
-
-    #display(table_frame)
-
-    return list(table_frame[criteria])
+    try:
+        if(criteria=="summed"):
+            resultValues = [-np.sum(x) for x in result]
+        elif (criteria=="max_val"):
+            resultValues = [-np.max(x) for x in result]
+        elif(criteria=="avg"):
+            resultValues = [-np.mean(x) for x in result]
+        else:
+            resultValues = [-len(np.where(x>t)[0]) / (x.shape[0] * x.shape[1]) for x in result]
+        indexes = np.argsort(resultValues)
+        results = np.sort(resultValues)
+
+        if (emojis_to_consider != "all" and type(emojis_to_consider) == list):
+            indexes2 = []
+            results2 = []
+            for i in range(len(indexes)):
+                if lookup[indexes[i]] in emojis_to_consider:
+                    indexes2.append(indexes[i])
+                    results2.append(results[i])
+            indexes = indexes2
+            results = results2
+        indexes = indexes[0:n]
+        results = results[0:n]
+
+        # build a result table
+        table_array = [[lookup[indexes[i]], str(table.iloc[indexes[i]][description_key])] for i in range(n) ]
+
+        table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])
+
+        #display(table_frame)
+
+        return list(table_frame[criteria]), results
+
+    except ZeroDivisionError as err:
+        print("There seems to be a problem with the input format. Please enter a nonempty string")
 
 #predict("I like to travel by train", description_key='description' , lang='eng')
 
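
Note: predict now returns two values (the emoji list plus the sorted criterion scores) and filters candidates by emoji character rather than by lookup index. A minimal sketch against the new signature:

    lookup = prepareData()
    emojis, scores = predict("I am very happy today", lookup,
                             emojis_to_consider=['😂', '😊'], criteria="summed", n=2)
    # scores holds the negated criterion values, so the best match comes first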
@@ -144,7 +144,7 @@
 {
  "data": {
   "application/vnd.jupyter.widget-view+json": {
-   "model_id": "5ac970d7d7cf4849b4f5adfb80a820c0",
+   "model_id": "4fd5552e6a024dcaa0f35a594c77ae99",
    "version_major": 2,
    "version_minor": 0
   },
@@ -172,7 +172,7 @@
 "    ],\n",
 "    [\n",
 "        (widgets.BoundedIntText(value=-1,disabled=True,min=-1, max=10), \"k_means_cluster\"),\n",
-"        (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=10), \"n_top_emojis\")\n",
+"        (widgets.BoundedIntText(value=20,disabled=True,min=-1, max=100), \"n_top_emojis\")\n",
 "    ],\n",
 "    [\n",
 "        (widgets.Button(disabled=True),\"load_data\")\n",
@@ -235,6 +235,18 @@
 "            ]\n",
 "        ], \n",
 "        \"train\" )\n",
+"create_area(\"playground 😎\",\n",
+"    [\n",
+"        [\n",
+"            (widgets.Text(),\"test_input\"),\n",
+"            (widgets.HTML(),\"prediction\")\n",
+"        ],\n",
+"        [\n",
+"            (widgets.Checkbox(),\"show_sorted_list\")\n",
+"        ]\n",
+"    ],\n",
+"    \"playground\")\n",
+"\n",
 "tab_manager"
 ]
 },
@@ -360,11 +372,11 @@
 "source": [
 "class progress_indicator(object):\n",
 "    \n",
-"    def __init__(self, n, description=\"progress\"):\n",
-"        self.w = widgets.IntProgress(value=0, min=0,max=n, description = description)\n",
+"    def __init__(self, description=\"progress\"):\n",
+"        self.w = widgets.FloatProgress(value=0, min=0,max=1, description = description)\n",
 "        display(self.w)\n",
-"    def update(self, dn=1):\n",
-"        self.w.value += dn\n",
+"    def update(self, val):\n",
+"        self.w.value = val\n",
 "    "
 ]
 },
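
Note: progress_indicator now reports fractional progress instead of counting steps; a minimal usage sketch (the work loop and its names are placeholders):

    p = progress_indicator("reading progress")
    for i in range(total):
        do_work(i)                  # placeholder for the actual work item
        p.update((i + 1) / total)   # pass a fraction between 0 and 1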
@@ -416,13 +428,15 @@
 "    r = shown_widgets[\"file_range\"].value\n",
 "    r = (r[0], r[1] + 1) # range has to be exclusive according to the last element!\n",
 "    \n",
-"    p = progress_indicator(r[1] - r[0], \"reading progress\")\n",
+"    p_r = progress_indicator(\"reading progress\")\n",
+"    p_s = progress_indicator(\"stemming progress\")\n",
 "    \n",
 "    sdm = stl.sample_data_manager.generate_and_read(path=shown_widgets[\"root_path\"].value,\n",
 "                                                    n_top_emojis=shown_widgets[\"n_top_emojis\"].value,\n",
 "                                                    file_range=range(r[0], r[1]),\n",
 "                                                    n_kmeans_cluster=shown_widgets[\"k_means_cluster\"].value,\n",
-"                                                    progress_callback=p.update)\n",
+"                                                    read_progress_callback=p_r.update,\n",
+"                                                    stem_progress_callback=p_s.update)\n",
 "    shown_widgets[\"batch_size\"].max = len(sdm.labels)\n",
 "    \n",
 "    \n",
@@ -461,11 +475,10 @@
 "    \n",
 "    print(\"update train test split:\")\n",
 "    sdm.create_train_test_split(split=val_split)\n",
-"    batch_n = len(sdm.X) // batch_size\n",
 "    \n",
 "    print(\"fit\")\n",
 "    \n",
-"    p = progress_indicator(batch_n)\n",
+"    p = progress_indicator()\n",
 "    \n",
 "    tr = stl.trainer(sdm=sdm, pm=pm)\n",
 "    tr.fit(progress_callback=p.update, batch_size=batch_size, n_epochs=n_epochs)\n",
@@ -601,6 +614,46 @@
 "\n",
 "\n"
 ]
+},
+{
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+  "## testing area"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+  "def test_input(b):\n",
+  "    global sdm\n",
+  "    global pm\n",
+  "    global tr\n",
+  "    with out_areas[\"playground\"]:\n",
+  "        clear_output()\n",
+  "        mp(\"----\")\n",
+  "        if pm is None:\n",
+  "            sys.stderr.write(\"ERROR: load or create classifier first\")\n",
+  "            return\n",
+  "        X = shown_widgets[\"test_input\"].value\n",
+  "        pred = pm.predict([X])\n",
+  "        shown_widgets[\"prediction\"].value = \"<h1> \" + str(stl.sent2emoji(pred)[0]) + \"</h1>\"\n",
+  "        if shown_widgets[\"show_sorted_list\"].value:\n",
+  "            mp(\"## \" + \"\".join(stl.edist.sentiment_vector_to_emoji(pred, only_emoticons=True, n_results=100)))\n",
+  "\n",
+  "#link\n",
+  "shown_widgets[\"test_input\"].observe(test_input)"
+ ]
+},
+{
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
 }
 ],
 "metadata": {
@@ -28,6 +28,12 @@ nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
 
+# check whether the display function exists:
+try:
+    display
+except NameError:
+    print("no fancy display function found... using print instead")
+    display = print
+
 # In[2]:
 
@@ -108,6 +114,38 @@ def get_wordnet_pos(treebank_tag):
     return wordnet.NOUN
 
 
+# global stemmer and lemmatizer function
+stemmer = SnowballStemmer("english")
+
+def stem(s):
+    stemmed_sent = []
+    for word in s.split(" "):
+        word_stemmed = stemmer.stem(word)
+        stemmed_sent.append(word_stemmed)
+    stemmed_sent = (" ").join(stemmed_sent)
+    return stemmed_sent
+
+
+lemmatizer = WordNetLemmatizer()
+
+def lemm(s):
+    lemmatized_sent = []
+    sent_pos = pos_tag(word_tokenize(s))
+    for word in sent_pos:
+        wordnet_pos = get_wordnet_pos(word[1].lower())
+        word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)
+        lemmatized_sent.append(word_lemmatized)
+    lemmatized_sent = (" ").join(lemmatized_sent)
+    return lemmatized_sent
+
+
+def batch_stem(sentences):
+    return [stem(s) for s in sentences]
+
+def batch_lemm(sentences):
+    return [lemm(s) for s in sentences]
+
+
 # ### sample data manager
 # the sample data manager loads and preprocesses data
 # most common way to use:
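
Note: with stem/lemm now at module level, text can be preprocessed outside the sample_data_manager pipeline; a quick sketch:

    stem("I am loving these colorful flowers")   # stems each whitespace-separated token
    lemm("the cats were running")                # POS-tags, then lemmatizes each token
    batch_lemm(["dogs barked", "birds sang"])    # list-in, list-out convenience wrapper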
@@ -123,7 +161,7 @@ def get_wordnet_pos(treebank_tag):
 
 class sample_data_manager(object):
     @staticmethod
-    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, progress_callback=None):
+    def generate_and_read(path:str, only_emoticons=True, apply_stemming=True, n_top_emojis=-1, file_range=None, n_kmeans_cluster=-1, read_progress_callback=None, stem_progress_callback=None):
         """
         generate, read and process train data in one step.
 
@@ -137,9 +175,9 @@ class sample_data_manager(object):
         @return: sample_data_manager object
         """
         sdm = sample_data_manager(path)
-        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=progress_callback)
+        sdm.read_files(file_index_range=range(sdm.n_files) if file_range is None else file_range, only_emoticons=only_emoticons, progress_callback=read_progress_callback)
         if apply_stemming:
-            sdm.apply_stemming_and_lemmatization()
+            sdm.apply_stemming_and_lemmatization(progress_callback=stem_progress_callback)
 
         sdm.generate_emoji_count_and_weights()
 
@@ -161,7 +199,6 @@ class sample_data_manager(object):
         self.data_root_folder = data_root_folder
         self.json_files = sorted(glob.glob(self.data_root_folder + "/*.json"))
         self.n_files = len(self.json_files)
-        self.raw_data = None
         self.emojis = None
         self.plain_text = None
         self.labels = None
@@ -176,46 +213,70 @@ class sample_data_manager(object):
         self.use_binary_labels = False
         self.kmeans_cluster = None
         self.label_binarizer = None
+        self.use_stemming = False
+        self.use_lemmatization = False
 
-    def read_files(self, file_index_range:list, only_emoticons=True, progress_callback=None):
+    def read_files(self, file_index_range:list, only_emoticons=True, emoji_mean=False ,progress_callback=None):
         """
         reading (multiple) files to one panda table.
 
         @param file_index_range: range of file's indices to read (eg `range(3)` to read the first three files)
         @param only_emoticons: if True, only messages containing emoticons (aka smileys) are used. This classification is derived from Tools.Emoji_Distance
+        @param emoji_mean: if True, using mean of all emojis instead of the last one
         """
         assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files
+        n = len(file_index_range)
         for i in file_index_range:
             print("reading file: " + self.json_files[i] + "...")
-            if self.raw_data is None:
-                self.raw_data = pd.read_json(self.json_files[i], encoding="utf-8")
-            else:
-                self.raw_data = self.raw_data.append(pd.read_json(self.json_files[i], encoding="utf-8"))
+            raw_data_i = pd.read_json(self.json_files[i], encoding="utf-8")
+            emojis_i = raw_data_i['EMOJI']
+            plain_text_i = raw_data_i['text']
+
+            # replacing keywords. TODO: maybe these information can be extracted and used
+            plain_text_i = plain_text_i.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","")
+
+            if not emoji_mean:
+                # so far filtering for the latest emoji. TODO: maybe there are also better approaches
+                labels_i = emoji2sent([latest(e) for e in emojis_i], only_emoticons=only_emoticons )
+            else:
+                labels_i = np.array([np.mean(emoji2sent(e, only_emoticons=only_emoticons), axis=0) for e in emojis_i])
+
+            # and filter out all samples we have no label for:
+            wrong_labels = np.isnan(np.linalg.norm(labels_i, axis=1))
+            labels_i = labels_i[np.invert(wrong_labels)]
+            plain_text_i = plain_text_i[np.invert(wrong_labels)]
+            emojis_i = emojis_i[np.invert(wrong_labels)]
+            print("imported " + str(len(labels_i)) + " samples")
+
+            if self.labels is None:
+                self.labels = labels_i
+            else:
+                self.labels = np.append(self.labels, labels_i, axis=0)
+
+            if self.emojis is None:
+                self.emojis = emojis_i
+            else:
+                self.emojis = pd.concat([self.emojis,emojis_i],ignore_index=True)
+
+            if self.plain_text is None:
+                self.plain_text = plain_text_i
+            else:
+                self.plain_text = pd.concat([self.plain_text,plain_text_i],ignore_index=True)
+
             if progress_callback is not None:
-                progress_callback()
-        self.emojis = self.raw_data['EMOJI']
-        self.plain_text = self.raw_data['text']
-
-        # replacing keywords. TODO: maybe these information can be extracted and used
-        self.plain_text = self.plain_text.str.replace("(<EMOJI>|<USER>|<HASHTAG>)","").str.replace("[" + "".join(list(emoji_blacklist)) + "]","")
-
-        # so far filtering for the latest emoji. TODO: maybe there are also better approaches
-        self.labels = emoji2sent([latest(e) for e in self.emojis], only_emoticons=only_emoticons )
-
-        # and filter out all samples we have no label for:
-        wrong_labels = np.isnan(np.linalg.norm(self.labels, axis=1))
-        self.labels = self.labels[np.invert(wrong_labels)]
-        self.plain_text = self.plain_text[np.invert(wrong_labels)]
-        self.emojis = self.emojis[np.invert(wrong_labels)]
-
-        print("imported " + str(len(self.labels)) + " samples")
-
-    def apply_stemming_and_lemmatization(self):
+                progress_callback((i+1)/n)
+
+    def apply_stemming_and_lemmatization(self, progress_callback = None):
         """
         apply stemming and lemmatization to plain text samples
         """
+        self.use_stemming = True
+        self.use_lemmatization = True
+        print("apply stemming and lemmatization...")
         stemmer = SnowballStemmer("english")
+        n = self.plain_text.shape[0] * 2 # 2 for loops
+        i = 0
         for key in self.plain_text.keys():
             stemmed_sent = []
             for word in self.plain_text[key].split(" "):
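
Note: read_files can now label a tweet by the mean sentiment of all its emojis instead of only the latest one; a sketch (the data path is a placeholder):

    sdm = sample_data_manager("./data")
    sdm.read_files(file_index_range=range(2),
                   emoji_mean=True,
                   progress_callback=lambda f: print("{:.0%}".format(f)))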
@@ -223,6 +284,11 @@ class sample_data_manager(object):
                 stemmed_sent.append(word_stemmed)
             stemmed_sent = (" ").join(stemmed_sent)
             self.plain_text[key] = stemmed_sent
+            i += 1
+            if progress_callback is not None and i % 1024 == 0:
+                progress_callback(i / n)
+
+
 
         lemmatizer = WordNetLemmatizer()
         for key in self.plain_text.keys():
@@ -234,6 +300,10 @@ class sample_data_manager(object):
                 lemmatized_sent.append(word_lemmatized)
             lemmatized_sent = (" ").join(lemmatized_sent)
             self.plain_text[key] = lemmatized_sent
+            i += 1
+            if progress_callback is not None and i % 1024 == 0:
+                progress_callback(i / n)
+        print("stemming and lemmatization done")
 
     def generate_emoji_count_and_weights(self):
         """
@@ -503,8 +573,12 @@ class pipeline_manager(object):
         """fitting the pipeline"""
         self.pipeline.fit(X,y)
 
-    def predict(self,X):
+    def predict(self,X, use_stemming=True, use_lemmatization=True):
         """predict"""
+        if use_stemming:
+            X = np.array(batch_stem(X))
+        if use_lemmatization:
+            X = np.array(batch_lemm(X))
         return self.pipeline.predict(X)
 
 
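
Note: predict now preprocesses raw input by default, so callers that pass already stemmed/lemmatized text have to opt out, as trainer.test() does in the last hunk below. A sketch:

    pm.predict(["I am loving it"])   # raw text: stemmed and lemmatized before the pipeline runs
    pm.predict(sdm.Xt, use_stemming=False, use_lemmatization=False)  # already-preprocessed samples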
@@ -558,7 +632,7 @@ class trainer(object):
             for j in range(n):
                 self.pm.fit(X = np.array(self.sdm.X[j*batch_size:(j+1)*batch_size]), y = np.array(self.sdm.y[j*batch_size:(j+1)*batch_size]))
                 if progress_callback is not None:
-                    progress_callback()
+                    progress_callback(j / n)
                 pred, yt = self.test()
                 mean_squared_error = ((pred - yt)**2).mean(axis=0)
                 print("#" + str(j) + ": loss: ", mean_squared_error)
@@ -578,6 +652,6 @@ class trainer(object):
         '''
         if self.sdm.X is None:
             self.sdm.create_train_test_split()
-        return self.pm.predict(self.sdm.Xt), self.sdm.yt
+        return self.pm.predict(self.sdm.Xt, use_lemmatization=False, use_stemming=False), self.sdm.yt
 
 