lemmatization and stemming in prediction
This commit is contained in:
parent 2706e19aec
commit b8bd92db06
@@ -28,6 +28,12 @@ nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
 
+# check whether the display function exists:
+try:
+    display
+except NameError:
+    print("no fancy display function found... using print instead")
+    display = print
 
 # In[2]:
 
@@ -108,6 +114,38 @@ def get_wordnet_pos(treebank_tag):
         return wordnet.NOUN
 
 
+# global stemmer and lemmatizer
+stemmer = SnowballStemmer("english")
+
+def stem(s):
+    stemmed_sent = []
+    for word in s.split(" "):
+        word_stemmed = stemmer.stem(word)
+        stemmed_sent.append(word_stemmed)
+    stemmed_sent = (" ").join(stemmed_sent)
+    return stemmed_sent
+
+
+lemmatizer = WordNetLemmatizer()
+
+def lemm(s):
+    lemmatized_sent = []
+    sent_pos = pos_tag(word_tokenize(s))
+    for word in sent_pos:
+        wordnet_pos = get_wordnet_pos(word[1].lower())
+        word_lemmatized = lemmatizer.lemmatize(word[0], pos=wordnet_pos)
+        lemmatized_sent.append(word_lemmatized)
+    lemmatized_sent = (" ").join(lemmatized_sent)
+    return lemmatized_sent
+
+
+def batch_stem(sentences):
+    return [stem(s) for s in sentences]
+
+def batch_lemm(sentences):
+    return [lemm(s) for s in sentences]
+
+
 # ### sample data manager
 # the sample data manager loads and preprocesses data
 # most common way to use:
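A quick sanity check of the new helpers, assuming the definitions above are in scope; the example sentences and the behaviour noted in the comments are illustrative, not part of the commit:

# illustrative usage of the added helpers (requires the nltk downloads at the top of the script)
example = "the cats are running around"

print(stem(example))                                # stems each whitespace-separated token, e.g. "cats" -> "cat"
print(lemm(example))                                # POS-tags the sentence and lemmatizes it token by token
print(batch_stem([example, "dogs were barking"]))   # list of sentences in, list of stemmed strings out
print(batch_lemm([example, "dogs were barking"]))   # same, but lemmatized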
@@ -175,6 +212,8 @@ class sample_data_manager(object):
         self.use_binary_labels = False
         self.kmeans_cluster = None
         self.label_binarizer = None
+        self.use_stemming = False
+        self.use_lemmatization = False
 
     def read_files(self, file_index_range:list, only_emoticons=True, emoji_mean=False, progress_callback=None):
         """
@@ -231,6 +270,8 @@ class sample_data_manager(object):
         """
         apply stemming and lemmatization to plain text samples
         """
+        self.use_stemming = True
+        self.use_lemmatization = True
         print("apply stemming and lemmatization...")
         stemmer = SnowballStemmer("english")
         n = self.plain_text.shape[0] * 2  # 2 for loops
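Together with the defaults added to __init__ above, these flags record whether the loaded plain-text samples have been stemmed and lemmatized; the commit only sets them, so how (or whether) other code reads them is not visible in this diff.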
@@ -531,8 +572,12 @@ class pipeline_manager(object):
         """fitting the pipeline"""
         self.pipeline.fit(X, y)
 
-    def predict(self, X):
+    def predict(self, X, use_stemming=True, use_lemmatization=True):
         """predict"""
+        if use_stemming:
+            X = batch_stem(X)
+        if use_lemmatization:
+            X = batch_lemm(X)
         return self.pipeline.predict(X)
 
 
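With the new keyword arguments, raw (unpreprocessed) strings can be handed directly to predict. A minimal sketch, assuming pm is an already fitted pipeline_manager; the variable names and sample sentences are made-up examples:

# pm is assumed to be a fitted pipeline_manager instance
raw = ["I really love this!", "this is so annoying..."]

pred = pm.predict(raw)                       # stems and lemmatizes the input first (default)
pred = pm.predict(batch_lemm(batch_stem(raw)),
                  use_stemming=False,
                  use_lemmatization=False)   # input is already preprocessed, skip both steps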
@@ -606,6 +651,6 @@ class trainer(object):
         '''
         if self.sdm.X is None:
             self.sdm.create_train_test_split()
-        return self.pm.predict(self.sdm.Xt), self.sdm.yt
+        return self.pm.predict(self.sdm.Xt, use_lemmatization=False, use_stemming=False), self.sdm.yt
 
 
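The flags are switched off here presumably because self.sdm.Xt comes from the sample data manager, whose samples have already been stemmed and lemmatized when that preprocessing step was run, so predict should not apply it a second time.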