From f4766912e18a2b855aaebc5f209caa3202766322 Mon Sep 17 00:00:00 2001
From: Jonas Weinz
Date: Thu, 19 Jul 2018 16:46:19 +0200
Subject: [PATCH] dump put emoji counts

---
 Project/simple_approach/simple_twitter_learning.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/Project/simple_approach/simple_twitter_learning.py b/Project/simple_approach/simple_twitter_learning.py
index 8bfedae..8c4d9e9 100644
--- a/Project/simple_approach/simple_twitter_learning.py
+++ b/Project/simple_approach/simple_twitter_learning.py
@@ -24,6 +24,8 @@ import pickle
 import operator
 from sklearn.pipeline import Pipeline
 import json
+import datetime
+
 nltk.download('punkt')
 nltk.download('averaged_perceptron_tagger')
 nltk.download('wordnet')
@@ -227,6 +229,7 @@ class sample_data_manager(object):
         """
         assert np.min(file_index_range) >= 0 and np.max(file_index_range) < self.n_files
         n = len(file_index_range)
+
         for i in file_index_range:
             print("reading file: " + self.json_files[i] + "...")
             raw_data_i = pd.read_json(self.json_files[i], encoding="utf-8")
@@ -259,7 +262,7 @@ class sample_data_manager(object):
             tmp = [np.nanmean(emoji2sent(e, only_emoticons=only_emoticons), axis=0, dtype=float) for e in emojis_i]
             c = 0
             for t in tmp:
-
+                # only to find and debug wrong formatted data
                 if str(type(t)) != "<class 'numpy.float64'>":
                     print(t, type(t))
                     print(emojis_i[c])
@@ -358,6 +361,12 @@ class sample_data_manager(object):
 
             self.emoji_weights['X'] = 0 # dummy values
             self.emoji_count['X'] = 0
+
+        # dump count data to json:
+        f = open("count_from_read_progress_" + str(datetime.datetime.now()) + ".json", 'w')
+        f.write(json.dumps(self.emoji_count, ensure_ascii=False))
+        f.close()
+
 
     def get_emoji_count(self):
         """
@@ -371,7 +380,7 @@
 
     def filter_by_top_emojis(self,n_top = 20):
         """
-        filgter out messages not containing one of the `n_top` emojis
+        filter out messages not containing one of the `n_top` emojis
         @param n_top: number of top emojis used for filtering
         """