Changed data structure to dictionary
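The emoji lookup moves from per-row access on the pandas DataFrame to a plain dict keyed by row index, holding [emoji, description] pairs. A minimal sketch of the old versus new access pattern, assuming the 'character' and 'description' columns of emoji_descriptions.csv as used in the diff below:

    import pandas as pd

    table = pd.read_csv('../Tools/emoji_descriptions.csv')

    # Structure introduced by this commit: { index: [emoji, description] }
    tableDict = {index: [row['character'], row['description']]
                 for index, row in table.iterrows()}

    # Old (DataFrame)                         New (dict)
    # row['character']                  ->    tableDict[index][0]
    # row['description']                ->    tableDict[index][1]
    # table.iloc[i]['description']      ->    tableDict[i][1]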
@@ -19,6 +19,12 @@ import pprint
 # # Naive Approach
 table = pd.read_csv('../Tools/emoji_descriptions.csv')
 
+##Store table in the format:
+## { index: [emoji, description]}
+tableDict = {}
+for index, row in table.iterrows():
+    tableDict.update({index: [row['character'], row['description']]})
+
 #######################
 # Helper functions
 #######################
@@ -37,15 +43,15 @@ def stemming(messages):
 
 
 # * compare words to emoji descriptions
-def evaluate_sentence(sentence, table, description_key = 'description', lang = 'eng', emojis_to_consider="all"):
+def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all"):
     
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     l = table.shape[0]
     matrix_list = []
     
-    for index, row in table.iterrows():
-        emoji_tokens = word_tokenize(row[description_key])
+    for index in tableDict.keys():
+        emoji_tokens = word_tokenize(tableDict[index][1])
         m = len(emoji_tokens)
 
         mat = np.zeros(shape=(m,n))
@@ -79,23 +85,24 @@ def evaluate_sentence(sentence, table, description_key = 'description', lang = 'eng', emojis_to_consider="all"):
 # emojis_to_consider can be either a list or "all"
 def prepareData(stemming=False):
     if(stemming):
-        table['description'] = stemming(table['description'])
+        for index in tableDict.keys():
+            tableDict[index][1] = stemming(tableDict[index][1])
     
     #collect the emojis
     lookup = {}
     emoji_set = []
-    for index, row in table.iterrows():
-        lookup[index] = row['character']
-        emoji_set.append(row['character'])
+    for index in tableDict.keys():
+        lookup[index] = tableDict[index][0]
+        emoji_set.append(tableDict[index][0])
 
     emoji_set = set(emoji_set)
     
     return lookup
 
 # make a prediction for an input sentence
-def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", description_key='description', lang = 'eng', n=10, t=0.9):
+def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng', n=10, t=0.9):
 
-    result = evaluate_sentence(sentence, table, description_key, lang, emojis_to_consider=emojis_to_consider)
+    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider)
     
     try:
         if(criteria=="summed"):
@@ -118,20 +125,22 @@ def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", description_key='description', lang = 'eng', n=10, t=0.9):
                     results2.append(results[i])
            indexes = indexes2
            results = results2
+
         indexes = indexes[0:n]
         results = results[0:n]
         
         # build a result table
-        table_array = [[lookup[indexes[i]], str(table.iloc[indexes[i]][description_key])] for i in range(n) ]
+        table_array = [lookup[indexes[i]] for i in range(n) ]
           
-        table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])
+        #table_frame = pd.DataFrame(table_array, columns=[criteria, 'description'])
         
         #display(table_frame)
         
-        return list(table_frame[criteria]), results
+        return table_array, results
     
     except ZeroDivisionError as err:
         print("There seems to be a problem with the input format. Please enter a nonempty string")
+        return [], []
 
 
 #predict("I like to travel by train", description_key='description' , lang='eng')