Integrated FastText
Project/naive_approach/fastTextVectors.kv  (BIN, new file; binary content not shown)
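The committed .kv file is a gensim KeyedVectors dump of fastText word vectors, loadable with KeyedVectors.load() as the diff below does. A minimal sketch of how such a file could be produced follows; this is not the project's actual training script, and the corpus, hyperparameters, and gensim 4.x API (vector_size, epochs) are assumptions.

    # Sketch only: build fastText vectors and save them as fastTextVectors.kv.
    # Corpus and hyperparameters are placeholders, not the project's real ones.
    from gensim.models.fasttext import FastText

    corpus = [["this", "is", "one", "tokenized", "sentence"],
              ["and", "another", "one"]]
    model = FastText(sentences=corpus, vector_size=100, window=5, min_count=1, epochs=10)
    model.wv.save("fastTextVectors.kv")   # later loadable via KeyedVectors.load("fastTextVectors.kv", mmap='r')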
										
									
								
@@ -42,14 +42,18 @@ def stemming(message):
 
 # * compare words to emoji descriptions
 def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', emojis_to_consider="all",\
-                      stem=True, use_wordnet=True):
+                      stem=True, embeddings="wordnet"):
     # assumes there is a trained w2v model stored in the same directory!
     __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
-    if use_wordnet==False:
-        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
     
+    if embeddings=="word2Vec":
+        wv = KeyedVectors.load(str(__location__)+"/word2vec.model", mmap='r')
+    elif embeddings=="fastText":
+        wv = KeyedVectors.load("/fastTextVectors.kv", mmap='r')
+        
     if (stem):
         sentence = stemming(sentence)
+        
     tokenized_sentence = word_tokenize(sentence)
     n = len(tokenized_sentence)
     matrix_list = []
@@ -61,7 +65,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
         mat = np.zeros(shape=(m,n))
         for i in range(len(emoji_tokens)):
             for j in range(len(tokenized_sentence)):
-                if use_wordnet:
+                if embeddings=="wordnet":
                     syn1 = wordnet.synsets(emoji_tokens[i],lang=lang)
                     if len(syn1) == 0:
                         continue
@@ -74,7 +78,7 @@ def evaluate_sentence(sentence, description_key = 'description', lang = 'eng', e
                     val = w1.wup_similarity(w2)
                     if val is None:
                         continue
-                else:
+                elif (embeddings == "word2Vec" or embeddings == "fastText"):
                     try:
                         val = wv.similarity(emoji_tokens[i], tokenized_sentence[j])
                     except KeyError:
@@ -112,11 +116,11 @@ def prepareData(stem=True, lower=True):
     return lookup
 
 # make a prediction for an input sentence
-# use_wordnet=True --> use wordnet similarites, otherwise use Word2Vec
+# embeddings = ["wordnet", "word2Vec", "fastText"]
 def predict(sentence, lookup, emojis_to_consider="all", criteria="threshold", lang = 'eng',\
-            use_wordnet=True, n=10, t=0.9):
+            embeddings="wordnet", n=10, t=0.9):
 
-    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, use_wordnet=use_wordnet)
+    result = evaluate_sentence(sentence, lang, emojis_to_consider=emojis_to_consider, embeddings=embeddings)
     
     try:
         if(criteria=="summed"):
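For reference, a minimal call of the extended predict() API might look as follows. The input sentence and parameter values are illustrative only, and the committed code loads the vectors from the hard-coded path "/fastTextVectors.kv", so that file must be reachable at runtime.

    # Illustrative usage of the new embeddings switch; values are placeholders.
    lookup = prepareData(stem=True, lower=True)
    prediction = predict("I am so happy today", lookup,
                         emojis_to_consider="all", criteria="threshold",
                         embeddings="fastText", n=10, t=0.9)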