Merge branch 'master' of ssh://the-cake-is-a-lie.net:20022/jonas/NLP-LAB
commit 006f0fa0aa
96  Project/Tools/json_stream_filter/README.md  Normal file
@@ -0,0 +1,96 @@
# JSON stream filter

A little tool for performing regex operations on string values in JSON files (or streams).

----

## Basics

Let this be an example set:

```json
{
  "id": "obj_1",
  "key1": "some example text! 1234",
  "key2": "another example"
}
{
  "id": "obj_2",
  "key1": "...",
  "key2": "..."
}
```

An example regex operation that replaces all numbers in all `key1` fields with the string `NUMBER` and stores all matches in a new field called `found_numbers` can be defined as:

```
;[0-9]+;key1;NUMBER;found_numbers
```

The first character (here `;`) is used as the separator for the following fields. It can be any Unicode character. The fields in detail (a short parsing sketch follows the list):

* `[0-9]+`: the regular expression used for finding matches, in this case at least one character in the range 0-9
* `key1`: the key on which the expression is performed. All other keys are ignored
* `NUMBER`: the string that replaces found matches. Can be any string (also an empty string) or any replacement expression accepted by Python's `re` library for a substitution
    * **NOTE**: use `null` here for not doing any substitution
* `found_numbers`: key name under which found matches are stored as a list. Set to an empty string for not storing the matches
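For illustration, here is how such an operation string decomposes into its fields; this mirrors what `regex_tuple.create_from_string` in `json_stream_filter.py` does:

```python
s = ";[0-9]+;key1;NUMBER;found_numbers"
d = s[0]                                # the first character is the separator
regexp, key, replace, store = s[1:].split(d)
print(regexp, key, replace, store)      # -> [0-9]+ key1 NUMBER found_numbers
```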
The result for the example:

```json
{
  "id": "obj_1",
  "key1": "some example text! NUMBER",
  "key2": "another example",
  "found_numbers": [
    "1234"
  ]
}
{
  "id": "obj_2",
  "key1": "...",
  "key2": "...",
  "found_numbers": []
}
```

----

## Command line interface

Just run the Python file with each regex operation as its own argument. Output is written to stdout, progress information and errors to stderr. Input is read from stdin.

Assuming the example file above is stored in `example.json`, we can store the result in `result.json` by doing:

```bash
cat example.json | json_stream_filter.py ";[0-9]+;key1;NUMBER;found_numbers" > result.json
```

----

## Using as a Python module

The file can be imported as a regular module. It contains two classes:

* `regex_tuple`: for handling the regex operations (as described above). A tuple can be initialized either from a string as above:
    * `regex_tuple.create_from_string(s)`

  or by passing the already separated fields to the constructor:

    * `regex_tuple(regexp_str, keys_to_process, replace_str = None, key_to_store_match = None)`
    * **NOTE**: `keys_to_process` has to be a list of keys (to allow future extensions), but so far only one key is supported, so at the moment this should be a list containing a single key

The second class is the file processor:

* `json_streamer`: initialized the following way:
    * `json_streamer(output_keys, regex_tuple_list, json_indent=JSON_INDENT)`
    * `output_keys`: ignored so far; will be used for filtering output by key names
    * `regex_tuple_list`: list of the `regex_tuple` objects to apply
    * `json_indent`: indent of the formatted JSON output. Set to `2` by default

To start the main filter process:

* `main_stream(stream_input=sys.stdin, stream_output=sys.stdout, stream_error=sys.stderr)`
    * By default it uses the standard system streams, but any file object can be passed as input/output parameter.
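A minimal usage sketch (assuming the script's directory is on the Python path so that it imports as `json_stream_filter`; the `io.StringIO` objects stand in for real streams):

```python
import io
import json_stream_filter as jsf

# the same operation as in the CLI example above
op = jsf.regex_tuple.create_from_string(";[0-9]+;key1;NUMBER;found_numbers")

streamer = jsf.json_streamer(None, [op])  # output_keys is ignored so far

source = io.StringIO('{"id": "obj_1", "key1": "some example text! 1234", "key2": "another example"}')
sink = io.StringIO()
streamer.main_stream(stream_input=source, stream_output=sink, stream_error=io.StringIO())

print(sink.getvalue())  # the filtered object, pretty-printed with an indent of 2
```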
140  Project/Tools/json_stream_filter/json_stream_filter.py  Executable file
@@ -0,0 +1,140 @@
#!/usr/bin/env python3

import json
import sys
import re

JSON_INDENT = 2

# A little helper tool for processing and manipulating JSON streams,
# written because jq is a little bit limited in its abilities to process JSON values.
# Used here especially to filter out emojis with regexp and put filtered elements
# into new key-value pairs.

class regex_tuple(object):
    '''
    a regex tuple object consists of:
    * regexp_str: regexp string used for finding matches
    * keys_to_process: keys whose values are processed
    * replace_str: replacement string, or None to skip substitution
    * key_to_store_match: key under which found matches are stored, or None
    '''
    def __init__(self, regexp_str, keys_to_process, replace_str = None, key_to_store_match = None):
        self.regexp_str = regexp_str
        self.keys_to_process = keys_to_process
        self.key_to_store_match = key_to_store_match
        self.replace_str = replace_str
        self.regexp_obj = re.compile(self.regexp_str)

    @staticmethod
    def create_from_string(s):
        # the delimiter is the first char:
        d = s[0]
        vals = s[1:].split(d)
        if len(vals) != 4:
            sys.stderr.write("Error creating regex object from string: " + s + "\n")
            return None
        reg_str = vals[0]
        reg_key = vals[1]
        reg_rep_str = vals[2] if vals[2] != "null" else None
        reg_store = vals[3] if vals[3] != "null" and len(vals[3]) > 0 else None

        return regex_tuple(reg_str, [reg_key], reg_rep_str, reg_store)


class json_streamer(object):
    def __init__(self, output_keys, regex_tuple_list, json_indent=JSON_INDENT):
        self.output_keys = output_keys
        self.regex_tuple_list = regex_tuple_list
        self.json_indent = json_indent
        self.regex_dict = None
        self.create_regex_dict()

    def create_regex_dict(self):
        # map each key to the list of regex operations registered for it
        d = {}
        for item in self.regex_tuple_list:
            for k in item.keys_to_process:
                if k in d:
                    d[k].append(item)
                else:
                    d[k] = [item]
        self.regex_dict = d

    def process_json_object(self, stream_dict):
        # for every registered key, look whether it occurs in our stream object
        for key, r_list in self.regex_dict.items():
            if key in stream_dict:
                for r in r_list:
                    if r.key_to_store_match is not None:
                        # looking for all occurrences and storing them under a JSON key
                        matches = r.regexp_obj.findall(stream_dict[key])
                        stream_dict[r.key_to_store_match] = matches  # TODO: cannot handle multiple rules storing in the same key!
                    if r.replace_str is not None:
                        # replacing all occurrences
                        stream_dict[key] = r.regexp_obj.sub(r.replace_str, stream_dict[key])

    def main_stream(self, stream_input = sys.stdin, stream_output = sys.stdout, stream_error = sys.stderr):
        processed_buffer = ""
        depth = 0              # bracket depth (in case we have to handle nested json objects)
        inside_quotes = False  # used for detecting whether we are inside a string, in order to ignore brackets inside quotes
        line_counter = 0
        current_batch_start_line = 0
        success_batch_counter = 0
        fail_batch_counter = 0

        prev_c = ''

        for line in stream_input:
            line_counter += 1
            for c in line:
                if c == '{' and not inside_quotes:
                    if depth == 0:
                        current_batch_start_line = line_counter
                    depth += 1
                    processed_buffer += c
                elif c == '}' and depth > 0 and not inside_quotes:
                    depth -= 1
                    processed_buffer += c
                    if depth == 0:
                        try:
                            d = json.loads(processed_buffer)
                            self.process_json_object(d)
                            stream_output.write(json.dumps(d, indent=self.json_indent, ensure_ascii=False))
                            processed_buffer = ""
                            success_batch_counter += 1
                        except json.decoder.JSONDecodeError:
                            stream_error.write("Error processing json object. Ignoring the following lines (starting at line "
                                               + str(current_batch_start_line) + "):\n\n")
                            stream_error.write(processed_buffer + "\n\n")
                            processed_buffer = ""
                            fail_batch_counter += 1

                elif depth > 0:
                    processed_buffer += c
                else:
                    stream_output.write(c)

                # flipping quotes status (and don't forget to exclude escaped quotes!)
                if c == '"' and prev_c != '\\':
                    inside_quotes = not inside_quotes

                # setting previous c. Only exception: if a double backslash (= escaped backslash)
                # occurs, ignore it, because it would break our escaped-character detection.
                prev_c = c if not (c == '\\' and prev_c == '\\') else ''

        stream_error.write("\n\nReached EOF. #Processed objects: " + str(success_batch_counter) + ", #failed objects: " + str(fail_batch_counter) + "\n\n")


if __name__ == "__main__":

    args = sys.argv[1:]

    if len(args) == 0:
        print("missing arguments")
        sys.exit(-1)

    reg_tuples = []
    for arg in args:
        t = regex_tuple.create_from_string(arg)
        if t is not None:  # skip malformed operation strings
            reg_tuples.append(t)

    streamer = json_streamer(None, reg_tuples)
    streamer.main_stream()
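The heart of `main_stream` above is the character scan that tracks brace depth and quoting to cut a stream into top-level objects. A stripped-down sketch of just that idea (`split_concatenated_json` is a hypothetical helper, not part of the module):

```python
def split_concatenated_json(text):
    """Yield top-level {...} chunks from concatenated JSON objects,
    ignoring braces inside strings (same trick as main_stream)."""
    depth = 0
    inside_quotes = False
    prev_c = ''
    buf = ''
    for c in text:
        if c == '{' and not inside_quotes:
            depth += 1
        if depth > 0:
            buf += c
        if c == '}' and not inside_quotes and depth > 0:
            depth -= 1
            if depth == 0:
                yield buf
                buf = ''
        # toggle string state, skipping escaped quotes
        if c == '"' and prev_c != '\\':
            inside_quotes = not inside_quotes
        prev_c = c if not (c == '\\' and prev_c == '\\') else ''

print(list(split_concatenated_json('{"a": "x{y}"} {"b": 2}')))
# -> ['{"a": "x{y}"}', '{"b": 2}']
```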
427  Project/naive_approach/naive_approach.ipynb  Normal file
@@ -0,0 +1,427 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 149,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from IPython.display import clear_output, Markdown, Math\n",
    "import ipywidgets as widgets\n",
    "import os\n",
    "import unicodedata as uni\n",
    "import numpy as np\n",
    "from nltk.stem import PorterStemmer\n",
    "from nltk.tokenize import sent_tokenize, word_tokenize\n",
    "from nltk.corpus import wordnet\n",
    "import math\n",
    "import pprint\n",
    "\n",
    "pp = pprint.PrettyPrinter(indent=4)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Naive Approach"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* read in table"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Unnamed: 0</th>\n",
       "      <th>code</th>\n",
       "      <th>character</th>\n",
       "      <th>description</th>\n",
       "      <th>Unnamed: 4</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>126980</td>\n",
       "      <td>🀄</td>\n",
       "      <td>MAHJONG TILE RED DRAGON</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>129525</td>\n",
       "      <td>🧵</td>\n",
       "      <td>SPOOL OF THREAD</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>129526</td>\n",
       "      <td>🧶</td>\n",
       "      <td>BALL OF YARN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>127183</td>\n",
       "      <td>🃏</td>\n",
       "      <td>PLAYING CARD BLACK JOKER</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>129296</td>\n",
       "      <td>🤐</td>\n",
       "      <td>ZIPPER-MOUTH FACE</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   Unnamed: 0    code character               description  Unnamed: 4\n",
       "0           0  126980         🀄   MAHJONG TILE RED DRAGON         NaN\n",
       "1           1  129525         🧵           SPOOL OF THREAD         NaN\n",
       "2           2  129526         🧶              BALL OF YARN         NaN\n",
       "3           3  127183         🃏  PLAYING CARD BLACK JOKER         NaN\n",
       "4           4  129296         🤐         ZIPPER-MOUTH FACE         NaN"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "table = pd.read_csv('../Tools/emoji_descriptions.csv')\n",
    "table.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* TODO: read in a lot of messages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "messages = [\"Hello, this is a testing message\", \"this is a very sunny day today, i am very happy\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "ps = PorterStemmer()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "stemmed_messages = []\n",
    "for m in messages:\n",
    "    words = word_tokenize(m)\n",
    "    sm = []\n",
    "    for w in words:\n",
    "        sm.append(ps.stem(w))\n",
    "    stemmed_messages.append(sm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[['hello', ',', 'thi', 'is', 'a', 'test', 'messag'],\n",
       " ['thi',\n",
       "  'is',\n",
       "  'a',\n",
       "  'veri',\n",
       "  'sunni',\n",
       "  'day',\n",
       "  'today',\n",
       "  ',',\n",
       "  'i',\n",
       "  'am',\n",
       "  'veri',\n",
       "  'happi']]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "stemmed_messages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1027, 5)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "table.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* compare words to emoji descriptions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate_sentence(sentence):\n",
    "    tokenized_sentence = word_tokenize(sentence)\n",
    "    n = len(tokenized_sentence)\n",
    "    l = table.shape[0]\n",
    "    matrix_list = []\n",
    "    \n",
    "    for index, row in table.iterrows():\n",
    "        emoji_tokens = word_tokenize(row['description'])\n",
    "        m = len(emoji_tokens)\n",
    "\n",
    "        mat = np.zeros(shape=(m,n))\n",
    "        for i in range(len(emoji_tokens)):\n",
    "            for j in range(len(tokenized_sentence)):\n",
    "                syn1 = wordnet.synsets(emoji_tokens[i])\n",
    "                if len(syn1) == 0:\n",
    "                    continue\n",
    "                w1 = syn1[0]\n",
    "                #print(j, tokenized_sentence)\n",
    "                syn2 = wordnet.synsets(tokenized_sentence[j])\n",
    "                if len(syn2) == 0:\n",
    "                    continue\n",
    "                w2 = syn2[0]\n",
    "                val = w1.wup_similarity(w2)\n",
    "                if val is None:\n",
    "                    continue\n",
    "                mat[i,j] = val\n",
    "        #print(row['character'], mat)\n",
    "        matrix_list.append(mat)\n",
    "    \n",
    "    return matrix_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 130,
   "metadata": {},
   "outputs": [],
   "source": [
    "result = evaluate_sentence(\"I like playing soccer\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* building a lookup table:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 131,
   "metadata": {},
   "outputs": [],
   "source": [
    "lookup = {}\n",
    "for index, row in table.iterrows():\n",
    "    lookup[index] = row['character']"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* sorting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 139,
   "metadata": {},
   "outputs": [],
   "source": [
    "summed = np.argsort([-np.sum(x) for x in result])\n",
    "max_val = np.argsort([-np.max(x) for x in result])\n",
    "avg = np.argsort([-np.mean(x) for x in result])\n",
    "\n",
    "t = 0.9\n",
    "threshold = np.argsort([-len(np.where(x>t)[0]) / (x.shape[0] * x.shape[1]) for x in result])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 156,
   "metadata": {},
   "outputs": [],
   "source": [
    "def print_best_results(sorted_indices, n=10):\n",
    "    pp.pprint([lookup[x] + \" -- \" + str(table.iloc[x]['description']) for x in sorted_indices[:n]])\n",
    "    pp.pprint([result[x] for x in sorted_indices[:n]])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 157,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[   '⚽ -- SOCCER BALL',\n",
      "    '🏉 -- RUGBY FOOTBALL',\n",
      "    '🏈 -- AMERICAN FOOTBALL',\n",
      "    '🎴 -- FLOWER PLAYING CARDS',\n",
      "    '🃏 -- PLAYING CARD BLACK JOKER',\n",
      "    '🇮 -- REGIONAL INDICATOR SYMBOL LETTER I',\n",
      "    '\\U0001f91f -- I LOVE YOU HAND SIGN',\n",
      "    '📧 -- E-MAIL SYMBOL',\n",
      "    '📭 -- OPEN MAILBOX WITH LOWERED FLAG',\n",
      "    '📥 -- INBOX TRAY']\n",
      "[   array([[0.25      , 0.28571429, 0.58333333, 1.        ],\n",
      "       [0.26666667, 0.10526316, 0.1       , 0.1       ]]),\n",
      "    array([[0.25      , 0.28571429, 0.58333333, 0.84615385],\n",
      "       [0.26666667, 0.3       , 0.60869565, 0.96      ]]),\n",
      "    array([[0.33333333, 0.125     , 0.11764706, 0.11764706],\n",
      "       [0.26666667, 0.3       , 0.60869565, 0.96      ]]),\n",
      "    array([[0.23529412, 0.0952381 , 0.09090909, 0.09090909],\n",
      "       [0.25      , 0.47619048, 1.        , 0.58333333],\n",
      "       [0.30769231, 0.33333333, 0.57142857, 0.7       ]]),\n",
      "    array([[0.25      , 0.47619048, 1.        , 0.58333333],\n",
      "       [0.53333333, 0.22222222, 0.21052632, 0.21052632],\n",
      "       [0.30769231, 0.22222222, 0.21052632, 0.21052632],\n",
      "       [0.28571429, 0.11111111, 0.10526316, 0.10526316]]),\n",
      "    array([[0.        , 0.        , 0.        , 0.        ],\n",
      "       [0.33333333, 0.23529412, 0.22222222, 0.22222222],\n",
      "       [0.4       , 0.26666667, 0.25      , 0.25      ],\n",
      "       [0.30769231, 0.22222222, 0.21052632, 0.21052632],\n",
      "       [1.        , 0.26666667, 0.25      , 0.25      ]]),\n",
      "    array([[1.        , 0.26666667, 0.25      , 0.25      ],\n",
      "       [0.33333333, 0.23529412, 0.22222222, 0.22222222],\n",
      "       [0.        , 0.        , 0.        , 0.        ],\n",
      "       [0.28571429, 0.11111111, 0.10526316, 0.10526316],\n",
      "       [0.33333333, 0.23529412, 0.22222222, 0.22222222]]),\n",
      "    array([[0.28571429, 0.31578947, 0.45454545, 0.5       ],\n",
      "       [0.4       , 0.26666667, 0.25      , 0.25      ]]),\n",
      "    array([[0.30769231, 0.11764706, 0.11111111, 0.11111111],\n",
      "       [0.26666667, 0.10526316, 0.1       , 0.1       ],\n",
      "       [0.        , 0.        , 0.        , 0.        ],\n",
      "       [0.22222222, 0.14285714, 0.13333333, 0.13333333],\n",
      "       [0.26666667, 0.10526316, 0.1       , 0.1       ]]),\n",
      "    array([[0.        , 0.        , 0.        , 0.        ],\n",
      "       [0.26666667, 0.10526316, 0.1       , 0.1       ]])]\n"
     ]
    }
   ],
   "source": [
    "print_best_results(threshold)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
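The scoring in the notebook above boils down to Wu-Palmer similarity between the first WordNet synsets of two words. A minimal standalone sketch of that core (assuming NLTK and its `wordnet` corpus are installed):

```python
from nltk.corpus import wordnet

def word_similarity(a, b):
    """Wu-Palmer similarity of the first synsets of two words;
    0.0 if either word has no synset (as in evaluate_sentence)."""
    syns_a, syns_b = wordnet.synsets(a), wordnet.synsets(b)
    if not syns_a or not syns_b:
        return 0.0
    return syns_a[0].wup_similarity(syns_b[0]) or 0.0

# one cell of the score matrix built by evaluate_sentence:
print(word_similarity("soccer", "ball"))
```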