crosswords/data/data_tests.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import re\n",
"import tqdm\n",
"from IPython.display import Markdown"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## prepare raw wikiextract"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"cat english.words| jq -s . > english.json"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"with open('english.json', 'r') as f:\n",
" english_vocab_db = json.load(f)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## helper functions to filter senses, synonyms and antonyms"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def filter_senses(senses_list, word):\n",
" filtered_senses = []\n",
" for entry in senses_list:\n",
" if 'glosses' in entry:\n",
" glosses = entry['glosses']\n",
" for gloss_entry in glosses:\n",
" for gloss in gloss_entry.split(';'):\n",
" s = re.sub('\\s+', ' ', re.sub(\"[\\(\\[].*?[\\)\\]]\", \"\", gloss).replace(\".\", \"\")).strip()\n",
" if len(s) > 0 and word not in s:\n",
" filtered_senses.append(s)\n",
" return filtered_senses"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def filter_synonyms(synonyms_list):\n",
" filtered_synonyms = []\n",
" for entry in synonyms_list:\n",
" if 'word' in entry:\n",
" filtered_synonyms.append(entry['word'])\n",
" return filtered_synonyms"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def filter_antonyms(antonym_list):\n",
" filtered_antonyms = []\n",
" for entry in antonym_list:\n",
" if 'word' in entry:\n",
" filtered_antonyms.append(entry['word'])\n",
" return filtered_antonyms"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def add_wiktionary_item(item, db):\n",
" item = {}\n",
" if 'word' not in item.keys():\n",
" return\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## build a dictionary_database from the raw extract\n",
"using words as keys, enumerate those keys (e.g. `house`, `house1`, `house2` etc.) on duplicates"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 1023195/1023195 [00:00<00:00, 1386092.63it/s]\n"
]
}
],
"source": [
"dictionary_database = {}\n",
"for item in tqdm.tqdm(english_vocab_db):\n",
" if 'word' in item:\n",
" word = item['word']\n",
" i = 0\n",
" while word in dictionary_database:\n",
" i += 1\n",
" word = f\"{item['word']}{i}\"\n",
" dictionary_database[word] = item\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"class NoDataException(Exception):\n",
" pass\n",
"\n",
"def get_attribute(word:str, attr:str, db = dictionary_database):\n",
" if word not in db:\n",
" raise NoDataException\n",
" item = db[word]\n",
" if attr not in item:\n",
" raise NoDataException\n",
" return item[attr]\n",
"\n",
"def get_senses(word: str):\n",
" \n",
" senses = get_attribute(word, 'senses')\n",
" \n",
" return filter_senses(senses, word)\n",
"\n",
"def get_synonyms(word: str):\n",
" syns = get_attribute(word, 'synonyms')\n",
" \n",
" return filter_synonyms(syns)\n",
"\n",
"def get_antonyms(word: str):\n",
" ants = get_attribute(word, 'antonyms')\n",
" \n",
" return filter_antonyms(ants)\n",
"\n",
"def display_info(word: str):\n",
" \n",
" if word not in dictionary_database:\n",
" raise NoDataException\n",
" \n",
" try:\n",
" senses = get_senses(word)\n",
" except NoDataException:\n",
" senses = []\n",
" \n",
" try:\n",
" synonyms = get_synonyms(word)\n",
" except NoDataException:\n",
" synonyms = []\n",
" \n",
" try:\n",
" antonyms = get_antonyms(word)\n",
" except NoDataException:\n",
" antonyms = []\n",
" \n",
" senses_str = '\\n'.join([f\"* {sense}\" for sense in senses])\n",
" \n",
" synonyms_str = '\\n'.join([f\"* {syn}\" for syn in synonyms])\n",
" \n",
" antonyms_str = '\\n'.join([f\"* {ant}\" for ant in antonyms])\n",
" \n",
" markdown_string = f\"\"\"\n",
"## {word}\n",
"\n",
"### Senses:\n",
"\n",
"{senses_str}\n",
"\n",
"### Synonyms:\n",
"\n",
"{synonyms_str}\n",
"\n",
"### Antonyms:\n",
"\n",
"{antonyms_str}\n",
" \n",
" \"\"\"\n",
" \n",
" return Markdown(markdown_string)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 1023195/1023195 [00:06<00:00, 159434.15it/s]\n"
]
}
],
"source": [
"import random\n",
"list_words = []\n",
"for word in tqdm.tqdm(dictionary_database.keys()):\n",
" item = dictionary_database[word]\n",
" try:\n",
" senses = get_senses(word)\n",
" if len(senses) > 1:\n",
" list_words.append(word)\n",
" \n",
" except NoDataException:\n",
" pass\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"\n",
"## cockmonger\n",
"\n",
"### Senses:\n",
"\n",
"* A frequent consumer of cocks\n",
"* A procurer of cocks\n",
"* A person with a large penis\n",
"\n",
"### Synonyms:\n",
"\n",
"\n",
"\n",
"### Antonyms:\n",
"\n",
"\n",
" \n",
" "
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(display_info(\"cockmonger\"))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## algorithm to build a grid of words"
]
},
{
"cell_type": "code",
"execution_count": 358,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import random\n",
"from string import digits\n",
"\n",
"class WordInfo(object):\n",
" def __init__(self, word:str, y:int, x:int, is_vertical: bool, database=dictionary_database):\n",
" self._dictionary_database = dictionary_database\n",
" self._y = y\n",
" self._x = x\n",
" self._word = word\n",
" self._hint = None\n",
" self._is_vertical = is_vertical\n",
" \n",
" self.choose_info()\n",
" \n",
" def get_best_antonym(self) -> str:\n",
" antonyms = get_antonyms(self._word)\n",
" return random.choice(antonyms)\n",
" \n",
" def get_best_synonym(self) -> str:\n",
" synonyms = get_synonyms(self._word)\n",
" return random.choice(synonyms)\n",
" \n",
" def get_best_sense(self) -> str:\n",
" senses = get_senses(self._word)\n",
" return random.choice(senses)\n",
" \n",
" def choose_info(self):\n",
" # first choose antonyms, then synonyms, then senses\n",
" \n",
" try:\n",
" self._hint = f\"opposite of {self.get_best_antonym()}\"\n",
" except NoDataException:\n",
" pass\n",
" \n",
" try:\n",
" self._hint = f\"other word for {self.get_best_synonym()}\"\n",
" except NoDataException:\n",
" pass\n",
" \n",
" self._hint = self.get_best_sense()\n",
" \n",
" def get_hint(self) -> str:\n",
" return self._hint\n",
" \n",
" def get_hint_location(self):\n",
" x = self._x if self._is_vertical else self._x - 1\n",
" y = self._y if self._is_vertical else self._y - 1\n",
" return (y, x)\n",
" \n",
" def is_vertical(self):\n",
" return self._is_vertical\n",
"\n",
"def create_word_grid(w: int, h: int, word_list = list_words, target_density = 0.5):\n",
" grid = np.full(shape=(h,w), dtype=np.unicode, fill_value = ' ')\n",
" \n",
" locations = {}\n",
" \n",
" word_hints = {}\n",
" \n",
" def store_location(char: str, y: int, x: int):\n",
" assert len(char) == 1\n",
" \n",
" if char not in locations:\n",
" locations[char] = []\n",
" \n",
" locations[char].append([y,x])\n",
" \n",
" remove_digits = str.maketrans('', '', digits)\n",
" n_words = len(list_words)\n",
" \n",
" def get_word(max_length: int, min_length = 0):\n",
" assert max_length > 1\n",
" \n",
" index = random.randint(0,n_words-1)\n",
" word = list_words[index][:]\n",
" \n",
" while len(word) >= max_length or not word.isalnum() or len(word) <= min_length:\n",
" index = random.randint(0,n_words-1)\n",
" word = list_words[index][:]\n",
" \n",
" return word\n",
" \n",
" def normalize_word(word:str):\n",
" word = word.translate(remove_digits)\n",
" return word.lower()\n",
" \n",
" def place_word(word:str, y: int, x:int, vertical:bool = False):\n",
" normalized_word = normalize_word(word)\n",
" n = len(normalized_word)\n",
" if vertical:\n",
" assert grid.shape[0] - n >= y\n",
" for i, char in enumerate(normalized_word):\n",
" grid[y + i,x] = char\n",
" store_location(char, y+i, x)\n",
" else:\n",
" assert grid.shape[1] - n >= x\n",
" for i, char in enumerate(normalized_word):\n",
" grid[y,x + i] = char\n",
" store_location(char, y, x+i)\n",
" \n",
" word_hints[normalized_word] = WordInfo(word, y, x, vertical)\n",
" \n",
" \n",
" \n",
" def density():\n",
" return 1 - (grid == \" \").sum() / (w * h)\n",
" \n",
" \n",
" \n",
" def check_if_fits(word:str, y:int, x:int, vertical:bool):\n",
" n = len(word)\n",
" if vertical:\n",
" \n",
" # check if there is space before and after\n",
" if y - 1 >= 0 and grid[y - 1, x] != \" \":\n",
" return False\n",
" if y + n < grid.shape[0] - 1 and grid[y+n,x] != \" \":\n",
" return False\n",
" \n",
" if grid.shape[0] - n < y or y < 0:\n",
" #print(\"over board\")\n",
" return False\n",
" \n",
" for i, char in enumerate(word):\n",
" char_x = x\n",
" char_y = y + i\n",
" \n",
" if not (grid[char_y, char_x] == \" \" or grid[char_y, char_x] == char):\n",
" #print(\"not matching\")\n",
" return False\n",
" \n",
" if grid[char_y, char_x] == \" \":\n",
" # check for horizonatal neighbors:\n",
" if char_x - 1 >= 0 and grid[char_y, char_x - 1] != \" \":\n",
" #print(\"3\")\n",
" return False\n",
" if char_x + 1 < grid.shape[1] and grid[char_y, char_x + 1] != \" \":\n",
" #print(\"4\")\n",
" return False\n",
" \n",
" else:\n",
" \n",
" # check if there is space before and after\n",
" if x - 1 >= 0 and grid[y, x - 1] != \" \":\n",
" return False\n",
" if x + n < grid.shape[1] - 1 and grid[y,x + n] != \" \":\n",
" return False\n",
" \n",
" if grid.shape[1] - n < x or x < 0:\n",
" #print(\"over board\")\n",
" return False\n",
" \n",
" for i, char in enumerate(word):\n",
" char_x = x + i\n",
" char_y = y\n",
" \n",
" if not (grid[char_y, char_x] == \" \" or grid[char_y, char_x] == char):\n",
" #print(\"not matching\")\n",
" return False\n",
" \n",
" if grid[char_y, char_x] == \" \":\n",
" # check for vertical neighbors:\n",
" if char_y - 1 >= 0 and grid[char_y - 1, char_x] != \" \":\n",
" #print(\"1\")\n",
" return False\n",
" if char_y + 1 < grid.shape[0] and grid[char_y + 1, char_x] != \" \":\n",
" #print(\"2\")\n",
" return False\n",
" \n",
" return True\n",
" \n",
" \n",
" def get_crossover(word: str):\n",
" # returns Tuple of: (y,x, is_vertical?) or None\n",
" \n",
" shuffled_order = list(range(len(word)))\n",
" random.shuffle(shuffled_order)\n",
" \n",
" for index in shuffled_order:\n",
" # check for existing locations\n",
" char = word[index]\n",
" if char in locations:\n",
" char_locations = locations[char]\n",
" \n",
" for char_loc in char_locations:\n",
" # test vertical\n",
" y = char_loc[0] - index\n",
" x = char_loc[1]\n",
" \n",
" if check_if_fits(word, y, x, vertical=True):\n",
" return (y,x,True)\n",
" \n",
" # test horizontal\n",
" y = char_loc[0]\n",
" x = char_loc[1] - index\n",
" \n",
" if check_if_fits(word, y, x, vertical=False):\n",
" return (y,x,False)\n",
" \n",
" return None\n",
" \n",
" min_shape = min(w,h,30)\n",
" \n",
" # place first word:\n",
" first_word = get_word(max_length=min_shape, min_length=min(10,grid.shape[1] - 2))\n",
" \n",
" # find random place:\n",
" x = random.randint(0, grid.shape[1] - len(first_word) - 1)\n",
" y = random.randint(0, grid.shape[0] - 1)\n",
" \n",
" place_word(first_word, y, x, vertical=False)\n",
" \n",
" i = 0\n",
" \n",
" \n",
" current_density = density()\n",
" \n",
" while current_density < target_density:\n",
" word = get_word(max_length=(1 - current_density ** 0.4) * min_shape,\n",
" min_length=max(min(10, 0.5 * (1 - current_density ** 0.3) * min_shape), 2))\n",
" \n",
" normalized_word = normalize_word(word)\n",
" \n",
" if normalized_word in word_hints:\n",
" continue\n",
" \n",
" # check if matching characters exist:\n",
" crossover = get_crossover(normalized_word)\n",
" \n",
" i += 1\n",
" if i % 100000 == 0:\n",
" print(i)\n",
" if i > 100000:\n",
" break\n",
" \n",
" if crossover == None:\n",
" current_density = density()\n",
" continue\n",
" \n",
" y,x,is_vertical = crossover\n",
" \n",
" place_word(word, y,x, is_vertical)\n",
" \n",
" current_density = density()\n",
" \n",
" print(i)\n",
" return grid, word_hints\n",
" \n",
" \n",
" \n",
" \n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 359,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"968\n"
]
},
{
"data": {
"text/markdown": [
"| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |\n",
"|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n",
"| | | | |**c**| |**a**|**e**|**r**|**o**|**p**|**h**|**o**|**b**|**i**|**c**| |**m**|**g**|**m**| |**f**|**a**|**d**|**d**|**y**| |**f**| | |**o**| |**c**| | |**g**|**r**|**a**|**y**| | | | |**q**|**u**|**a**|**y**| |\n",
"|**f**|**b**|**i**| |**l**| | | |**h**| | |**i**| |**f**| | | |**m**| | |**r**| |**c**| |**a**| | |**o**|**v**|**e**|**r**|**w**|**o**|**r**|**d**| |**p**| | |**t**| |**s**| |**u**| | | |**r**|\n",
"|**i**| | |**s**|**m**|**s**| | |**i**| | |**a**| | |**m**| | |**p**|**a**|**c**|**i**|**f**|**i**|**s**|**m**| | |**o**| |**x**| | |**m**| | |**p**|**a**|**i**|**g**|**e**| |**i**| |**a**| |**h**| |**e**|\n",
"|**g**|**l**|**e**|**e**| |**y**| |**c**|**a**|**r**|**o**|**t**|**e**|**n**|**o**|**l**| | | |**o**| | |**c**| | |**a**|**u**|**f**| |**p**|**o**|**p**|**p**|**l**|**e**| | |**m**| |**n**| |**l**| |**d**|**r**|**o**|**f**|**f**|\n",
"|**u**| | |**c**| |**m**| | |**n**| | |**t**| | |**r**| | |**d**|**o**|**g**|**g**|**i**|**l**|**y**| | | | |**s**| | | |**r**| | |**g**|**y**|**p**| |**t**| |**v**| |**r**| |**r**| |**a**|\n",
"|**r**| | |**o**|**r**|**p**|**h**|**a**|**n**|**r**|**y**| | | |**t**| |**e**| | |**n**| | |**e**| | | |**s**|**p**|**o**|**n**|**g**|**i**|**o**|**s**|**e**| | |**r**| | | |**e**| |**u**| |**s**| |**n**|\n",
"|**a**| | |**n**| |**t**| | |**o**| | | |**r**| |**a**|**f**|**r**|**o**|**d**|**i**|**t**|**e**| |**a**| |**a**| | |**l**| | | |**m**| | | |**c**|**o**|**l**| |**d**|**r**|**i**|**p**| |**e**| | |\n",
"|**l**| |**e**|**d**|**d**|**o**| | |**n**|**g**|**p**| |**y**| |**l**| |**v**| | |**z**| |**v**| |**m**| |**f**| | |**a**|**f**|**v**| |**i**| | | | |**p**| |**e**| |**i**| |**l**| |**l**| | |\n",
"| |**e**| |**e**| |**m**| |**s**| | | | |**n**| | |**n**| | |**w**|**a**|**g**|**e**|**n**|**b**|**o**|**o**|**m**| |**r**| |**a**|**r**|**s**|**e**|**w**|**i**|**p**|**e**| |**m**|**i**|**n**|**d**|**e**|**l**|**o**| |**u**|\n",
"|**u**|**l**|**l**|**r**| |**a**|**n**|**t**|**e**|**c**|**r**|**i**|**t**|**i**|**c**|**a**|**l**| | |**n**| |**n**| |**i**| |**u**| | |**y**| |**t**| |**e**| | |**m**| |**r**|**o**|**b**| |**g**| | | |**r**| |**n**|\n",
"| |**e**| | | |**t**| |**e**| |**r**| | | | | |**n**| |**p**| |**c**|**u**|**e**| |**t**| |**l**|**a**|**t**| |**m**| | | | | |**m**| | | |**r**| | |**t**| |**l**|**e**|**s**|**s**|\n",
"| |**c**|**u**|**r**|**r**|**i**|**e**|**r**| |**a**| |**o**|**x**|**a**|**z**|**o**|**l**|**i**|**n**|**e**| | | | |**w**| | | |**s**|**e**|**i**|**s**|**m**|**o**|**l**|**o**|**g**|**i**|**c**|**a**|**l**|**l**|**y**| | | | |**t**|\n",
"| |**t**| | | |**c**| |**c**| |**c**| | | | | |**m**| |**l**| | |**m**| |**s**| |**o**| | | |**h**| | |**c**| | | |**r**| | | |**i**| | |**p**| | | | |**i**|\n",
"|**c**|**r**|**a**|**m**|**p**| |**m**|**o**|**c**|**k**|**e**|**r**|**y**| |**a**|**a**|**a**|**e**| |**t**|**o**|**s**|**h**|**e**|**r**| |**r**|**h**|**a**|**p**|**s**|**o**|**d**|**i**|**s**|**t**|**i**|**c**| |**d**| |**p**|**a**|**p**|**p**|**y**| |**t**|\n",
"| |**e**| | | | | |**r**| |**i**| | | |**d**| |**c**| | | | |**g**| |**e**| |**m**| |**f**| | |**e**| | | | | |**a**| |**d**| | |**s**| |**l**| |**r**| |**o**|**c**|\n",
"| | |**d**|**m**| |**c**|**r**|**y**|**i**|**n**|**g**| |**p**|**u**|**s**|**h**|**b**|**a**|**l**|**l**| |**u**|**n**|**p**|**h**|**o**|**t**|**o**|**g**|**r**|**a**|**p**|**h**|**a**|**b**|**l**|**e**| |**m**| |**h**| | | |**i**| | |**h**|\n",
"| |**g**| |**u**| |**m**| | | |**g**| | | |**e**| |**i**| |**a**| | |**m**| |**a**| |**o**| | | | | |**a**| | |**l**| |**i**| | |**u**| |**e**|**s**|**k**|**i**|**m**|**o**| | |\n",
"| |**c**|**o**|**l**|**e**|**a**|**d**| | | | |**p**| | | |**n**| |**b**| | |**i**| |**n**| |**l**| |**u**|**n**|**v**|**i**|**s**|**u**|**a**|**l**|**i**|**z**|**a**|**b**|**l**|**e**| |**i**| | |**i**| | | |\n",
"| |**e**| |**t**| | | | | |**s**| |**l**|**e**|**t**|**t**|**e**|**r**| |**p**|**u**|**n**|**t**|**i**|**t**|**e**| | | | | | | |**d**| | |**e**| | |**t**| | |**m**| | |**g**| | | |\n",
"| | |**l**|**i**|**g**|**h**|**t**|**s**|**c**|**a**|**p**|**e**| | | |**r**| | | | | | |**g**| | | | |**c**|**t**|**c**| | |**s**|**i**|**e**| | |**l**|**a**|**m**|**b**|**a**|**s**|**t**|**e**|**d**| | |\n",
"| | | |**t**| | |**b**| | |**d**| |**a**| | |**p**|**y**|**r**|**a**|**z**|**o**|**l**|**e**|**a**|**m**|**i**|**d**|**e**| |**o**| |**f**| | | | | |**l**| |**n**| | | | | |**n**| | |**g**|\n",
"| | | |**h**| |**m**| |**b**| |**d**| |**d**| | |**a**| | | |**e**| | | |**n**| | |**r**| | |**i**| |**a**|**z**|**a**|**c**|**y**|**t**|**o**|**s**|**i**|**n**|**e**| | |**d**|**o**|**t**|**t**|**y**|\n",
"| | |**s**|**e**|**r**|**i**|**c**|**a**|**t**|**e**| |**i**| | |**l**| | | |**r**| |**n**| |**r**| | |**u**| | |**n**| |**w**| | | | | |**u**| |**m**| | | | | |**u**| | |**m**|\n",
"| |**j**| |**i**| |**n**| |**n**| |**n**| |**n**| |**d**|**e**|**p**|**o**|**s**|**i**|**t**|**a**|**r**|**y**| | |**m**|**o**|**r**|**g**|**e**|**n**| |**c**|**a**|**i**|**r**|**n**|**g**|**o**|**r**|**m**|**s**| | |**s**|**k**|**i**|**n**|\n",
"| |**o**| |**s**| |**y**| |**d**| | | |**g**| | | | |**u**| |**b**| |**i**| | |**r**| |**b**| |**a**| |**d**| |**v**| |**m**| | | | |**u**| | |**u**| |**r**| |**o**| |**i**|\n",
"| |**h**|**a**|**m**|**f**|**a**|**t**| |**c**|**y**|**c**|**l**|**o**|**p**|**a**|**e**|**d**|**i**|**a**| |**r**| |**c**|**o**|**l**|**l**|**a**|**r**| |**e**| |**h**|**e**|**a**|**d**|**h**|**o**|**u**|**s**|**e**| |**r**|**c**|**s**| |**k**| |**c**|\n",
"| |**n**| | | |**n**| | | | | |**y**| | | | | |**t**| |**t**|**a**|**u**| |**e**| |**e**| |**e**| |**n**| |**d**| |**r**| | | | | | | |**e**| |**o**| |**o**| | |\n"
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" "
]
},
{
"data": {
"text/plain": [
" 854104 function calls (854099 primitive calls) in 0.447 seconds\n",
"\n",
" Ordered by: internal time\n",
"\n",
" ncalls tottime percall cumtime percall filename:lineno(function)\n",
" 385008 0.289 0.000 0.306 0.000 <ipython-input-358-bafd40978d5d>:111(check_if_fits)\n",
" 968 0.072 0.000 0.383 0.000 <ipython-input-358-bafd40978d5d>:174(get_crossover)\n",
" 395346 0.018 0.000 0.018 0.000 {built-in method builtins.len}\n",
" 969 0.017 0.000 0.027 0.000 <ipython-input-358-bafd40978d5d>:106(density)\n",
" 969 0.008 0.000 0.008 0.000 {method 'reduce' of 'numpy.ufunc' objects}\n",
" 973 0.007 0.000 0.016 0.000 <ipython-input-358-bafd40978d5d>:72(get_word)\n",
" 1 0.005 0.005 0.444 0.444 <ipython-input-358-bafd40978d5d>:54(create_word_grid)\n",
" 9573 0.005 0.000 0.007 0.000 random.py:238(_randbelow_with_getrandbits)\n",
" 5549 0.003 0.000 0.007 0.000 random.py:291(randrange)\n",
" 968 0.002 0.000 0.005 0.000 random.py:349(shuffle)\n",
" 1123 0.002 0.000 0.002 0.000 {method 'sub' of 're.Pattern' objects}\n",
" 17858 0.002 0.000 0.002 0.000 {method 'getrandbits' of '_random.Random' objects}\n",
" 5549 0.002 0.000 0.009 0.000 random.py:335(randint)\n",
" 162 0.001 0.000 0.005 0.000 <ipython-input-3-e368cbcc470d>:1(filter_senses)\n",
" 162 0.001 0.000 0.010 0.000 <ipython-input-358-bafd40978d5d>:88(place_word)\n",
" 1134 0.001 0.000 0.001 0.000 {method 'translate' of 'str' objects}\n",
" 18 0.001 0.000 0.001 0.000 {method 'acquire' of '_thread.lock' objects}\n",
" 9573 0.001 0.000 0.001 0.000 {method 'bit_length' of 'int' objects}\n",
" 922 0.001 0.000 0.001 0.000 <ipython-input-358-bafd40978d5d>:61(store_location)\n",
" 969 0.001 0.000 0.009 0.000 {method 'sum' of 'numpy.ndarray' objects}\n",
" 1134 0.001 0.000 0.002 0.000 <ipython-input-358-bafd40978d5d>:84(normalize_word)\n",
" 974 0.001 0.000 0.001 0.000 {built-in method builtins.min}\n",
" 1092 0.001 0.000 0.001 0.000 re.py:289(_compile)\n",
" 27 0.001 0.000 0.001 0.000 <string>:9(<listcomp>)\n",
" 1092 0.001 0.000 0.003 0.000 re.py:203(sub)\n",
" 162 0.000 0.000 0.007 0.000 <ipython-input-358-bafd40978d5d>:28(choose_info)\n",
" 973 0.000 0.000 0.000 0.000 {built-in method builtins.max}\n",
" 486 0.000 0.000 0.000 0.000 <ipython-input-8-6946a10b23e1>:4(get_attribute)\n",
" 969 0.000 0.000 0.009 0.000 _methods.py:45(_sum)\n",
" 1 0.000 0.000 0.447 0.447 {built-in method builtins.exec}\n",
" 1229 0.000 0.000 0.000 0.000 {built-in method builtins.isinstance}\n",
" 162 0.000 0.000 0.008 0.000 <ipython-input-358-bafd40978d5d>:6(__init__)\n",
" 162 0.000 0.000 0.001 0.000 <ipython-input-358-bafd40978d5d>:16(get_best_antonym)\n",
" 1083 0.000 0.000 0.000 0.000 {method 'isalnum' of 'str' objects}\n",
" 162 0.000 0.000 0.006 0.000 <ipython-input-358-bafd40978d5d>:24(get_best_sense)\n",
" 1134 0.000 0.000 0.000 0.000 {method 'lower' of 'str' objects}\n",
" 1542 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}\n",
" 548 0.000 0.000 0.000 0.000 {method 'replace' of 'str' objects}\n",
" 162 0.000 0.000 0.006 0.000 <ipython-input-8-6946a10b23e1>:12(get_senses)\n",
" 424 0.000 0.000 0.000 0.000 {method 'split' of 'str' objects}\n",
" 188 0.000 0.000 0.000 0.000 random.py:344(choice)\n",
" 162 0.000 0.000 0.000 0.000 <ipython-input-8-6946a10b23e1>:23(get_antonyms)\n",
" 546 0.000 0.000 0.000 0.000 {method 'strip' of 'str' objects}\n",
" 162 0.000 0.000 0.000 0.000 <ipython-input-358-bafd40978d5d>:20(get_best_synonym)\n",
" 162 0.000 0.000 0.000 0.000 <ipython-input-8-6946a10b23e1>:18(get_synonyms)\n",
" 12 0.000 0.000 0.000 0.000 inspect.py:2926(_bind)\n",
" 8 0.000 0.000 0.000 0.000 socket.py:438(send)\n",
" 155 0.000 0.000 0.000 0.000 traitlets.py:564(__get__)\n",
" 1 0.000 0.000 0.446 0.446 <string>:1(<module>)\n",
" 11 0.000 0.000 0.000 0.000 formatters.py:397(lookup_by_type)\n",
" 1 0.000 0.000 0.001 0.001 <string>:2(grid2mdown)\n",
" 155 0.000 0.000 0.000 0.000 traitlets.py:533(get)\n",
" 4 0.000 0.000 0.000 0.000 encoder.py:304(iterencode)\n",
" 6/1 0.000 0.000 0.000 0.000 jsonutil.py:73(json_clean)\n",
" 21 0.000 0.000 0.000 0.000 <ipython-input-4-4a64f25b9fde>:1(filter_synonyms)\n",
" 5 0.000 0.000 0.000 0.000 {method 'update' of '_hashlib.HASH' objects}\n",
" 44 0.000 0.000 0.000 0.000 formatters.py:550(_in_deferred_types)\n",
" 1 0.000 0.000 0.001 0.001 formatters.py:89(format)\n",
" 12 0.000 0.000 0.000 0.000 inspect.py:2665(args)\n",
" 12 0.000 0.000 0.000 0.000 inspect.py:2718(apply_defaults)\n",
" 12 0.000 0.000 0.000 0.000 decorator.py:199(fix)\n",
" 8 0.000 0.000 0.000 0.000 iostream.py:195(schedule)\n",
" 1 0.000 0.000 0.000 0.000 {built-in method numpy.core._multiarray_umath.implement_array_function}\n",
" 129 0.000 0.000 0.000 0.000 {built-in method builtins.getattr}\n",
" 1 0.000 0.000 0.000 0.000 {built-in method numpy.empty}\n",
" 1 0.000 0.000 0.002 0.002 display.py:131(display)\n",
" 11 0.000 0.000 0.000 0.000 formatters.py:374(lookup)\n",
" 9 0.000 0.000 0.000 0.000 formatters.py:331(__call__)\n",
" 31 0.000 0.000 0.000 0.000 encoder.py:38(encode_basestring)\n",
" 12 0.000 0.000 0.001 0.000 decorator.py:229(fun)\n",
" 12 0.000 0.000 0.000 0.000 inspect.py:2688(kwargs)\n",
" 12 0.000 0.000 0.000 0.000 formatters.py:220(catch_format_error)\n",
" 1 0.000 0.000 0.001 0.001 zmqshell.py:97(publish)\n",
" 2 0.000 0.000 0.001 0.000 iostream.py:335(flush)\n",
" 1 0.000 0.000 0.000 0.000 {built-in method posix.stat}\n",
" 33 0.000 0.000 0.000 0.000 {method 'join' of 'str' objects}\n",
" 11 0.000 0.000 0.000 0.000 dir2.py:54(get_real_method)\n",
" 74 0.000 0.000 0.000 0.000 {built-in method builtins.next}\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:356(pretty)\n",
" 134 0.000 0.000 0.000 0.000 inspect.py:2577(kind)\n",
" 4 0.000 0.000 0.000 0.000 __init__.py:294(dumps)\n",
" 10 0.000 0.000 0.000 0.000 threading.py:1093(is_alive)\n",
" 2 0.000 0.000 0.000 0.000 iostream.py:384(write)\n",
" 1 0.000 0.000 0.000 0.000 session.py:662(send)\n",
" 1 0.000 0.000 0.000 0.000 formatters.py:689(__call__)\n",
" 12 0.000 0.000 0.000 0.000 {method 'format' of 'str' objects}\n",
" 1 0.000 0.000 0.000 0.000 numeric.py:268(full)\n",
" 2 0.000 0.000 0.000 0.000 {method 'isoformat' of 'datetime.datetime' objects}\n",
" 4 0.000 0.000 0.000 0.000 jsonapi.py:32(dumps)\n",
" 2 0.000 0.000 0.001 0.000 threading.py:280(wait)\n",
" 2 0.000 0.000 0.000 0.000 threading.py:228(__init__)\n",
" 1 0.000 0.000 0.000 0.000 session.py:603(serialize)\n",
" 1 0.000 0.000 0.001 0.001 display.py:81(publish_display_data)\n",
" 4 0.000 0.000 0.000 0.000 encoder.py:275(encode)\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:697(_repr_pprint)\n",
" 29 0.000 0.000 0.000 0.000 encoder.py:60(replace)\n",
" 1 0.000 0.000 0.000 0.000 <__array_function__ internals>:2(copyto)\n",
" 1 0.000 0.000 0.000 0.000 hmac.py:115(copy)\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:185(__init__)\n",
" 1 0.000 0.000 0.000 0.000 display.py:591(__init__)\n",
" 10 0.000 0.000 0.000 0.000 threading.py:1039(_wait_for_tstate_lock)\n",
" 12 0.000 0.000 0.000 0.000 inspect.py:3057(bind)\n",
" 4 0.000 0.000 0.000 0.000 encoder.py:141(__init__)\n",
" 5 0.000 0.000 0.000 0.000 <ipython-input-5-f2033d81c51c>:1(filter_antonyms)\n",
" 48 0.000 0.000 0.000 0.000 inspect.py:2882(parameters)\n",
" 12 0.000 0.000 0.000 0.000 abc.py:96(__instancecheck__)\n",
" 12 0.000 0.000 0.000 0.000 {built-in method _abc._abc_instancecheck}\n",
" 8 0.000 0.000 0.000 0.000 iostream.py:91(_event_pipe)\n",
" 22 0.000 0.000 0.000 0.000 {built-in method builtins.hasattr}\n",
" 2 0.000 0.000 0.001 0.000 threading.py:556(wait)\n",
" 1 0.000 0.000 0.000 0.000 {built-in method builtins.print}\n",
" 1 0.000 0.000 0.000 0.000 session.py:588(sign)\n",
" 1 0.000 0.000 0.001 0.001 zmqshell.py:80(_flush_streams)\n",
" 51 0.000 0.000 0.000 0.000 inspect.py:2565(name)\n",
" 36 0.000 0.000 0.000 0.000 {method 'items' of 'mappingproxy' objects}\n",
" 12 0.000 0.000 0.000 0.000 pretty.py:305(_get_mro)\n",
" 12 0.000 0.000 0.000 0.000 inspect.py:2657(__init__)\n",
" 11 0.000 0.000 0.000 0.000 formatters.py:274(_get_type)\n",
" 1 0.000 0.000 0.000 0.000 formatters.py:903(__call__)\n",
" 2 0.000 0.000 0.000 0.000 threading.py:521(__init__)\n",
" 24 0.000 0.000 0.000 0.000 {built-in method builtins.iter}\n",
" 1 0.000 0.000 0.000 0.000 session.py:149(utcnow)\n",
" 3 0.000 0.000 0.000 0.000 pretty.py:479(enq)\n",
" 12 0.000 0.000 0.000 0.000 {method 'values' of 'mappingproxy' objects}\n",
" 4 0.000 0.000 0.000 0.000 session.py:82(<lambda>)\n",
" 2 0.000 0.000 0.000 0.000 jsonutil.py:84(date_default)\n",
" 1 0.000 0.000 0.000 0.000 session.py:569(msg)\n",
" 1 0.000 0.000 0.000 0.000 contextlib.py:86(__init__)\n",
" 2 0.000 0.000 0.000 0.000 threading.py:259(__exit__)\n",
" 2 0.000 0.000 0.000 0.000 iostream.py:308(_is_master_process)\n",
" 2 0.000 0.000 0.000 0.000 pretty.py:264(begin_group)\n",
" 2 0.000 0.000 0.000 0.000 pretty.py:288(end_group)\n",
" 11 0.000 0.000 0.000 0.000 inspect.py:73(isclass)\n",
" 4 0.000 0.000 0.000 0.000 hmac.py:111(update)\n",
" 1 0.000 0.000 0.000 0.000 genericpath.py:16(exists)\n",
" 29 0.000 0.000 0.000 0.000 {method 'group' of 're.Match' objects}\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:216(text)\n",
" 3 0.000 0.000 0.000 0.000 {method 'copy' of '_hashlib.HASH' objects}\n",
" 11 0.000 0.000 0.000 0.000 formatters.py:359(_check_return)\n",
" 1 0.000 0.000 0.000 0.000 {method 'replace' of 'datetime.datetime' objects}\n",
" 1 0.000 0.000 0.000 0.000 formatters.py:949(__call__)\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:339(__init__)\n",
" 3 0.000 0.000 0.000 0.000 pretty.py:466(__init__)\n",
" 12 0.000 0.000 0.000 0.000 {built-in method builtins.id}\n",
" 1 0.000 0.000 0.000 0.000 {built-in method maketrans}\n",
" 3 0.000 0.000 0.000 0.000 pretty.py:401(_in_deferred_types)\n",
" 1 0.000 0.000 0.000 0.000 display.py:34(_safe_exists)\n",
" 1 0.000 0.000 0.000 0.000 session.py:566(msg_header)\n",
" 1 0.000 0.000 0.000 0.000 zmqshell.py:90(_hooks)\n",
" 10 0.000 0.000 0.000 0.000 threading.py:529(is_set)\n",
" 1 0.000 0.000 0.000 0.000 hmac.py:128(_current)\n",
" 2 0.000 0.000 0.000 0.000 threading.py:256(__enter__)\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:297(flush)\n",
" 1 0.000 0.000 0.000 0.000 contextlib.py:242(helper)\n",
" 1 0.000 0.000 0.000 0.000 iostream.py:260(send_multipart)\n",
" 1 0.000 0.000 0.000 0.000 configurable.py:551(initialized)\n",
" 5 0.000 0.000 0.000 0.000 {method 'encode' of 'str' objects}\n",
" 1 0.000 0.000 0.000 0.000 display.py:640(__repr__)\n",
" 2 0.000 0.000 0.000 0.000 threading.py:268(_acquire_restore)\n",
" 1 0.000 0.000 0.000 0.000 formatters.py:940(_check_return)\n",
" 2 0.000 0.000 0.000 0.000 pretty.py:168(group)\n",
" 14 0.000 0.000 0.000 0.000 {built-in method builtins.callable}\n",
" 1 0.000 0.000 0.000 0.000 session.py:224(extract_header)\n",
" 1 0.000 0.000 0.000 0.000 displaypub.py:43(_validate_data)\n",
" 1 0.000 0.000 0.000 0.000 session.py:513(msg_id)\n",
" 7 0.000 0.000 0.000 0.000 pretty.py:102(_safe_getattr)\n",
" 3 0.000 0.000 0.000 0.000 {built-in method posix.getpid}\n",
" 2 0.000 0.000 0.000 0.000 configurable.py:507(instance)\n",
" 1 0.000 0.000 0.000 0.000 session.py:744(<listcomp>)\n",
" 2 0.000 0.000 0.000 0.000 iostream.py:321(_schedule_flush)\n",
" 2 0.000 0.000 0.000 0.000 pretty.py:496(remove)\n",
" 2 0.000 0.000 0.000 0.000 threading.py:265(_release_save)\n",
" 10 0.000 0.000 0.000 0.000 {method 'append' of 'collections.deque' objects}\n",
" 1 0.000 0.000 0.000 0.000 hmac.py:147(hexdigest)\n",
" 1 0.000 0.000 0.000 0.000 session.py:218(msg_header)\n",
" 1 0.000 0.000 0.000 0.000 {built-in method builtins.repr}\n",
" 1 0.000 0.000 0.000 0.000 contextlib.py:121(__exit__)\n",
" 1 0.000 0.000 0.000 0.000 pretty.py:474(__init__)\n",
" 1 0.000 0.000 0.000 0.000 contextlib.py:112(__enter__)\n",
" 4 0.000 0.000 0.000 0.000 {built-in method _thread.allocate_lock}\n",
" 5 0.000 0.000 0.000 0.000 {method 'items' of 'dict' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'digest' of '_hashlib.HASH' objects}\n",
" 1 0.000 0.000 0.000 0.000 display.py:741(_repr_markdown_)\n",
" 4 0.000 0.000 0.000 0.000 {method 'clear' of 'dict' objects}\n",
" 2 0.000 0.000 0.000 0.000 jsonutil.py:31(_ensure_tzinfo)\n",
" 1 0.000 0.000 0.000 0.000 {built-in method utcnow}\n",
" 3 0.000 0.000 0.000 0.000 {method 'get' of 'mappingproxy' objects}\n",
" 2 0.000 0.000 0.000 0.000 threading.py:271(_is_owned)\n",
" 1 0.000 0.000 0.000 0.000 display.py:659(reload)\n",
" 1 0.000 0.000 0.000 0.000 multiarray.py:1043(copyto)\n",
" 1 0.000 0.000 0.000 0.000 iostream.py:207(send_multipart)\n",
" 1 0.000 0.000 0.000 0.000 {built-in method builtins.locals}\n",
" 2 0.000 0.000 0.000 0.000 {method 'remove' of 'list' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'hexdigest' of '_hashlib.HASH' objects}\n",
" 3 0.000 0.000 0.000 0.000 {method 'pop' of 'list' objects}\n",
" 1 0.000 0.000 0.000 0.000 display.py:652(_data_and_metadata)\n",
" 1 0.000 0.000 0.000 0.000 {method 'splitlines' of 'str' objects}\n",
" 3 0.000 0.000 0.000 0.000 {method 'get' of 'dict' objects}\n",
" 4 0.000 0.000 0.000 0.000 {method 'pop' of 'dict' objects}\n",
" 1 0.000 0.000 0.000 0.000 display.py:698(_check_data)\n",
" 2 0.000 0.000 0.000 0.000 {built-in method _imp.lock_held}\n",
" 2 0.000 0.000 0.000 0.000 {method '__enter__' of '_thread.lock' objects}\n",
" 1 0.000 0.000 0.000 0.000 {built-in method __new__ of type object at 0x90efa0}\n",
" 2 0.000 0.000 0.000 0.000 {method 'release' of '_thread.lock' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'getvalue' of '_io.StringIO' objects}\n",
" 2 0.000 0.000 0.000 0.000 {method '__exit__' of '_thread.lock' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'startswith' of 'str' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'write' of '_io.StringIO' objects}\n",
" 2 0.000 0.000 0.000 0.000 {method 'extend' of 'list' objects}\n",
" 1 0.000 0.000 0.000 0.000 {method 'clear' of 'collections.deque' objects}\n",
" 1 0.000 0.000 0.000 0.000 jsonutil.py:46(encode_images)\n",
" 1 0.000 0.000 0.000 0.000 formatters.py:824(_check_return)\n",
" 1 0.000 0.000 0.000 0.000 tz.py:74(utcoffset)\n",
" 1 0.000 0.000 0.000 0.000 {method 'copy' of 'dict' objects}"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"%%prun\n",
"gr, infos = create_word_grid(16*3,9*3, target_density=0.55)\n",
"def grid2mdown(grid):\n",
" h,w = grid.shape\n",
" \n",
" md_str = \" \".join([\"|\"] * (w+1)) + \"\\n\"\n",
" md_str += \":---:\".join([\"|\"] * (w+1)) + \"\\n\"\n",
" \n",
" for y in range(h):\n",
" md_str += \"|\" + \"|\".join([(f\"**{grid[y,x]}**\" if grid[y,x] != \" \" else \" \") for x in range(w)]) + \"|\\n\"\n",
" \n",
" return Markdown(md_str)\n",
"\n",
"display(grid2mdown(gr))\n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 279,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"edie -- surname\n"
]
}
],
"source": [
"i=220\n",
"\n",
"word = list(infos.keys())[i]\n",
"\n",
"print(word, \"--\", infos[word].get_hint())"
]
},
{
"cell_type": "code",
"execution_count": 360,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"\n",
"## wagenboom\n",
"\n",
"### Senses:\n",
"\n",
"* A South African proteaceous tree\n",
"* The tough wood of this tree, used for making wagon wheels\n",
"\n",
"### Synonyms:\n",
"\n",
"* waboom\n",
"* wagon tree\n",
"\n",
"### Antonyms:\n",
"\n",
"\n",
" \n",
" "
],
"text/plain": [
"<IPython.core.display.Markdown object>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"display(display_info(\"wagenboom\"))"
]
},
{
"cell_type": "code",
"execution_count": 374,
"metadata": {},
"outputs": [],
"source": [
"from enum import Enum\n",
"\n",
"class HintOrientation(Enum):\n",
" VERTICAL = 0\n",
" HORIZONTAL = 1\n",
" BOTH = 2\n",
" \n",
"class FieldType(Enum):\n",
" EMPTY = 0\n",
" HINT = 1\n",
" LETTER = 2\n",
"\n",
"class Field(object):\n",
" def __init__(self, field_type: FieldType = FieldType.EMPTY):\n",
" self._field_type = field_type\n",
" \n",
" def get_type(self) -> FieldType:\n",
" return self._field_type\n",
" \n",
" def get_content(self) -> str:\n",
" return None\n",
"\n",
"class HintField(Field):\n",
" def __init__(self, horizontal_hint: str = None, vertical_hint: str = None):\n",
" super().__init__(field_type=FieldType.HINT)\n",
" \n",
" self._horizontal_hint = horizontal_hint\n",
" self._vertical_hint = vertical_hint\n",
" \n",
" def get_horizontal_hint(self) -> str:\n",
" return self._horizontal_hint\n",
" \n",
" def get_vertical_hint(self) -> str:\n",
" return self._vertical_hint\n",
" \n",
" def set_horizintal_hint(hint: str):\n",
" self._horizontal_hint = hint\n",
" \n",
" def set_vertical_hint(hint:str):\n",
" self._vertical_hint = hint\n",
"\n",
"class LetterField(Field):\n",
" def __init__(self, letter: str):\n",
" assert len(letter) <= 1\n",
" \n",
" super().__init__(field_type = FieldType.LETTER)\n",
" \n",
" self._letter = letter\n",
" self._revealed = False\n",
" \n",
" def get_content(self) -> str:\n",
" return self._letter\n",
" \n",
" def reveal(self):\n",
" self._revealed = True\n",
" \n",
" def is_revealed(self) -> bool:\n",
" return self._revealed\n",
"\n",
"class Grid(object):\n",
" def __init__(self, width: int, height: int, density = 0.5):\n",
" self._width = width\n",
" self._height = height\n",
" self._density = density\n",
" self._grid = []\n",
" self._build_grid()\n",
" \n",
" def _build_grid(self):\n",
" raw_grid, word_infos = create_word_grid(self._width - 1, self._height - 1, target_density=self._density)\n",
" \n",
" # note: we will append an additional row and column, to have enough space to place hint fields\n",
" \n",
" self._grid = [[Field()] * self._width] # initialize with empty row\n",
" \n",
" for y in range(self._height):\n",
" row = [Field()] # initialize row with empty column\n",
" for x in range(self._width):\n",
" raw_cell = raw_grid[y-1,x-1]\n",
" if raw_cell == \" \":\n",
" row.append(Field())\n",
" else:\n",
" row.append(LetterField(raw_cell))\n",
" \n",
" self._grid.append(row)\n",
" \n",
" # place hint fields:\n",
" for word, info in word_infos.items():\n",
" y,x = info.get_hint_location()\n",
" # correct offset\n",
" y += 1\n",
" x += 1\n",
" \n",
" cell = self._grid[y][x]\n",
" \n",
" # check if we already have a hint here:\n",
" if cell.get_type() == FieldType.HINT:\n",
" if info.is_vertical():\n",
" cell.set_vertical_hint(info.get_hint())\n",
" else:\n",
" cell.set_horizintal_hint(info.get_hint())\n",
" elif cell.get_type() == FieldType.LETTER:\n",
" # edge case: a word has \"eaten up\" another one, skipping that case\n",
" pass\n",
" \n",
" else:\n",
" if info.is_vertical():\n",
" self._grid[y][x] = HintField(vertical_hint=info.get_hint())\n",
" else:\n",
" self._grid[y][x] = HintField(horizontal_hint=info.get_hint())\n",
"\n",
" \n",
"class Crossword(object):\n",
" def __init__(self, width: int, height: int, database: dict):\n",
" self._width = width\n",
" self._height = height\n",
" self._database = database\n",
" self._grid = Grid(width, height)\n",
" \n",
" \n",
" \n",
" "
]
},
{
"cell_type": "code",
"execution_count": 375,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"234\n"
]
}
],
"source": [
"cw = Crossword(16*3, 9*3, english_vocab_db)"
]
},
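{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal rendering sketch: walk the `Field` objects of a generated `Grid` and print each letter, `#` for hint cells and `.` for empty cells. The `render_grid` helper below is illustrative only and assumes the `cw` instance from the previous cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"def render_grid(grid: Grid) -> str:\n",
"    # build one text line per row of Field objects\n",
"    lines = []\n",
"    for row in grid._grid:\n",
"        chars = []\n",
"        for cell in row:\n",
"            if cell.get_type() == FieldType.LETTER:\n",
"                chars.append(cell.get_content())\n",
"            elif cell.get_type() == FieldType.HINT:\n",
"                chars.append('#')\n",
"            else:\n",
"                chars.append('.')\n",
"        lines.append(''.join(chars))\n",
"    return '\\n'.join(lines)\n",
"\n",
"print(render_grid(cw._grid))"
]
},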
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}