crosswords/data/data_tests_de.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"source": [
"import json\n",
"import re\n",
"import tqdm\n",
"from IPython.display import Markdown\n",
"\n",
"from bz2file import BZ2File\n",
"from wiktionary_de_parser import Parser\n",
"\n",
"from parse import *\n",
"\n",
"import wikitextparser as wtp\n",
"import numpy as np\n",
"import unidecode"
],
"outputs": [],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 2,
"source": [
"def extract_data(wikitext: str, title: str):\n",
"\n",
" data = {\n",
" 'word': title\n",
" }\n",
"\n",
" splitted_text = wikitext.split(\"\\n\")\n",
"\n",
" senses = []\n",
" synonyms = []\n",
" antonyms = []\n",
" num_translations = 0\n",
"\n",
" senses_on = False\n",
" synonyms_on = False\n",
" antonyms_on = False\n",
" translations_on = False\n",
" first_translation_line = False\n",
"\n",
" for i, line in enumerate(splitted_text):\n",
"\n",
" if senses_on:\n",
" try:\n",
" sense = parse(\":[{}] {}\", line)[1]\n",
" sense = sense.replace(\"]\",\"\")\n",
" sense = sense.replace(\"[\",\"\")\n",
" senses.append(sense)\n",
" continue\n",
" except:\n",
" senses_on = False\n",
" \n",
" if synonyms_on:\n",
" try:\n",
" synonym = parse(\":[{}] {}\", line)[1]\n",
" synonym = synonym.replace(\"]\",\"\")\n",
" synonym = synonym.replace(\"[\",\"\")\n",
" synonyms.append(synonym)\n",
" continue\n",
" except:\n",
" synonyms_on = False\n",
" \n",
" if antonyms_on:\n",
" try:\n",
" antonym = parse(\":[{}] {}\", line)[1]\n",
" antonym = antonym.replace(\"]\",\"\")\n",
" antonym = antonym.replace(\"[\",\"\")\n",
" antonyms.append(antonym)\n",
" continue\n",
" except:\n",
" antonyms_on = False\n",
" \n",
" if translations_on:\n",
" if first_translation_line:\n",
" first_translation_line = False\n",
" continue\n",
" try:\n",
" _, __, ___ = parse(\"{} [{}] {}\", line)\n",
" num_translations += 2 # very simple heuristic, assuming there are two translation items in each row :D\n",
" continue\n",
" except:\n",
" translations_on = False\n",
"\n",
" if \"{{Bedeutungen}}\" in line:\n",
" senses_on = True\n",
" continue\n",
"\n",
" if \"{{Gegenwörter}}\" in line:\n",
" antonyms_on = True\n",
" continue\n",
"\n",
" if \"{{Synonyme}}\" in line:\n",
" synonyms_on = True\n",
" continue\n",
"\n",
" if \"{{Übersetzungen}}\" in line:\n",
" translations_on = True\n",
" first_translation_line = True\n",
" continue\n",
" \n",
" data['senses'] = senses\n",
" data['synonyms'] = synonyms\n",
" data['antonyms'] = antonyms\n",
" data['num_translations'] = num_translations\n",
"\n",
" \n",
" return data\n"
],
"outputs": [],
"metadata": {}
},
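{
"cell_type": "markdown",
"source": [
"A quick smoke test of `extract_data` on a small hand-written snippet (not taken from the dump), to check that the section flags switch on and off as intended:"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"sample_wikitext = \"\\n\".join([\n",
"    \"{{Bedeutungen}}\",\n",
"    \":[1] ein [[Beispiel]]\",\n",
"    \":[2] noch ein Beispiel\",\n",
"    \"{{Synonyme}}\",\n",
"    \":[1] [[Muster]]\",\n",
"])\n",
"\n",
"# expected: two senses, one synonym, no antonyms or translations\n",
"extract_data(sample_wikitext, \"Testwort\")"
],
"outputs": [],
"metadata": {}
},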
{
"cell_type": "code",
"execution_count": 3,
"source": [
"bzfile_path = \"dewiktionary-20210201-pages-articles.xml.bz2\"\n",
"bz = BZ2File(bzfile_path)\n",
"\n",
"db = {}\n",
"\n",
"for i, record in enumerate(tqdm.tqdm(Parser(bz))):\n",
" if 'langCode' not in record or record['langCode'] != 'de':\n",
" continue\n",
"\n",
" word = record['title']\n",
" word = word.replace(\"ä\", \"ae\")\n",
" word = word.replace(\"ü\", \"ue\")\n",
" word = word.replace(\"ö\", \"oe\")\n",
" word = word.replace(\"Ä\", \"Ae\")\n",
" word = word.replace(\"Ü\", \"Ue\")\n",
" word = word.replace(\"Ö\", \"Oe\")\n",
" word = word.replace(\"ß\", \"ss\")\n",
"\n",
" \n",
" data = extract_data(record['wikitext'], word)\n",
" \n",
"\n",
" title = word.lower()\n",
" key = title\n",
"\n",
" j = 0\n",
"\n",
" while key in db:\n",
" key = title + str(j)\n",
" j += 1\n",
" \n",
" db[key] = data\n",
"\n",
" i += 1\n",
"\n"
],
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"989351it [07:14, 2274.95it/s]\n"
]
}
],
"metadata": {
"tags": []
}
},
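{
"cell_type": "markdown",
"source": [
"The transliteration above can be sanity-checked in isolation; `transliterate` is a hypothetical helper that just repeats the same replacement chain:"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"def transliterate(word: str):\n",
"    # same replacement chain as in the loop above\n",
"    for src, dst in [(\"ä\", \"ae\"), (\"ü\", \"ue\"), (\"ö\", \"oe\"),\n",
"                     (\"Ä\", \"Ae\"), (\"Ü\", \"Ue\"), (\"Ö\", \"Oe\"), (\"ß\", \"ss\")]:\n",
"        word = word.replace(src, dst)\n",
"    return word\n",
"\n",
"transliterate(\"Größenmaß\")  # should give 'Groessenmass'"
],
"outputs": [],
"metadata": {}
},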
{
"cell_type": "markdown",
"source": [
"## filter database from links and only keep \"relevant\" items"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 4,
"source": [
"def clean_text(s:str):\n",
"\n",
" if s.startswith(\"{{\"):\n",
" raw_categories, text = parse(\"{{{{{}}}}}{}\", s)\n",
" categories = raw_categories.split('|')\n",
" if \"K\" in categories:\n",
" categories.remove(\"K\")\n",
" \n",
" categories_text = f\"({', '.join(categories)}):\"\n",
" else:\n",
" categories_text = \"\"\n",
" text = s\n",
"\n",
" cleaned_text = text\n",
" while \"<ref \" in cleaned_text and not \"</ref>\" in cleaned_text:\n",
" pre = \"\"\n",
" post = \"\"\n",
" try:\n",
" pre, _, post = parse(\"{}<ref{}/>{}\", cleaned_text)\n",
" except:\n",
" try:\n",
" _, post = parse(\"<ref{}/>{}\", cleaned_text)\n",
" except:\n",
" try:\n",
" pre, _ = parse(\"{}<ref{}/>\", cleaned_text)\n",
" except:\n",
" print(\"cannot clean inline ref from text\")\n",
" break\n",
" cleaned_text = pre + post\n",
"\n",
" while \"<ref\" in cleaned_text:\n",
" pre = \"\"\n",
" post = \"\"\n",
" try:\n",
" pre, _, post = parse(\"{}<ref{}</ref>{}\", cleaned_text)\n",
" except:\n",
" try:\n",
" _, post = parse(\"<ref{}</ref>{}\", cleaned_text)\n",
" except:\n",
" try:\n",
" pre, _ = parse(\"{}<ref{}</ref>\", cleaned_text)\n",
" except:\n",
" print(\"cannot clean ref from text\")\n",
" break\n",
" cleaned_text = pre + post\n",
"\n",
" cleaned_text = cleaned_text.replace(\"'\",\"\")\n",
"\n",
" text = categories_text + cleaned_text\n",
"\n",
" text = text.replace(\"::\", \":\")\n",
"\n",
" return text\n",
"\n"
],
"outputs": [],
"metadata": {
"tags": []
}
},
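{
"cell_type": "markdown",
"source": [
"A small example of `clean_text` on a hand-made entry (not from the dump): the `{{K|...}}` category template becomes a prefix and the `<ref>` block is stripped:"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"# expected: '(Biologie): ein Tier mit Fell'\n",
"clean_text(\"{{K|Biologie}} ein Tier mit Fell<ref>Quelle</ref>\")"
],
"outputs": [],
"metadata": {}
},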
{
"cell_type": "code",
"execution_count": 12,
"source": [
"def word_wise_filtering(sentence: str):\n",
" words = sentence.split()\n",
" for i, word in enumerate(words.copy()):\n",
" if \"|\" in word:\n",
" splitted = word.split(\"|\")\n",
" if len(splitted) == 2:\n",
" words[i] = splitted[1]\n",
" return \" \".join(words)\n"
],
"outputs": [],
"metadata": {}
},
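{
"cell_type": "markdown",
"source": [
"Two quick sanity checks follow: `parse(\"{}[{:d}]{}\", ...)` needs text on both sides of the `[n]` marker, so a bare `\"[12]\"` does not match; and `word_wise_filtering` leaves tokens with more than one `|` untouched."
],
"metadata": {}
},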
{
"cell_type": "code",
"execution_count": 23,
"source": [
"parse(\"{}[{:d}]{}\", \"[12]\") is None"
],
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"True"
]
},
"metadata": {},
"execution_count": 23
}
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 14,
"source": [
"word_wise_filtering(\"hallo welten|welt|df\")"
],
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'hallo welten|welt|df'"
]
},
"metadata": {},
"execution_count": 14
}
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 7,
"source": [
"filtered_db = {}\n",
"\n",
"for key, item in tqdm.tqdm(db.items()):\n",
" senses = item['senses']\n",
" synonyms = item['synonyms']\n",
" antonyms = item['antonyms']\n",
"\n",
" def filter_entries(entries: list):\n",
" new_list = []\n",
" for entry in entries:\n",
" try:\n",
" cleaned_entry = clean_text(entry)\n",
" # sort out bad hints:\n",
" if \"{\" in cleaned_entry:\n",
" continue\n",
" if item['word'].lower() in cleaned_entry.lower():\n",
" continue\n",
" if \"familienname\" in cleaned_entry.lower():\n",
" continue\n",
" if \"ortsteil von\" in cleaned_entry.lower():\n",
" continue\n",
" if \"dorf in\" in cleaned_entry.lower():\n",
" continue\n",
" if parse(\"{}[{:d}]{}\", cleaned_entry) is not None:\n",
" continue\n",
" if entry.isupper(): # try to sort out initialisms\n",
" continue\n",
" \n",
" \n",
" cleaned_entry = word_wise_filtering (cleaned_entry)\n",
"\n",
" new_list.append(cleaned_entry)\n",
" except:\n",
" #print(\"cannot process item\", entry)\n",
" # just skipping unprocessable items\n",
" pass\n",
"\n",
" \n",
" return new_list\n",
" \n",
" item['senses'] = filter_entries(senses)\n",
" item['synonyms'] = filter_entries(synonyms)\n",
" item['antonyms'] = filter_entries(antonyms)\n",
"\n",
" # clean key from special characters:\n",
" unaccented_key = unidecode.unidecode(key)\n",
" unaccented_word = unidecode.unidecode(item['word'])\n",
"\n",
" def has_digit(s: str):\n",
" for c in s:\n",
" if c.isdigit():\n",
" return True\n",
" return False\n",
"\n",
" n_hints = len(item['senses']) + len(item['synonyms']) + len(item['antonyms'])\n",
"\n",
" if (n_hints == 1 and item['num_translations'] >= 6) or n_hints > 1:\n",
" if not has_digit(item['word']):\n",
" item['word'] = unaccented_word\n",
" filtered_db[unaccented_key] = item\n"
],
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
" 1%|▏ | 9931/726049 [00:00<00:13, 51192.22it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
" 3%|▎ | 22420/726049 [00:00<00:12, 58324.47it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
" 5%|▍ | 35021/726049 [00:00<00:11, 60170.85it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean inline ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
" 10%|▉ | 71566/726049 [00:00<00:05, 112907.67it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
" 18%|█▊ | 130217/726049 [00:01<00:03, 165772.31it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
" 33%|███▎ | 237155/726049 [00:01<00:01, 253304.61it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
" 40%|███▉ | 287866/726049 [00:01<00:01, 240537.77it/s]cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
"cannot clean ref from text\n",
" 53%|█████▎ | 387184/726049 [00:02<00:01, 300131.76it/s]cannot clean ref from text\n",
" 65%|██████▌ | 474466/726049 [00:02<00:01, 245717.70it/s]cannot clean ref from text\n",
"cannot clean inline ref from text\n",
"cannot clean ref from text\n",
" 85%|████████▌ | 618002/726049 [00:02<00:00, 284270.19it/s]cannot clean ref from text\n",
"100%|██████████| 726049/726049 [00:03<00:00, 215277.92it/s]\n"
]
}
],
"metadata": {
"tags": []
}
},
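{
"cell_type": "markdown",
"source": [
"## spot-check the results and export"
],
"metadata": {}
},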
{
"cell_type": "code",
"execution_count": 13,
"source": [
"db['muenchen']"
],
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"{'word': 'Muenchen',\n",
" 'senses': ['Stadt in Deutschland, die Hauptstadt von Bayern',\n",
" 'Ortsteil von Bad Berka in Thüringen',\n",
" 'Ortsteil von Hutthurm in Bayern',\n",
" 'Ortsteil von Hirschbach in Bayern',\n",
" 'Ortsteil von Uebigau-Wahrenbrück in Brandenburg',\n",
" 'Dorf in Nordböhmen (Mnichov u Lučního Chvojna)',\n",
" 'Wüstung (Mnichov) bei Zahrádky u České Lípy in Nordböhmen'],\n",
" 'synonyms': [],\n",
" 'antonyms': [],\n",
" 'num_translations': 72}"
]
},
"metadata": {},
"execution_count": 13
}
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 16,
"source": [
"len(filtered_db)"
],
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"64385"
]
},
"metadata": {},
"execution_count": 16
}
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": 18,
"source": [
"with open('de.json', 'w') as f:\n",
" json.dump(filtered_db, f, indent = 4)"
],
"outputs": [],
"metadata": {}
},
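{
"cell_type": "markdown",
"source": [
"As a round-trip check (a minimal sketch, assuming `de.json` was just written by the cell above), the export can be reloaded and compared against the in-memory database:"
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [
"with open('de.json') as f:\n",
"    reloaded = json.load(f)\n",
"\n",
"assert len(reloaded) == len(filtered_db)\n",
"len(reloaded)"
],
"outputs": [],
"metadata": {}
},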
{
"cell_type": "code",
"execution_count": 25,
"source": [
"l = [1,2,3]\n",
"l.remove(4)\n",
"l"
],
"outputs": [
{
"output_type": "error",
"ename": "ValueError",
"evalue": "list.remove(x): x not in list",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-25-0101445e8506>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0ml\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0ml\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mremove\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m4\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mValueError\u001b[0m: list.remove(x): x not in list"
]
}
],
"metadata": {}
},
{
"cell_type": "code",
"execution_count": null,
"source": [],
"outputs": [],
"metadata": {}
}
],
"metadata": {
"kernelspec": {
"name": "python3",
"display_name": "Python 3.9.5 64-bit"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.5"
},
"interpreter": {
"hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
}
},
"nbformat": 4,
"nbformat_minor": 4
}