updated english database
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 1,
+   "execution_count": 3,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -31,7 +31,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 3,
+   "execution_count": 4,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -39,6 +39,24 @@
     "    english_vocab_db = json.load(f)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "output_type": "execute_result",
+     "data": {
+      "text/plain": [
+       "1023195"
+      ]
+     },
+     "metadata": {},
+     "execution_count": 7
+    }
+   ],
+   "source": []
+  },
   {
    "cell_type": "markdown",
    "metadata": {},
@@ -48,7 +66,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -67,7 +85,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 5,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -81,7 +99,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 6,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -95,7 +113,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 7,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -116,17 +134,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 8,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "100%|██████████| 1023195/1023195 [00:00<00:00, 1368488.50it/s]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "dictionary_database = {}\n",
     "for item in tqdm.tqdm(english_vocab_db):\n",
@@ -142,7 +152,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 9,
+   "execution_count": null,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -221,17 +231,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 13,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "100%|██████████| 1023195/1023195 [00:08<00:00, 125917.77it/s]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "import random\n",
     "list_words = []\n",
@@ -257,39 +259,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 11,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/markdown": [
-       "\n",
-       "## cockmonger\n",
-       "\n",
-       "### Senses:\n",
-       "\n",
-       "* A frequent consumer of cocks\n",
-       "* A procurer of cocks\n",
-       "* A person with a large penis\n",
-       "\n",
-       "### Synonyms:\n",
-       "\n",
-       "\n",
-       "\n",
-       "### Antonyms:\n",
-       "\n",
-       "\n",
-       "    \n",
-       "    "
-      ],
-      "text/plain": [
-       "<IPython.core.display.Markdown object>"
-      ]
-     },
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
+   "outputs": [],
    "source": [
     "display(display_info(\"cockmonger\"))"
    ]
@@ -304,17 +276,9 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": null,
    "metadata": {},
-   "outputs": [
-    {
-     "name": "stderr",
-     "output_type": "stream",
-     "text": [
-      "100%|██████████| 68842/68842 [00:01<00:00, 50914.78it/s]\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
     "## store an optimized library version for production\n",
     "optimized_lib = {}\n",
@@ -343,6 +307,15 @@
     "    optimized_lib[word] = item"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "for key,item in optimized_lib"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": 15,
@@ -2390,9 +2363,8 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "Python 3",
-   "language": "python",
-   "name": "python3"
+   "name": "python3",
+   "display_name": "Python 3.9.5 64-bit"
   },
   "language_info": {
    "codemirror_mode": {
@@ -2405,8 +2377,11 @@
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
    "version": "3.9.5"
+  },
+  "interpreter": {
+   "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
   }
  },
  "nbformat": 4,
  "nbformat_minor": 4
 }
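The hunk above also adds a cell whose entire source is `for key,item in optimized_lib`; as committed it is incomplete and would raise a SyntaxError if run. A minimal sketch of what such an iteration could look like (the loop body is purely illustrative and not part of the commit):

```python
# Hypothetical completion of the truncated cell: iterate over the optimized
# word database built in the previous cell and peek at a single entry.
for key, item in optimized_lib.items():
    print(key, item)  # item is expected to hold the hints stored for this word
    break
```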
							
								
								
									
data/data_tests_de.ipynb (new file, 482 lines)
@@ -0,0 +1,482 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import json\n",
+    "import re\n",
+    "import tqdm\n",
+    "from IPython.display import Markdown\n",
+    "\n",
+    "from bz2file import BZ2File\n",
+    "from wiktionary_de_parser import Parser\n",
+    "\n",
+    "from parse import *\n",
+    "\n",
+    "import wikitextparser as wtp\n",
+    "import numpy as np\n",
+    "import unidecode"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def extract_data(wikitext: str, title: str):\n",
+    "\n",
+    "    data = {\n",
+    "        'word': title\n",
+    "    }\n",
+    "\n",
+    "    splitted_text = wikitext.split(\"\\n\")\n",
+    "\n",
+    "    senses = []\n",
+    "    synonyms = []\n",
+    "    antonyms = []\n",
+    "    num_translations = 0\n",
+    "\n",
+    "    senses_on = False\n",
+    "    synonyms_on = False\n",
+    "    antonyms_on = False\n",
+    "    translations_on = False\n",
+    "    first_translation_line = False\n",
+    "\n",
+    "    for i, line in enumerate(splitted_text):\n",
+    "\n",
+    "        if senses_on:\n",
+    "            try:\n",
+    "                sense = parse(\":[{}] {}\", line)[1]\n",
+    "                sense = sense.replace(\"]\",\"\")\n",
+    "                sense = sense.replace(\"[\",\"\")\n",
+    "                senses.append(sense)\n",
+    "                continue\n",
+    "            except:\n",
+    "                senses_on = False\n",
+    "        \n",
+    "        if synonyms_on:\n",
+    "            try:\n",
+    "                synonym = parse(\":[{}] {}\", line)[1]\n",
+    "                synonym = synonym.replace(\"]\",\"\")\n",
+    "                synonym = synonym.replace(\"[\",\"\")\n",
+    "                synonyms.append(synonym)\n",
+    "                continue\n",
+    "            except:\n",
+    "                synonyms_on = False\n",
+    "        \n",
+    "        if antonyms_on:\n",
+    "            try:\n",
+    "                antonym = parse(\":[{}] {}\", line)[1]\n",
+    "                antonym = antonym.replace(\"]\",\"\")\n",
+    "                antonym = antonym.replace(\"[\",\"\")\n",
+    "                antonyms.append(antonym)\n",
+    "                continue\n",
+    "            except:\n",
+    "                antonyms_on = False\n",
+    "        \n",
+    "        if translations_on:\n",
+    "            if first_translation_line:\n",
+    "                first_translation_line = False\n",
+    "                continue\n",
+    "            try:\n",
+    "                _, __, ___ = parse(\"{} [{}] {}\", line)\n",
+    "                num_translations += 2 # very simple heuristic, assuming there are two translation items in each row :D\n",
+    "                continue\n",
+    "            except:\n",
+    "                translations_on = False\n",
+    "\n",
+    "        if \"{{Bedeutungen}}\" in line:\n",
+    "            senses_on = True\n",
+    "            continue\n",
+    "\n",
+    "        if \"{{Gegenwörter}}\" in line:\n",
+    "            antonyms_on = True\n",
+    "            continue\n",
+    "\n",
+    "        if \"{{Synonyme}}\" in line:\n",
+    "            synonyms_on = True\n",
+    "            continue\n",
+    "\n",
+    "        if \"{{Übersetzungen}}\" in line:\n",
+    "            translations_on = True\n",
+    "            first_translation_line = True\n",
+    "            continue\n",
+    "    \n",
+    "    data['senses'] = senses\n",
+    "    data['synonyms'] = synonyms\n",
+    "    data['antonyms'] = antonyms\n",
+    "    data['num_translations'] = num_translations\n",
+    "\n",
+    "    \n",
+    "    return data\n"
+   ]
+  },
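The cell above defines `extract_data`, a line-oriented scanner that walks one German Wiktionary article and collects senses, synonyms, antonyms and a rough translation count. A hypothetical way to exercise it on an invented snippet (not taken from the dump), assuming the `parse` package imported in the first cell:

```python
# Invented wikitext fragment in the section layout the scanner expects.
sample_wikitext = "\n".join([
    "{{Bedeutungen}}",
    ":[1] [[Stadt]] in [[Deutschland]]",
    "",
    "{{Synonyme}}",
    ":[1] [[Ort]]",
])

entry = extract_data(sample_wikitext, "Beispiel")
# entry is a dict with 'word', 'senses', 'synonyms', 'antonyms', 'num_translations';
# wiki-link brackets are stripped, so the first sense should read roughly "Stadt in Deutschland".
print(entry)
```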
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stderr",
+     "text": [
+      "989351it [04:32, 3636.28it/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "bzfile_path = \"dewiktionary-20210201-pages-articles.xml.bz2\"\n",
+    "bz = BZ2File(bzfile_path)\n",
+    "\n",
+    "db = {}\n",
+    "\n",
+    "for i, record in enumerate(tqdm.tqdm(Parser(bz))):\n",
+    "    if 'langCode' not in record or record['langCode'] != 'de':\n",
+    "        continue\n",
+    "\n",
+    "    word = record['title']\n",
+    "    word = word.replace(\"ä\", \"ae\")\n",
+    "    word = word.replace(\"ü\", \"ue\")\n",
+    "    word = word.replace(\"ö\", \"oe\")\n",
+    "    word = word.replace(\"ß\", \"ss\")\n",
+    "\n",
+    "    \n",
+    "    data = extract_data(record['wikitext'], word)\n",
+    "    \n",
+    "\n",
+    "    title = word.lower()\n",
+    "    key = title\n",
+    "\n",
+    "    j = 0\n",
+    "\n",
+    "    while key in db:\n",
+    "        key = title + str(j)\n",
+    "        j += 1\n",
+    "    \n",
+    "    db[key] = data\n",
+    "\n",
+    "    i += 1\n",
+    "\n"
+   ]
+  },
+  {
+   "source": [
+    "## filter database from links and only keep \"relevant\" items"
+   ],
+   "cell_type": "markdown",
+   "metadata": {}
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "def clean_text(s:str):\n",
+    "\n",
+    "    if s.startswith(\"{{\"):\n",
+    "        raw_categories, text = parse(\"{{{{{}}}}}{}\", s)\n",
+    "        categories = raw_categories.split('|')\n",
+    "        if \"K\" in categories:\n",
+    "            categories.remove(\"K\")\n",
+    "        \n",
+    "        categories_text = f\"({', '.join(categories)}):\"\n",
+    "    else:\n",
+    "        categories_text = \"\"\n",
+    "        text = s\n",
+    "\n",
+    "    cleaned_text = text\n",
+    "    while \"<ref \" in cleaned_text and not \"</ref>\" in cleaned_text:\n",
+    "        pre = \"\"\n",
+    "        post = \"\"\n",
+    "        try:\n",
+    "            pre, _, post = parse(\"{}<ref{}/>{}\", cleaned_text)\n",
+    "        except:\n",
+    "            try:\n",
+    "                _, post = parse(\"<ref{}/>{}\", cleaned_text)\n",
+    "            except:\n",
+    "                try:\n",
+    "                    pre, _ = parse(\"{}<ref{}/>\", cleaned_text)\n",
+    "                except:\n",
+    "                    print(\"cannot clean inline ref from text\")\n",
+    "                    break\n",
+    "        cleaned_text = pre + post\n",
+    "\n",
+    "    while \"<ref\" in cleaned_text:\n",
+    "        pre = \"\"\n",
+    "        post = \"\"\n",
+    "        try:\n",
+    "            pre, _, post = parse(\"{}<ref{}</ref>{}\", cleaned_text)\n",
+    "        except:\n",
+    "            try:\n",
+    "                _, post = parse(\"<ref{}</ref>{}\", cleaned_text)\n",
+    "            except:\n",
+    "                try:\n",
+    "                    pre, _ = parse(\"{}<ref{}</ref>\", cleaned_text)\n",
+    "                except:\n",
+    "                    print(\"cannot clean ref from text\")\n",
+    "                    break\n",
+    "        cleaned_text = pre + post\n",
+    "\n",
+    "    cleaned_text = cleaned_text.replace(\"'\",\"\")\n",
+    "\n",
+    "    text = categories_text + cleaned_text\n",
+    "\n",
+    "    text = text.replace(\"::\", \":\")\n",
+    "\n",
+    "    return text\n",
+    "\n"
+   ]
+  },
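The `clean_text` helper above turns a leading `{{K|...}}` category marker into a prefix and strips `<ref>` footnotes via repeated `parse` attempts. A rough, hypothetical illustration with an invented input string, assuming the `parse` package behaves as in the notebook:

```python
# Invented sense string: category marker plus an inline self-closing <ref/> tag.
raw_sense = "{{K|Geografie}} groesste [[Stadt]] Bayerns<ref name=\"duden\"/>"
print(clean_text(raw_sense))
# should print something close to: (Geografie): groesste [[Stadt]] Bayerns
# (the K marker becomes a category prefix, the <ref/> tag is removed)
```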
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [
+    {
+     "output_type": "stream",
+     "name": "stderr",
+     "text": [
+      "  1%|▏         | 9931/726049 [00:00<00:13, 51192.22it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "  3%|▎         | 22420/726049 [00:00<00:12, 58324.47it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "  5%|▍         | 35021/726049 [00:00<00:11, 60170.85it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean inline ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      " 10%|▉         | 71566/726049 [00:00<00:05, 112907.67it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      " 18%|█▊        | 130217/726049 [00:01<00:03, 165772.31it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      " 33%|███▎      | 237155/726049 [00:01<00:01, 253304.61it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      " 40%|███▉      | 287866/726049 [00:01<00:01, 240537.77it/s]cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      "cannot clean ref from text\n",
+      " 53%|█████▎    | 387184/726049 [00:02<00:01, 300131.76it/s]cannot clean ref from text\n",
+      " 65%|██████▌   | 474466/726049 [00:02<00:01, 245717.70it/s]cannot clean ref from text\n",
+      "cannot clean inline ref from text\n",
+      "cannot clean ref from text\n",
+      " 85%|████████▌ | 618002/726049 [00:02<00:00, 284270.19it/s]cannot clean ref from text\n",
+      "100%|██████████| 726049/726049 [00:03<00:00, 215277.92it/s]\n"
+     ]
+    }
+   ],
+   "source": [
+    "filtered_db = {}\n",
+    "\n",
+    "for key, item in tqdm.tqdm(db.items()):\n",
+    "    senses = item['senses']\n",
+    "    synonyms = item['synonyms']\n",
+    "    antonyms = item['antonyms']\n",
+    "\n",
+    "    def filter_entries(entries: list):\n",
+    "        new_list = []\n",
+    "        for entry in entries:\n",
+    "            try:\n",
+    "                cleaned_entry = clean_text(entry)\n",
+    "                # sort out bad hints:\n",
+    "                if \"{\" in cleaned_entry:\n",
+    "                    continue\n",
+    "                if item['word'].lower() in cleaned_entry.lower():\n",
+    "                    continue\n",
+    "                if \"familienname\" in cleaned_entry.lower():\n",
+    "                    continue\n",
+    "                if \"ortsteil von\" in cleaned_entry.lower():\n",
+    "                    continue\n",
+    "                if \"dorf in\" in cleaned_entry.lower():\n",
+    "                    continue\n",
+    "                if entry.isupper(): # try to sort out initialisms\n",
+    "                    continue\n",
+    "                \n",
+    "                cleaned_entry = cleaned_entry.replace(\"|\",\"/\")\n",
+    "\n",
+    "                new_list.append(cleaned_entry)\n",
+    "            except:\n",
+    "                #print(\"cannot process item\", entry)\n",
+    "                # just skipping unprocessable items\n",
+    "                pass\n",
+    "\n",
+    "    \n",
+    "        return new_list\n",
+    "    \n",
+    "    item['senses'] = filter_entries(senses)\n",
+    "    item['synonyms'] = filter_entries(synonyms)\n",
+    "    item['antonyms'] = filter_entries(antonyms)\n",
+    "\n",
+    "    # clean key from special characters:\n",
+    "    unaccented_key = unidecode.unidecode(key)\n",
+    "    unaccented_word = unidecode.unidecode(item['word'])\n",
+    "\n",
+    "    def has_digit(s: str):\n",
+    "        for c in s:\n",
+    "            if c.isdigit():\n",
+    "                return True\n",
+    "        return False\n",
+    "\n",
+    "    n_hints = len(item['senses']) + len(item['synonyms']) + len(item['antonyms'])\n",
+    "\n",
+    "    if (n_hints == 1 and item['num_translations'] >= 6) or n_hints > 1:\n",
+    "        if not has_digit(item['word']):\n",
+    "            item['word'] = unaccented_word\n",
+    "            filtered_db[unaccented_key] = item\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 13,
+   "metadata": {},
+   "outputs": [
+    {
+     "output_type": "execute_result",
+     "data": {
+      "text/plain": [
+       "{'word': 'Muenchen',\n",
+       " 'senses': ['Stadt in Deutschland, die Hauptstadt von Bayern',\n",
+       "  'Ortsteil von Bad Berka in Thüringen',\n",
+       "  'Ortsteil von Hutthurm in Bayern',\n",
+       "  'Ortsteil von Hirschbach in Bayern',\n",
+       "  'Ortsteil von Uebigau-Wahrenbrück in Brandenburg',\n",
+       "  'Dorf in Nordböhmen (Mnichov u Lučního Chvojna)',\n",
+       "  'Wüstung (Mnichov) bei Zahrádky u České Lípy in Nordböhmen'],\n",
+       " 'synonyms': [],\n",
+       " 'antonyms': [],\n",
+       " 'num_translations': 72}"
+      ]
+     },
+     "metadata": {},
+     "execution_count": 13
+    }
+   ],
+   "source": [
+    "db['muenchen']"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [
+    {
+     "output_type": "execute_result",
+     "data": {
+      "text/plain": [
+       "64385"
+      ]
+     },
+     "metadata": {},
+     "execution_count": 16
+    }
+   ],
+   "source": [
+    "len(filtered_db)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "with open('de.json', 'w') as f:\n",
+    "    json.dump(filtered_db, f, indent = 4)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "output_type": "error",
+     "ename": "ValueError",
+     "evalue": "list.remove(x): x not in list",
+     "traceback": [
+      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+      "\u001b[0;31mValueError\u001b[0m                                Traceback (most recent call last)",
+      "\u001b[0;32m<ipython-input-25-0101445e8506>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[0ml\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0ml\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mremove\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m4\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      3\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+      "\u001b[0;31mValueError\u001b[0m: list.remove(x): x not in list"
+     ]
+    }
+   ],
+   "source": [
+    "l = [1,2,3]\n",
+    "l.remove(4)\n",
+    "l"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "name": "python3",
+   "display_name": "Python 3.9.5 64-bit"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.5"
+  },
+  "interpreter": {
+   "hash": "916dbcbb3f70747c44a77c7bcd40155683ae19c65e1c03b4aa3499c5328201f1"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
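Once the `json.dump` cell above has written `de.json`, the filtered database can be read back wherever the hints are served. A minimal consumer sketch, assuming the file sits in the working directory; the lookup key is hypothetical but follows the keying scheme used in the notebook:

```python
import json

# Load the German hint database produced by the notebook above.
with open("de.json") as f:
    german_vocab_db = json.load(f)

# Keys are lower-cased words with umlauts transliterated (ae/oe/ue/ss),
# e.g. "muenchen"; duplicate titles get a numeric suffix ("muenchen0", ...).
entry = german_vocab_db.get("muenchen")
if entry is not None:
    print(entry["word"], len(entry["senses"]), entry["num_translations"])
```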
							
								
								
									
										2490
									
								
								data/data_tests_en.ipynb
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										2490
									
								
								data/data_tests_en.ipynb
									
									
									
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										
server/en.json (stored with Git LFS; binary file not shown)