Improved German and English DB
.gitignore (vendored, 1 changed line)
@@ -162,3 +162,4 @@ cython_debug/
 
 
 .cache/
+.vscode/
data/de.json (1422530 changed lines)
File diff suppressed because it is too large
@@ -123,7 +123,7 @@
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"/tmp/ipykernel_157490/121043459.py:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
+"/tmp/ipykernel_177453/1748613008.py:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
 " from tqdm.autonotebook import tqdm\n"
 ]
 }
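The only change in this hunk is output churn: the kernel temp path differs between runs. The TqdmExperimentalWarning itself can be silenced before the import if it is unwanted; a minimal sketch, not part of the commit (`TqdmExperimentalWarning` is exported by `tqdm`):

import warnings
from tqdm import TqdmExperimentalWarning

# suppress the experimental-API notice emitted by tqdm.autonotebook
warnings.filterwarnings("ignore", category=TqdmExperimentalWarning)

from tqdm.autonotebook import tqdm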
@@ -134,14 +134,20 @@
 "import requests\n",
 "from tqdm.autonotebook import tqdm\n",
 "from pathlib import Path\n",
-"\n",
+"import json\n",
 "# some constants\n",
 "\n",
 "CACHE_DIR = Path(\"./.cache\")\n",
 "CACHE_DIR.mkdir(exist_ok=True)\n",
 "\n",
 "CRYPTICS_CROSSWORDS_DB_URL = \"https://cryptics.georgeho.org/data/clues.csv?_stream=on&_size=max\"\n",
-"CRYPTICS_CROSSWORDS_DB_CSV = CACHE_DIR / \"cryptics_clues.csv\""
+"CRYPTICS_CROSSWORDS_DB_CSV = CACHE_DIR / \"cryptics_clues.csv\"\n",
+"\n",
+"# german wiktionary data:\n",
+"\n",
+"\n",
+"COMPRESSED_DE_WIKTIONARY_DUMP_URL = \"https://kaikki.org/dewiktionary/raw-wiktextract-data.jsonl.gz\"\n",
+"COMPRESSED_DE_WIKTIONARY_DUMP = CACHE_DIR / \"de_wiktionary.jsonl.gz\""
 ]
 },
 {
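The new constants point at a kaikki.org wiktextract export: a gzip-compressed file with one JSON object per line (JSONL). A quick way to sanity-check the format before writing a parser (a sketch, assuming the dump already sits at the cache path defined above):

import gzip
import json
from itertools import islice

# peek at the first few entries without decompressing the whole dump
with gzip.open(".cache/de_wiktionary.jsonl.gz", "rt", encoding="utf-8") as f:
    for line in islice(f, 3):
        entry = json.loads(line)
        print(entry.get("word"), entry.get("lang_code"), sorted(entry)[:8])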
@@ -156,7 +162,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": null,
+"execution_count": 3,
 "id": "806a5c51",
 "metadata": {},
 "outputs": [
@@ -167,18 +173,10 @@
 "[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
 "[nltk_data] Package wordnet is already up-to-date!\n",
 "[nltk_data] Downloading package omw-1.4 to /home/jonas/nltk_data...\n",
-"[nltk_data] Package omw-1.4 is already up-to-date!\n"
+"[nltk_data] Package omw-1.4 is already up-to-date!\n",
+"[nltk_data] Downloading package omw to /home/jonas/nltk_data...\n",
+"[nltk_data] Package omw is already up-to-date!\n"
 ]
-},
-{
-"data": {
-"text/plain": [
-"True"
-]
-},
-"execution_count": 3,
-"metadata": {},
-"output_type": "execute_result"
 }
 ],
 "source": [
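The extra `omw` download tries to pull lemmas for more languages into WordNet; which languages actually become available can be listed after the download. A hedged sketch (assuming `wn.langs()` in the installed nltk, which lists the language codes the Open Multilingual Wordnet data provides):

import nltk
from nltk.corpus import wordnet as wn

nltk.download("wordnet", quiet=True)
nltk.download("omw-1.4", quiet=True)

# language codes with lemma data available via the OMW download
print(sorted(wn.langs()))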
@@ -192,7 +190,14 @@
 "import nltk\n",
 "nltk.download('wordnet')\n",
 "nltk.download('omw-1.4') # optional, extra languages / lemmas\n",
-"nltk.download('omw') # try the older omw package"
+"nltk.download('omw') # try the older omw package\n",
+"\n",
+"# download the german wiktionary dump if not already cached\n",
+"if not COMPRESSED_DE_WIKTIONARY_DUMP.exists():\n",
+"    response = requests.get(COMPRESSED_DE_WIKTIONARY_DUMP_URL, stream=True)\n",
+"    with open(COMPRESSED_DE_WIKTIONARY_DUMP, \"wb\") as f:\n",
+"        for chunk in tqdm(response.iter_content(chunk_size=8192), desc=\"Downloading de wiktionary dump\"):\n",
+"            f.write(chunk)"
 ]
 },
 {
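The added cell streams the dump straight to disk in 8 KiB chunks, so the archive never has to fit in memory; the tqdm bar counts chunks, not bytes. A variant sketch with an HTTP status check and a byte-accurate bar (`download_with_progress` is a hypothetical helper, not in the commit; Content-Length may be absent, leaving the bar indeterminate):

import requests
from tqdm.autonotebook import tqdm

def download_with_progress(url, dest, chunk_size=8192):
    with requests.get(url, stream=True, timeout=60) as response:
        response.raise_for_status()  # fail early instead of writing an HTML error page to disk
        total = int(response.headers.get("content-length", 0)) or None
        with open(dest, "wb") as f, tqdm(total=total, unit="B", unit_scale=True) as bar:
            for chunk in response.iter_content(chunk_size=chunk_size):
                f.write(chunk)
                bar.update(len(chunk))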
@@ -283,7 +288,7 @@
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "4c5f6f1c7c594df6946e51686feeee34",
+"model_id": "75b431216f3249c7879d87fe33f7817a",
 "version_major": 2,
 "version_minor": 0
 },
@@ -391,7 +396,7 @@
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "c411da19479944298117158962a08ea3",
+"model_id": "9f8d4c77558e478b9cf214d851fd503e",
 "version_major": 2,
 "version_minor": 0
 },
@@ -456,46 +461,193 @@
 "id": "1e246fd0",
 "metadata": {},
 "source": [
-"## Create German Database using OpenThesaurus\n",
-"\n",
-"* download data first:"
+"## Parse German Data\n"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 9,
-"id": "435d0b78",
+"id": "63953ce6",
 "metadata": {},
-"outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"OpenThesaurus available at .cache/openthesaurus.txt\n"
-]
-}
-],
+"outputs": [],
 "source": [
-"from multiplayer_crosswords.data_utils import download_openthesaurus\n",
-"\n",
-"openthesaurus_path = download_openthesaurus()"
-]
-},
-{
-"cell_type": "markdown",
-"id": "0de30aac",
-"metadata": {},
-"source": [
-"* parse the data"
+"de_db = Dictionary(entries={})"
 ]
 },
 {
 "cell_type": "code",
-"execution_count": null,
-"id": "07e4e97d",
+"execution_count": 10,
+"id": "435d0b78",
 "metadata": {},
-"outputs": [],
-"source": []
+"outputs": [
+{
+"data": {
+"application/vnd.jupyter.widget-view+json": {
+"model_id": "76b0adb1fab244feaa5ab51c985fbe5f",
+"version_major": 2,
+"version_minor": 0
+},
+"text/plain": [
+"Processing German Wiktionary entries: 0it [00:00, ?it/s]"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Processed 78859 entries from German Wiktionary dump.\n"
+]
+}
+],
+"source": [
+"# inspect data first. The file contains one JSON entry per line (gzipped jsonl)\n",
+"\n",
+"import gzip\n",
+"import json\n",
+"import re\n",
+"import difflib\n",
+"\n",
+"# helper to find the similarity between words. Used to filter out glosses with words too similar to the search word.\n",
+"def _similarity_ratio(word1, word2):\n",
+"    return difflib.SequenceMatcher(None, word1.lower(), word2.lower()).ratio()\n",
+"\n",
+"def _ascii_word(word):\n",
+"    word = word.lower()\n",
+"    word = word.replace(\"ä\", \"ae\")\n",
+"    word = word.replace(\"ö\", \"oe\")\n",
+"    word = word.replace(\"ü\", \"ue\")\n",
+"    word = word.replace(\"ß\", \"ss\")\n",
+"\n",
+"    return word\n",
+"\n",
+"def _only_ascii_chars_in_word(word):\n",
+"    # returns True if the word contains only ascii alphabet characters\n",
+"    return all(c.isascii() and c.isalpha() for c in word)\n",
+"\n",
+"\n",
+"def get_best_gloss_for_sense(word, sense):\n",
+"    normalized_word = word.lower()\n",
+"\n",
+"    glosses = sense.get(\"glosses\", [])\n",
+"    for g in glosses:\n",
+"        if normalized_word in g.lower():\n",
+"            continue\n",
+"\n",
+"        # Use similarity ratio to skip glosses that are too close to the word itself (e.g. simple variations).\n",
+"        # check each word in the gloss\n",
+"        gloss_words = re.findall(r'\\b\\w+\\b', g.lower())\n",
+"        found_similar = False\n",
+"        for gw in gloss_words:\n",
+"            if _similarity_ratio(normalized_word, gw) > 0.8:\n",
+"                #print(\"too similar:\", normalized_word, gw, \"->\", _similarity_ratio(normalized_word, gw), g)\n",
+"                found_similar = True\n",
+"                break\n",
+"\n",
+"        if found_similar:\n",
+"            continue\n",
+"        return g\n",
+"\n",
+"    return None\n",
+"\n",
+"def calculate_frequency_score(json_data):\n",
+"    # A heuristic to estimate word frequency/commonality based on available data\n",
+"    score = 0\n",
+"\n",
+"    # 1. Number of senses (polysemy): common words usually have multiple meanings\n",
+"    senses = json_data.get(\"senses\", [])\n",
+"    score += len(senses) * 2\n",
+"\n",
+"    # 2. Number of translations: common words are translated into many languages\n",
+"    translations = json_data.get(\"translations\", [])\n",
+"    score += len(translations) * 0.5\n",
+"\n",
+"    # 3. Has audio pronunciation? Common words usually do.\n",
+"    sounds = json_data.get(\"sounds\", [])\n",
+"    if sounds:\n",
+"        score += 5\n",
+"\n",
+"    # 4. Check for \"rare\", \"obsolete\", \"archaic\" tags in senses.\n",
+"    # If a word is ONLY archaic, it should be low frequency,\n",
+"    # but usually we just want to boost the \"normal\" ones.\n",
+"\n",
+"    # Normalize heavily.\n",
+"    # A word like \"Haus\" might have huge scores.\n",
+"    # We want a 0-100 scale.\n",
+"\n",
+"    return min(100, int(score))\n",
+"\n",
+"def process_entry(json_data, min_freq_score=10):\n",
+"    senses = json_data.get(\"senses\", [])\n",
+"    processed_senses = []\n",
+"    tags = set()\n",
+"    for sense in senses:\n",
+"        glosses = sense.get(\"glosses\", [])\n",
+"        topic_labels = sense.get(\"topics\", [])\n",
+"        best_gloss = get_best_gloss_for_sense(json_data.get(\"word\", \"\"), sense)\n",
+"        for topic in topic_labels:\n",
+"            tags.add(topic)\n",
+"        if best_gloss:\n",
+"            text = best_gloss\n",
+"            if topic_labels:\n",
+"                text = \", \".join(topic_labels) + \": \" + text\n",
+"            processed_senses.append(text)\n",
+"\n",
+"    # calculate frequency\n",
+"    freq = calculate_frequency_score(json_data)\n",
+"\n",
+"    if freq < min_freq_score:\n",
+"        return []  # skip low frequency words\n",
+"\n",
+"    if not _only_ascii_chars_in_word(_ascii_word(json_data.get(\"word\", \"\"))):\n",
+"        return []  # skip non-ascii words\n",
+"\n",
+"    if len(processed_senses) == 0:\n",
+"        return []  # skip entries with no valid senses\n",
+"\n",
+"    de_db.add_entry(WordEntry(\n",
+"        word=_ascii_word(json_data.get(\"word\", \"\").lower()),\n",
+"        senses=processed_senses,\n",
+"        synonyms=[],\n",
+"        antonyms=[],\n",
+"        word_frequency=freq,\n",
+"        source=\"de_wiktionary\",\n",
+"        categories=list(tags)\n",
+"    ))\n",
+"\n",
+"    return processed_senses\n",
+"\n",
+"def parse_entry(json_line):\n",
+"\n",
+"    #print(\"\\n\")\n",
+"    #print(\"Parsing entry:\", json_line)\n",
+"    json_data = json.loads(json_line)\n",
+"    lang_code = json_data.get(\"lang_code\", \"unknown\").lower()\n",
+"    if lang_code != \"de\":\n",
+"        return False\n",
+"    #print(\"word:\", json_data.get(\"word\"))\n",
+"    processed_senses = process_entry(json_data)\n",
+"    if len(processed_senses) == 0:\n",
+"        #print(\"No valid senses found, skipping.\")\n",
+"        return False\n",
+"    #print(\"Senses / glosses:\", processed_senses)\n",
+"    return True\n",
+"\n",
+"# read the file, unzipping on the fly using the gzip module\n",
+"# \"rt\" mode opens it as text, handling newlines correctly after decompression\n",
+"with gzip.open(COMPRESSED_DE_WIKTIONARY_DUMP, \"rt\", encoding=\"utf-8\") as f:\n",
+"    i = 0\n",
+"    for line in tqdm(f, desc=\"Processing German Wiktionary entries\"):\n",
+"        #if i >= 10:\n",
+"        #    break\n",
+"        if line.strip():\n",
+"            if parse_entry(line.strip()):\n",
+"                i += 1\n",
+"    print(f\"Processed {i} entries from German Wiktionary dump.\")"
+]
 },
 {
 "cell_type": "markdown",
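The gloss picker above relies on `difflib.SequenceMatcher`: a gloss is rejected when any word in it scores above 0.8 against the headword, which mostly filters out definitions that merely restate an inflected form. A standalone toy check of that cut-off (values approximate):

import difflib

def similarity_ratio(word1, word2):
    return difflib.SequenceMatcher(None, word1.lower(), word2.lower()).ratio()

# an inflected form of the headword lands above the 0.8 threshold -> gloss skipped
print(similarity_ratio("laufen", "laufend"))  # ~0.92
# an unrelated defining word scores low -> gloss kept
print(similarity_ratio("laufen", "gehen"))    # ~0.36

Note that `calculate_frequency_score` is a structural proxy (sense count, translation count, audio) rather than true corpus frequency, so the `min_freq_score=10` gate drops thinly documented entries, not necessarily rare words.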
@@ -509,28 +661,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 9,
+"execution_count": 11,
 "id": "69b67091",
 "metadata": {},
 "outputs": [
 {
 "data": {
 "application/vnd.jupyter.widget-view+json": {
-"model_id": "ef2252adfaf240c981a18567f5d23b45",
-"version_major": 2,
-"version_minor": 0
-},
-"text/plain": [
-" 0%| | 0/96407 [00:00<?, ?it/s]"
-]
-},
-"metadata": {},
-"output_type": "display_data"
-},
-{
-"data": {
-"application/vnd.jupyter.widget-view+json": {
-"model_id": "ef2252adfaf240c981a18567f5d23b45",
+"model_id": "6d1490c57118467fb95cdc111114f926",
 "version_major": 2,
 "version_minor": 0
 },
@@ -547,95 +685,75 @@
 "text": [
 "Wrote 74357 entries to en.json\n"
 ]
+},
+{
+"data": {
+"application/vnd.jupyter.widget-view+json": {
+"model_id": "daad07fbef564f06a7079d99a3291125",
+"version_major": 2,
+"version_minor": 0
+},
+"text/plain": [
+" 0%| | 0/77291 [00:00<?, ?it/s]"
+]
+},
+"metadata": {},
+"output_type": "display_data"
+},
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"Wrote 77291 entries to de.json\n"
+]
 }
 ],
 "source": [
 "EN_PATH = Path(\"./en.json\")\n",
+"DE_PATH = Path(\"./de.json\")\n",
+"\n",
+"# file, db tuples\n",
+"FILES_DBS = [\n",
+"    (EN_PATH, en_db),\n",
+"    (DE_PATH, de_db)\n",
+"]\n",
 "\n",
 "import json\n",
 "\n",
 "# entries to include:\n",
 "INCLUDED_SOURCES = {\n",
 "    #\"cryptics\",\n",
-"    \"wordnet\"\n",
+"    \"wordnet\",\n",
+"    \"de_wiktionary\"\n",
 "}\n",
 "\n",
-"with open(EN_PATH, \"w\") as f:\n",
-"    f.write(\"{\\n\")\n",
-"    i = 0\n",
-"    for key, value in tqdm(en_db.entries.items()):\n",
+"for FILE_PATH, DB in FILES_DBS:\n",
+"    with open(FILE_PATH, \"w\") as f:\n",
+"        f.write(\"{\\n\")\n",
+"        i = 0\n",
+"        for key, value in tqdm(DB.entries.items()):\n",
 "\n",
-"        if value.source not in INCLUDED_SOURCES:\n",
-"            continue\n",
-"\n",
-"        # dump json\n",
-"        if i > 0:\n",
-"            f.write(\",\\n\")\n",
-"        d_value = value.model_dump()\n",
-"        as_json = json.dumps(\n",
-"            d_value, indent=4\n",
-"        )\n",
-"        as_json = \"\\n    \".join(as_json.split(\"\\n\"))\n",
-"        as_json = \"    \\\"\" + key + \"\\\": \" + as_json\n",
-"        f.write(as_json)\n",
-"        i += 1\n",
-"\n",
-"    f.write(\"\\n}\\n\")\n",
-"    print(f\"Wrote {i} entries to {EN_PATH}\")"
+"            if value.source not in INCLUDED_SOURCES:\n",
+"                continue\n",
+"\n",
+"            # dump json\n",
+"            if i > 0:\n",
+"                f.write(\",\\n\")\n",
+"            d_value = value.model_dump()\n",
+"            as_json = json.dumps(\n",
+"                d_value, indent=4\n",
+"            )\n",
+"            as_json = \"\\n    \".join(as_json.split(\"\\n\"))\n",
+"            as_json = \"    \\\"\" + key + \"\\\": \" + as_json\n",
+"            f.write(as_json)\n",
+"            i += 1\n",
+"\n",
+"        f.write(\"\\n}\\n\")\n",
+"        print(f\"Wrote {i} entries to {FILE_PATH}\")\n",
-]
-},
-{
-"cell_type": "code",
-"execution_count": 10,
-"id": "c3f8d049",
-"metadata": {},
-"outputs": [],
-"source": [
-"test_synset = wn.synsets(\"house\")[0]"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 27,
-"id": "3870ea2c",
-"metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"[]"
-]
-},
-"execution_count": 27,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"test_synset.a"
-]
-},
-{
-"cell_type": "code",
-"execution_count": 26,
-"id": "7e7033cf",
-"metadata": {},
-"outputs": [
-{
-"data": {
-"text/plain": [
-"60"
-]
-},
-"execution_count": 26,
-"metadata": {},
-"output_type": "execute_result"
-}
-],
-"source": [
-"test_word_str = \"table\"\n",
 "\n",
-"frequency_metric(wn.synsets(test_word_str)[0].lemmas()[0], test_word_str ) "
+"\n",
+"\n",
+"\n"
 ]
 },
 {
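The rewritten export loop emits one entry at a time and manages the separating commas by hand (`if i > 0`), which keeps the hand-built JSON valid without ever serializing the whole database into one string. An equivalent, simpler sketch that trades memory for brevity (`export_db` is a hypothetical helper; it assumes the same `entries` mapping of pydantic models with `model_dump()` seen above):

import json

def export_db(db, path, included_sources):
    payload = {
        key: value.model_dump()
        for key, value in db.entries.items()
        if value.source in included_sources
    }
    with open(path, "w", encoding="utf-8") as f:
        json.dump(payload, f, indent=4)  # json.dump handles commas and escaping
    print(f"Wrote {len(payload)} entries to {path}")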
data/en.json (1301858 changed lines)
File diff suppressed because it is too large
pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "multiplayer-crosswords"
-version = "0.1.1"
+version = "0.2.0"
 description = ""
 authors = [
 {name="Jonas Weinz"}
@@ -17,7 +17,7 @@ dependencies = [
 ]
 [tool.poetry]
 name = "multiplayer-crosswords"
-version = "0.1.1"
+version = "0.2.0"
 description = ""
 authors = [
 "Jonas Weinz"
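The version is declared twice, once under `[project]` and once under the legacy `[tool.poetry]` table, and both were bumped to 0.2.0 in step. A small guard can keep the two from drifting apart; a sketch using the standard-library tomllib (Python 3.11+):

import tomllib

with open("pyproject.toml", "rb") as f:
    meta = tomllib.load(f)

# both tables must agree on the package version
assert meta["project"]["version"] == meta["tool"]["poetry"]["version"]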