4 Commits

SHA1 Message Date
a1b6e37354 improved german and english db 2026-02-01 19:28:25 +01:00
2a5069ed4b improvements in en db generation 2026-01-11 17:27:05 +01:00
54de8672dc small improvements 2025-11-16 10:05:31 +01:00
26108fe073 small improvements 2025-11-16 10:05:02 +01:00
13 changed files with 1805926 additions and 919644 deletions

.gitignore (vendored): 3 changes

@@ -160,3 +160,6 @@ cython_debug/
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
.cache/
.vscode/

README.md

@@ -1,2 +1,30 @@
# multiplayer_crosswords
This project is a web-based multiplayer crossword puzzle game that allows multiple users to collaborate in solving crossword puzzles in real-time. It features a user-friendly interface, session management, and real-time updates to enhance the collaborative experience.
## installation
1. Clone the repository:
```bash
git clone https://the-cake-is-a-lie.net/gitea/jonas/multiplayer_crosswords.git
cd multiplayer_crosswords
```
2. Install this repository as a package:
```bash
pip install .
```
## start the server
```bash
python -m multiplayer_crosswords.server.main
```
## start the webui
```bash
python -m multiplayer_crosswords.server.serve_frontend --port 8080
```
Then open your browser and navigate to `http://localhost:8080`.
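For local development it can be handy to launch both processes from one script. Below is a minimal sketch that does this with `subprocess`, reusing the module entry points and the port from the commands above; everything else (working directory, shutdown behaviour, default ports) is an assumption about the local setup, not documented behaviour.

```python
# Minimal sketch: start the game server and the front-end server together.
# The module paths and the --port flag come from the README commands above;
# default ports and shutdown handling are assumptions.
import subprocess
import sys

server = subprocess.Popen([sys.executable, "-m", "multiplayer_crosswords.server.main"])
frontend = subprocess.Popen(
    [sys.executable, "-m", "multiplayer_crosswords.server.serve_frontend", "--port", "8080"]
)
try:
    server.wait()
finally:
    frontend.terminate()
    server.terminate()
```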

data/de.json: 1422532 changes

File diff suppressed because it is too large.


@@ -0,0 +1,789 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "e048da07",
"metadata": {},
"source": [
"# Create Dictionaies for crossword clues\n",
"\n",
"this notebook creates dictionaries for crossword clues.\n",
"\n",
"The final dictionaries will be saved as json file, containing a list of entries in the following format:\n",
"\n",
"```json\n",
"{\n",
" \"<unique_word_entry>\": {\n",
" \"word\": \"<word>\",\n",
" \"senses\": [\n",
" \"<definition_1>\",\n",
" \"<definition_2>\",\n",
" \"...\" \n",
" ],\n",
" \"synonyms\": [\n",
" \"<synonym_1>\",\n",
" \"<synonym_2>\",\n",
" \"...\" \n",
" ],\n",
" \"antonyms\": [\n",
" \"<antonym_1>\",\n",
" \"<antonym_2>\",\n",
" \"...\" \n",
" ],\n",
" \"word_frequency\": <frequency_value (from 0 to 100)>\n",
"\n",
" },\n",
"}\n",
"```\n"
]
},
{
"cell_type": "markdown",
"id": "28040681",
"metadata": {},
"source": [
"### Install some dependencies for that notebook"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "f0aecff7",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Requirement already satisfied: tqdm in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (4.67.1)\n",
"Requirement already satisfied: pandas in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (2.3.0)\n",
"Requirement already satisfied: requests in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (2.32.5)\n",
"Requirement already satisfied: ipywidgets in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (8.1.8)\n",
"Requirement already satisfied: pydantic in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (2.12.4)\n",
"Requirement already satisfied: nltk in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (3.9.2)\n",
"Requirement already satisfied: numpy>=1.26.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pandas) (2.2.6)\n",
"Requirement already satisfied: python-dateutil>=2.8.2 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pandas) (2.9.0.post0)\n",
"Requirement already satisfied: pytz>=2020.1 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pandas) (2025.2)\n",
"Requirement already satisfied: tzdata>=2022.7 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pandas) (2025.2)\n",
"Requirement already satisfied: charset_normalizer<4,>=2 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from requests) (3.4.4)\n",
"Requirement already satisfied: idna<4,>=2.5 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from requests) (3.11)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from requests) (2.5.0)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from requests) (2025.11.12)\n",
"Requirement already satisfied: comm>=0.1.3 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipywidgets) (0.2.3)\n",
"Requirement already satisfied: ipython>=6.1.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipywidgets) (9.7.0)\n",
"Requirement already satisfied: traitlets>=4.3.1 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipywidgets) (5.14.3)\n",
"Requirement already satisfied: widgetsnbextension~=4.0.14 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipywidgets) (4.0.15)\n",
"Requirement already satisfied: jupyterlab_widgets~=3.0.15 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipywidgets) (3.0.16)\n",
"Requirement already satisfied: annotated-types>=0.6.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pydantic) (0.7.0)\n",
"Requirement already satisfied: pydantic-core==2.41.5 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pydantic) (2.41.5)\n",
"Requirement already satisfied: typing-extensions>=4.14.1 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pydantic) (4.15.0)\n",
"Requirement already satisfied: typing-inspection>=0.4.2 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pydantic) (0.4.2)\n",
"Requirement already satisfied: click in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from nltk) (8.3.1)\n",
"Requirement already satisfied: joblib in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from nltk) (1.5.2)\n",
"Requirement already satisfied: regex>=2021.8.3 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from nltk) (2025.11.3)\n",
"Requirement already satisfied: decorator>=4.3.2 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (5.2.1)\n",
"Requirement already satisfied: ipython-pygments-lexers>=1.0.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (1.1.1)\n",
"Requirement already satisfied: jedi>=0.18.1 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.19.2)\n",
"Requirement already satisfied: matplotlib-inline>=0.1.5 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.2.1)\n",
"Requirement already satisfied: pexpect>4.3 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (4.9.0)\n",
"Requirement already satisfied: prompt_toolkit<3.1.0,>=3.0.41 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.52)\n",
"Requirement already satisfied: pygments>=2.11.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (2.19.2)\n",
"Requirement already satisfied: stack_data>=0.6.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.3)\n",
"Requirement already satisfied: six>=1.5 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from python-dateutil>=2.8.2->pandas) (1.17.0)\n",
"Requirement already satisfied: parso<0.9.0,>=0.8.4 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from jedi>=0.18.1->ipython>=6.1.0->ipywidgets) (0.8.5)\n",
"Requirement already satisfied: ptyprocess>=0.5 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0)\n",
"Requirement already satisfied: wcwidth in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from prompt_toolkit<3.1.0,>=3.0.41->ipython>=6.1.0->ipywidgets) (0.2.14)\n",
"Requirement already satisfied: executing>=1.2.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from stack_data>=0.6.0->ipython>=6.1.0->ipywidgets) (2.2.1)\n",
"Requirement already satisfied: asttokens>=2.1.0 in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from stack_data>=0.6.0->ipython>=6.1.0->ipywidgets) (3.0.1)\n",
"Requirement already satisfied: pure-eval in /home/jonas/.cache/pypoetry/virtualenvs/multiplayer-crosswords-W02cfZ32-py3.12/lib/python3.12/site-packages (from stack_data>=0.6.0->ipython>=6.1.0->ipywidgets) (0.2.3)\n"
]
}
],
"source": [
"# install dependencies for this notebooks\n",
"\n",
"!pip install tqdm pandas requests ipywidgets pydantic nltk"
]
},
{
"cell_type": "markdown",
"id": "a0964dfc",
"metadata": {},
"source": [
"### Import Libraries and define Constants and source urls"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "e7d3d24f",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/tmp/ipykernel_177453/1748613008.py:4: TqdmExperimentalWarning: Using `tqdm.autonotebook.tqdm` in notebook mode. Use `tqdm.tqdm` instead to force console mode (e.g. in jupyter console)\n",
" from tqdm.autonotebook import tqdm\n"
]
}
],
"source": [
"# import necessary libraries\n",
"import pandas as pd\n",
"import requests\n",
"from tqdm.autonotebook import tqdm \n",
"from pathlib import Path\n",
"import json\n",
"# some constants\n",
"\n",
"CACHE_DIR = Path(\"./.cache\")\n",
"CACHE_DIR.mkdir(exist_ok=True)\n",
"\n",
"CRYPTICS_CROSSWORDS_DB_URL = \"https://cryptics.georgeho.org/data/clues.csv?_stream=on&_size=max\"\n",
"CRYPTICS_CROSSWORDS_DB_CSV = CACHE_DIR / \"cryptics_clues.csv\"\n",
"\n",
"# german wictionary data:\n",
"\n",
"\n",
"COMPRESSED_DE_WIKTIONARY_DUMP_URL = \"https://kaikki.org/dewiktionary/raw-wiktextract-data.jsonl.gz\"\n",
"COMPRESSED_DE_WIKTIONARY_DUMP = CACHE_DIR / \"de_wiktionary.jsonl.gz\""
]
},
{
"cell_type": "markdown",
"id": "61263a61",
"metadata": {},
"source": [
"## Download External Data\n",
"\n",
"* Crypticts DB (\"https://cryptics.georgeho.org/\")"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "806a5c51",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package wordnet to /home/jonas/nltk_data...\n",
"[nltk_data] Package wordnet is already up-to-date!\n",
"[nltk_data] Downloading package omw-1.4 to /home/jonas/nltk_data...\n",
"[nltk_data] Package omw-1.4 is already up-to-date!\n",
"[nltk_data] Downloading package omw to /home/jonas/nltk_data...\n",
"[nltk_data] Package omw is already up-to-date!\n"
]
}
],
"source": [
"# download the cryptics crosswords database if not already cached \n",
"if not CRYPTICS_CROSSWORDS_DB_CSV.exists():\n",
" response = requests.get(CRYPTICS_CROSSWORDS_DB_URL)\n",
" with open(CRYPTICS_CROSSWORDS_DB_CSV, \"wb\") as f:\n",
" f.write(response.content)\n",
"\n",
"# download wordnet from nltk\n",
"import nltk\n",
"nltk.download('wordnet')\n",
"nltk.download('omw-1.4') # optional, extra languages / lemmas\n",
"nltk.download('omw') # try the older omw package\n",
"\n",
"# download the german wiktionary dump if not already cached\n",
"if not COMPRESSED_DE_WIKTIONARY_DUMP.exists():\n",
" response = requests.get(COMPRESSED_DE_WIKTIONARY_DUMP_URL, stream=True)\n",
" with open(COMPRESSED_DE_WIKTIONARY_DUMP, \"wb\") as f:\n",
" for chunk in tqdm(response.iter_content(chunk_size=8192), desc=\"Downloading de wiktionary dump\"):\n",
" f.write(chunk)"
]
},
{
"cell_type": "markdown",
"id": "26060068",
"metadata": {},
"source": [
"## Define our Datastructures"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "8c81708c",
"metadata": {},
"outputs": [],
"source": [
"from pydantic import BaseModel\n",
"import re \n",
"\n",
"class WordEntry(BaseModel):\n",
" word: str\n",
" senses: list[str]\n",
" synonyms: list[str]\n",
" antonyms: list[str]\n",
" word_frequency: int # frequency rank of the word (0% - 100%)\n",
" source: str # source of the word entry (e.g., \"cryptics\", \"wordnet\", etc.)\n",
" categories: list[str] # categories or tags associated with the word entry\n",
"\n",
"class Dictionary(BaseModel):\n",
" entries: dict[str, WordEntry] # mapping from word to WordEntry\n",
" def add_entry(self, entry: WordEntry):\n",
" if entry.word not in self.entries: \n",
" self.entries[entry.word] = entry\n",
" else:\n",
" if entry.source == self.entries[entry.word].source:\n",
" # merge entries if word already exists\n",
" existing_entry = self.entries[entry.word]\n",
" existing_entry.senses = list(set(existing_entry.senses) | set(entry.senses))\n",
" existing_entry.synonyms = list(set(existing_entry.synonyms) | set(entry.synonyms))\n",
" existing_entry.antonyms = list(set(existing_entry.antonyms) | set(entry.antonyms))\n",
" existing_entry.categories = list(set(existing_entry.categories) | set(entry.categories))\n",
" existing_entry.word_frequency = max(existing_entry.word_frequency, entry.word_frequency)\n",
" else:\n",
" # create a new entry\n",
" word = entry.word\n",
" i = 1\n",
" while f\"{word}_{i}\" in self.entries:\n",
" i += 1 \n",
" self.entries[f\"{word}_{i}\"] = entry\n",
"\n",
" "
]
},
{
"cell_type": "markdown",
"id": "b1397355",
"metadata": {},
"source": [
"## Parse Data (EN)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "56988415",
"metadata": {},
"outputs": [],
"source": [
"en_db = Dictionary (entries={})"
]
},
{
"cell_type": "markdown",
"id": "d5b35c1b",
"metadata": {},
"source": [
"### Parse cryptics DB"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e9e1ee43",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "75b431216f3249c7879d87fe33f7817a",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/42 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"# csv structure: we will use the column clue as senses and the column answer (lowercase) as word.\n",
"# words that have spaces will be skipped\n",
"\n",
"# read the file in batches to avoid memory issues \n",
"batch_size = 1000\n",
"\n",
"# Calculate total lines safely\n",
"try:\n",
" total_lines = sum(1 for line in open(CRYPTICS_CROSSWORDS_DB_CSV))\n",
"except Exception as e:\n",
" print(f\"Error counting lines: {e}\")\n",
" total_lines = 0\n",
"\n",
"cryptics_cols = \"rowid\tclue\tanswer\tdefinition\tclue_number\tpuzzle_date\tpuzzle_name\tsource_url\tsource\".split()\n",
"\n",
"for start_row in tqdm(range(1, total_lines, batch_size)):\n",
" try:\n",
" # Use on_bad_lines='skip' to handle rows with too many fields\n",
" df = pd.read_csv(CRYPTICS_CROSSWORDS_DB_CSV, skiprows=start_row, nrows=batch_size, names=cryptics_cols, on_bad_lines='skip')\n",
" except Exception as e:\n",
" print(f\"Error reading batch starting at {start_row}: {e}\")\n",
" continue\n",
"\n",
" for index, row in df.iterrows():\n",
" # Check if answer is a string (handles NaN)\n",
" if not isinstance(row['answer'], str):\n",
" continue\n",
"\n",
" word = row['answer'].lower()\n",
" if ' ' in word:\n",
" continue\n",
" \n",
" # Check if clue is a string\n",
" if not isinstance(row['clue'], str):\n",
" continue\n",
"\n",
" clue = row['clue']\n",
" # replace - and _ with empty string\n",
" word = word.replace(\"-\", \"\").replace(\"_\", \"\")\n",
" word = word.lower()\n",
"\n",
" # remove numbers in parentheses or brackets (e.g. (5), [4], (3,4), [1-9])\n",
" clue = re.sub(r'\\s*[(\\[][\\d,\\-\\s]+[)\\]]$', '', clue).strip()\n",
"\n",
" # if the word is not alphabetic, skip it\n",
" if not word.isalpha():\n",
" continue \n",
" en_db.add_entry(WordEntry(\n",
" word=word,\n",
" senses=[clue],\n",
" synonyms=[],\n",
" antonyms=[],\n",
" word_frequency=-1, # placeholder frequency\n",
" source=\"cryptics\",\n",
" categories=[\"cryptic_clue\"]\n",
" ))"
]
},
{
"cell_type": "markdown",
"id": "62734af4",
"metadata": {},
"source": [
"### Parse Wordnet Data\n",
"\n",
"* import necessary stuff:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "32280691",
"metadata": {},
"outputs": [],
"source": [
"from nltk.corpus import wordnet as wn\n"
]
},
{
"cell_type": "markdown",
"id": "e13bf2f3",
"metadata": {},
"source": [
"* Parse synsets"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "b21721e2",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "9f8d4c77558e478b9cf214d851fd503e",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/117659 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"all_synsets = wn.all_synsets()\n",
"\n",
"def frequency_metric(lemma, word):\n",
" # derive a frequency metric, based on polysemy and lemma count:\n",
" # 1. SemCor frequency count for this specific sense\n",
" semcor_count = lemma.count()\n",
" # 2. Polysemy: number of synsets for this word\n",
" polysemy_count = len(wn.synsets(word))\n",
" \n",
" return semcor_count + polysemy_count\n",
" \n",
"\n",
"for synset in tqdm(list(all_synsets)):\n",
" #print(synset.name(), synset.definition() )\n",
"\n",
" # find the first \"good\" lemma name (only alphabetic characters) \n",
" # WE NEED LEMMA OBJECTS NOW, NOT JUST NAMES\n",
" good_lemmas = [lemma for lemma in synset.lemmas() if lemma.name().isalpha()] \n",
" if not good_lemmas:\n",
" continue \n",
"\n",
" target_lemma = good_lemmas[0]\n",
" word = target_lemma.name().lower()\n",
" clue = synset.definition()\n",
"\n",
" # Calculate frequency\n",
" raw_metric = frequency_metric(target_lemma, word)\n",
" \n",
" # Normalize to 0-100 range\n",
" # Values can range from 1 to >200 for very common words.\n",
" # We apply a factor and clamp.\n",
" # Using factor 1.0 means 100 count -> 100 frequency.\n",
" word_frequency = min(100, int(raw_metric))\n",
"\n",
" en_db.add_entry(WordEntry(\n",
" word=word,\n",
" senses=[clue],\n",
" synonyms=[],\n",
" antonyms=[],\n",
" word_frequency=word_frequency,\n",
" source=\"wordnet\",\n",
" categories=[\"wordnet\"]\n",
" ))\n",
" \n",
" #break"
]
},
{
"cell_type": "markdown",
"id": "1e246fd0",
"metadata": {},
"source": [
"## Parse German Data\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "63953ce6",
"metadata": {},
"outputs": [],
"source": [
"de_db = Dictionary (entries={})"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "435d0b78",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "76b0adb1fab244feaa5ab51c985fbe5f",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
"Processing German Wiktionary entries: 0it [00:00, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Processed 78859 entries from German Wiktionary dump.\n"
]
}
],
"source": [
"# inspect data first. File contains jsonl.gz entries per line\n",
"\n",
"import gzip\n",
"import json\n",
"import difflib\n",
"\n",
"# define a helper function to fince the similarity between words. Used to sort out glosses with words too similar to the search word.\n",
"def _similarity_ratio(word1, word2):\n",
" return difflib.SequenceMatcher(None, word1.lower(), word2.lower()).ratio()\n",
"\n",
"def _ascii_word(word):\n",
" word = word.lower()\n",
" word = word.replace(\"ä\", \"ae\")\n",
" word = word.replace(\"ö\", \"oe\")\n",
" word = word.replace(\"ü\", \"ue\")\n",
" word = word.replace(\"ß\", \"ss\")\n",
"\n",
" return word \n",
"\n",
"def _only_acscii_chars_in_word(word):\n",
" # returns true if only the ascii alphabet is in the word\n",
" return all(c.isascii() and c.isalpha() for c in word)\n",
"\n",
"\n",
"def get_best_gloss_for_sesne(word, sense): \n",
" normalized_word = word.lower()\n",
"\n",
" glosses = sense.get(\"glosses\", [])\n",
" for g in glosses:\n",
" if normalized_word in g.lower():\n",
" continue\n",
" \n",
" # Use similarity ratio to skip glosses that are too close to the word itself (e.g. simple variations).\n",
" # check each word in the gloss\n",
" gloss_words = re.findall(r'\\b\\w+\\b', g.lower()) \n",
" found_similar = False \n",
" for gw in gloss_words:\n",
" if _similarity_ratio(normalized_word, gw) > 0.8:\n",
" #print(\"too similar:\", normalized_word, gw, \"->\", _similarity_ratio(normalized_word, gw), g)\n",
" found_similar = True \n",
" break \n",
" \n",
" if found_similar:\n",
" continue \n",
" return g\n",
"\n",
" return None\n",
"\n",
"def calculate_frequency_score(json_data):\n",
" # A heuristic to estimate word frequency/commonality based on available data\n",
" score = 0\n",
" \n",
" # 1. Number of senses (polysemy): Common words usually have multiple meanings\n",
" senses = json_data.get(\"senses\", [])\n",
" score += len(senses) * 2\n",
" \n",
" # 2. Number of translations: Common words are translated into many languages\n",
" translations = json_data.get(\"translations\", [])\n",
" score += len(translations) * 0.5\n",
" \n",
" # 3. Has audio pronunciation? Common words usually do.\n",
" sounds = json_data.get(\"sounds\", [])\n",
" if sounds:\n",
" score += 5\n",
" \n",
" # 4. Check for \"rare\", \"obsolete\", \"archaic\" tags in senses\n",
" # If a word is ONLY archaic, it should be low frequency.\n",
" # But usually we want to just boost the \"normal\" ones.\n",
" \n",
" # Normalize heavily. \n",
" # A word like \"Haus\" might have huge scores.\n",
" # We want a 0-100 scale.\n",
" \n",
" return min(100, int(score))\n",
"\n",
"def process_entry(json_data, min_freq_score=10):\n",
" senses = json_data.get(\"senses\", []) \n",
" processed_senses = []\n",
" tags = set()\n",
" for sense in senses:\n",
" glosses = sense .get(\"glosses\", [])\n",
" topic_labels = sense.get(\"topics\", [])\n",
" best_gloss = get_best_gloss_for_sesne(json_data.get(\"word\", \"\"), sense)\n",
" for topic in topic_labels:\n",
" tags.add(topic)\n",
" if best_gloss:\n",
" text = best_gloss\n",
" if topic_labels and len(topic_labels) > 0:\n",
" text = \"\" + \", \".join(topic_labels) + \": \" + text\n",
" processed_senses.append(text)\n",
" \n",
" # Calculate Frequency\n",
" freq = calculate_frequency_score(json_data)\n",
"\n",
" if freq < min_freq_score:\n",
" return [] # skip low frequency words\n",
"\n",
" if not _only_acscii_chars_in_word(_ascii_word(json_data.get(\"word\", \"\"))):\n",
" return [] # skip non-ascii words\n",
"\n",
" if len(processed_senses) == 0:\n",
" return [] # skip entries with no valid senses \n",
" \n",
" de_db.add_entry(WordEntry(\n",
" word=_ascii_word(json_data.get(\"word\", \"\").lower()),\n",
" senses=processed_senses,\n",
" synonyms=[],\n",
" antonyms=[],\n",
" word_frequency=freq, \n",
" source=\"de_wiktionary\",\n",
" categories=list(tags)\n",
" )) \n",
"\n",
" return processed_senses\n",
"\n",
"def parse_entry(json_line):\n",
"\n",
" \n",
" #print(\"\\n\")\n",
" #print(\"Parsing entry:\", json_line)\n",
" json_data = json.loads(json_line) \n",
" lang_code = json_data.get(\"lang_code\", \"unknown\").lower()\n",
" if lang_code != \"de\":\n",
" return False \n",
" #print(\"word:\", json_data.get(\"word\"))\n",
" senses = json_data.get(\"senses\", []) \n",
" processed_senses = process_entry(json_data)\n",
" if len(processed_senses) == 0:\n",
" #print(\"No valid senses found, skipping.\")\n",
" return False \n",
" #print(\"Senses / glosses:\", processed_senses)\n",
" return True\n",
"\n",
"# read file in unzipping on the fly using gzip module\n",
"# \"rt\" mode opens it as text, handling newlines correctly after decompression\n",
"with gzip.open(COMPRESSED_DE_WIKTIONARY_DUMP, \"rt\", encoding=\"utf-8\") as f:\n",
" i = 0\n",
" for _, line in enumerate(tqdm(f, desc=\"Processing German Wiktionary entries\" ) ):\n",
" #if i >= 10: \n",
" # break\n",
" if line.strip():\n",
" if parse_entry(line.strip()):\n",
" i += 1\n",
" print(f\"Processed {i} entries from German Wiktionary dump.\")"
]
},
{
"cell_type": "markdown",
"id": "be16b393",
"metadata": {},
"source": [
"### Save extracted databases\n",
"\n",
"Dump the db to disk as json"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "69b67091",
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "6d1490c57118467fb95cdc111114f926",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/96407 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Wrote 74357 entries to en.json\n"
]
},
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "daad07fbef564f06a7079d99a3291125",
"version_major": 2,
"version_minor": 0
},
"text/plain": [
" 0%| | 0/77291 [00:00<?, ?it/s]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Wrote 77291 entries to de.json\n"
]
}
],
"source": [
"EN_PATH = Path(\"./en.json\")\n",
"DE_PATH = Path(\"./de.json\") \n",
"\n",
"# file, db tuples\n",
"FILES_DBS = [\n",
" (EN_PATH, en_db),\n",
" (DE_PATH, de_db)\n",
"] \n",
"\n",
"import json\n",
"\n",
"# entries to include:\n",
"INCLUDED_SOURCES = {\n",
" #\"cryptics\",\n",
" \"wordnet\",\n",
" \"de_wiktionary\"\n",
"} \n",
"\n",
"for FILE_PATH, DB in FILES_DBS: \n",
" with open(FILE_PATH, \"w\") as f:\n",
" f.write(\"{\\n\")\n",
" i = 0\n",
" for key, value in tqdm( DB.entries.items()):\n",
"\n",
" if value.source not in INCLUDED_SOURCES:\n",
" continue \n",
" \n",
" # dump json\n",
" if i > 0:\n",
" f.write(\",\\n\")\n",
" d_value = value.model_dump()\n",
" as_json = json.dumps(\n",
" d_value, indent=4\n",
" )\n",
" as_json = \"\\n \".join(as_json.split(\"\\n\"))\n",
" as_json = \" \\\"\" + key + \"\\\": \" + as_json\n",
" f.write(as_json)\n",
" i += 1\n",
" \n",
" f.write(\"\\n}\\n\")\n",
" print(f\"Wrote {i} entries to {FILE_PATH}\" )\n",
"\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f91aee6f",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "multiplayer-crosswords-W02cfZ32-py3.12",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

data/en.json: 1301860 changes

File diff suppressed because it is too large.
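The generated `data/en.json` and `data/de.json` are too large to render here. As a rough sketch of what consuming them looks like (assuming the entry format documented at the top of the notebook above: a top-level object mapping unique keys to entries with `word`, `senses`, `synonyms`, `antonyms` and `word_frequency`):

```python
import json
from pathlib import Path

# Minimal sketch: load one of the generated dictionaries and print a sample
# entry. Path and key names follow the notebook above; anything else about
# the file layout is an assumption.
db = json.loads(Path("data/en.json").read_text(encoding="utf-8"))
key, entry = next(iter(db.items()))
print(key, "->", entry["word"], f"(frequency {entry['word_frequency']})")
for sense in entry["senses"][:3]:
    print("  clue:", sense)
```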


@@ -0,0 +1,34 @@
import os
import requests
import zipfile
import time
from pathlib import Path
def download_openthesaurus(cache_dir = None) -> Path | None:
if cache_dir is None:
cache_dir = Path("./.cache")
else:
cache_dir = Path(cache_dir)
# Define paths
OPENTHESAURUS_URL = "https://www.openthesaurus.de/export/OpenThesaurus-Textversion.zip"
OPENTHESAURUS_ZIP = cache_dir / "OpenThesaurus-Textversion.zip"
OPENTHESAURUS_TXT = cache_dir / "openthesaurus.txt"
if not OPENTHESAURUS_TXT.exists():
if not OPENTHESAURUS_ZIP.exists():
print("Downloading OpenThesaurus...")
r = requests.get(OPENTHESAURUS_URL)
with open(OPENTHESAURUS_ZIP, "wb") as f:
f.write(r.content)
print("Extracting...")
with zipfile.ZipFile(OPENTHESAURUS_ZIP, 'r') as zip_ref:
zip_ref.extractall(cache_dir)
print(f"OpenThesaurus available at {OPENTHESAURUS_TXT}")
return OPENTHESAURUS_TXT
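A hypothetical usage sketch for the helper above. The parsing loop assumes the standard OpenThesaurus text export format (comment lines starting with `#`, one synonym set per line with entries separated by `;`), which is not defined in this file.

```python
# Hypothetical usage of download_openthesaurus(): build a word -> synonyms map.
# The ';'-separated line format is an assumption about the OpenThesaurus
# text export, not something this module defines.
from collections import defaultdict

synonyms: dict[str, set[str]] = defaultdict(set)
path = download_openthesaurus()
if path is not None and path.exists():
    with open(path, encoding="utf-8") as f:
        for line in f:
            if not line.strip() or line.startswith("#"):
                continue
            words = [w.strip() for w in line.strip().split(";") if w.strip()]
            for w in words:
                synonyms[w].update(x for x in words if x != w)
```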


@@ -294,39 +294,12 @@ class WebsocketCrosswordServer(object):
if current_grid_letter.upper() == msg_letter.upper():
# No change
return
crossword.place_letter(
x=message.col,
y=message.row,
letter=msg_letter.lower(),
)
# now check if the word is solved
# check if the letter already is solved, if so, ignore the update
words_at_position = crossword.get_words_by_y_x_position(y=message.row, x=message.col)
is_solved = any(cw.solved for cw in words_at_position)
if is_solved:
logger.info("Word solved at position (%d, %d) in session %s", message.col, message.row, session.session_id)
messages = []
for cw in words_at_position:
if cw.solved:
logger.info("Solved word: %s", cw.word)
# go through each letter in the word and create a message
for i in range(len(cw.word)):
if cw.orientation == Orientation.HORIZONTAL:
row = cw.start_y
col = cw.start_x + i
else:
row = cw.start_y + i
col = cw.start_x
letter = cw.word[i].upper()
msg = server_messages.LetterUpdateBroadcastServerMessage(
session_id=session.session_id,
row=row,
col=col,
letter=letter,
is_solved=True
)
messages.append(msg)
if any(cw.solved for cw in words_at_position):
logger.info("Ignoring update to already solved position (%d, %d) in session %s", message.col, message.row, session.session_id)
else:
# send letter again to client to ensure they have the correct letter
msg = server_messages.LetterUpdateBroadcastServerMessage(
session_id=session.session_id,
row=message.row,
@@ -335,6 +308,50 @@ class WebsocketCrosswordServer(object):
is_solved=is_solved
)
messages = [msg]
else:
# position is not solved yet: place the letter and check whether this completes a word
crossword.place_letter(
x=message.col,
y=message.row,
letter=msg_letter.lower(),
)
words_at_position = crossword.get_words_by_y_x_position(y=message.row, x=message.col)
is_solved = any(cw.solved for cw in words_at_position)
if is_solved:
logger.info("Word solved at position (%d, %d) in session %s", message.col, message.row, session.session_id)
messages = []
for cw in words_at_position:
if cw.solved:
logger.info("Solved word: %s", cw.word)
# go through each letter in the word and create a message
for i in range(len(cw.word)):
if cw.orientation == Orientation.HORIZONTAL:
row = cw.start_y
col = cw.start_x + i
else:
row = cw.start_y + i
col = cw.start_x
letter = cw.word[i].upper()
msg = server_messages.LetterUpdateBroadcastServerMessage(
session_id=session.session_id,
row=row,
col=col,
letter=letter,
is_solved=True
)
messages.append(msg)
else:
msg = server_messages.LetterUpdateBroadcastServerMessage(
session_id=session.session_id,
row=message.row,
col=message.col,
letter=msg_letter.upper(),
is_solved=is_solved
)
messages = [msg]
# NOTE: we do this purposefully outside of the session lock to avoid
# potential deadlocks if sending messages takes time.
@@ -342,9 +359,7 @@ class WebsocketCrosswordServer(object):
for broadcast_message in messages:
await session.send_message_to_all_clients(message=broadcast_message.model_dump())
def __init__(self, host: str, port: int):
self._host = host
self._port = port


@@ -59,9 +59,16 @@ def load_dictionary(p: str | Path) -> Dictionary:
if len(hints) > 0:
difficulty = 1.0
word_freq = obj.get("word_frequency", -1)
if word_freq >= 0:
# Map 0-100 frequency to 1.0-0.0 difficulty
# Higher frequency -> Lower difficulty
difficulty = max(0.0, min(1.0, 1.0 - (word_freq / 100.0)))
w = Word(word=word,
hints=hints,
difficulty=1)
difficulty=difficulty)
dict_obj.add_word(w)
load_dictionary._cache[cache_key] = dict_obj
return dict_obj
@@ -70,4 +77,4 @@ def load_en_dictionary() -> Dictionary:
return load_dictionary(Path(__file__).parent.parent / "data" / "en.json")
def load_de_dictionary() -> Dictionary:
return load_dictionary(Path(__file__).parent.parent / "data" / "de.json")
return load_dictionary(Path(__file__).parent.parent / "data" / "de.json")
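The change above maps the 0-100 `word_frequency` onto the 1.0-0.0 `difficulty` scale, so more frequent words get lower difficulty and entries without a frequency keep the previous default of 1.0. A standalone sketch of that mapping and its edge cases:

```python
# Sketch of the frequency-to-difficulty mapping introduced above: frequencies
# from 0 to 100 map linearly onto difficulties from 1.0 down to 0.0; negative
# (missing) frequencies fall back to the previous default difficulty of 1.0.
def frequency_to_difficulty(word_frequency: int) -> float:
    if word_frequency < 0:
        return 1.0
    return max(0.0, min(1.0, 1.0 - word_frequency / 100.0))

assert frequency_to_difficulty(100) == 0.0  # very common word -> easiest
assert frequency_to_difficulty(50) == 0.5   # mid-frequency word
assert frequency_to_difficulty(0) == 1.0    # very rare word -> hardest
assert frequency_to_difficulty(-1) == 1.0   # unknown frequency -> default
```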


@@ -552,6 +552,36 @@ export class CrosswordGrid extends LitElement {
this.requestUpdate();
}
/**
* Calculate completion ratio as percentage (0-100)
*/
_calculateCompletionRatio() {
let totalNonWallCells = 0;
let solvedCells = 0;
for (let r = 0; r < this.rows; r++) {
for (let c = 0; c < this.cols; c++) {
if (this._grid[r][c] !== '#') {
totalNonWallCells++;
const cellKey = `${r},${c}`;
if (this._solvedCells.has(cellKey)) {
solvedCells++;
}
}
}
}
if (totalNonWallCells === 0) return 0;
return Math.round((solvedCells / totalNonWallCells) * 100);
}
/**
* Get current completion ratio (public method)
*/
getCompletionRatio() {
return this._calculateCompletionRatio();
}
/**
* Handle letter updates from server (broadcast messages from other players)
*/
@@ -589,6 +619,14 @@ export class CrosswordGrid extends LitElement {
this.requestUpdate();
// Calculate and emit completion ratio update
const completionRatio = this._calculateCompletionRatio();
this.dispatchEvent(new CustomEvent('completion-ratio-changed', {
detail: { completionRatio },
bubbles: true,
composed: true
}));
// Trigger animation if solution word just completed
if (this._isSolutionWordComplete()) {
this.updateComplete.then(() => {


@@ -73,6 +73,9 @@
console.log('Subscribing to session:', sessionId);
currentSessionId = sessionId;
// Update URL with session ID
updateUrlWithSessionId(sessionId);
// Show game UI immediately
menu.style.display = 'none';
gridContainer.style.display = 'block';
@@ -134,7 +137,7 @@
// Create container with close button
gridContainer.innerHTML = `
<div class="game-header">
<h2 style="text-align: center;">Crossword</h2>
<h2 id="crossword-title" style="text-align: center;">Crossword (0%)</h2>
<div class="header-buttons">
<button class="share-game-btn" aria-label="Share game">
<span style="padding-right: 0.5rem;">Share Session</span>
@@ -284,6 +287,36 @@
clueArea._updateSolvedClues();
clueArea.requestUpdate();
});
// Listen for completion ratio updates
gridElement.addEventListener('completion-ratio-changed', (e) => {
const { completionRatio } = e.detail;
updateHeaderTitle(completionRatio);
// Update session storage with completion ratio
if (window.updateSessionCompletionRatio) {
window.updateSessionCompletionRatio(currentSessionId, completionRatio);
}
});
// Function to update header title with completion percentage
function updateHeaderTitle(completionRatio) {
const titleElement = document.getElementById('crossword-title');
if (titleElement) {
titleElement.textContent = `Crossword (${completionRatio}%)`;
}
}
// Calculate initial completion ratio after grid is fully set up
setTimeout(() => {
const initialRatio = gridElement.getCompletionRatio();
updateHeaderTitle(initialRatio);
// Update session storage with initial completion ratio
if (window.updateSessionCompletionRatio) {
window.updateSessionCompletionRatio(currentSessionId, initialRatio);
}
}, 100);
// Close button handler
closeBtn.addEventListener('click', closeGame);


@@ -37,9 +37,14 @@ export class CrosswordMenu extends LitElement {
wsManager.setNotificationManager(notificationManager);
// Listen for session creation/subscription events
wsManager.onMessage('session_created', (msg) => this._onSessionCreated(msg));
wsManager.onMessage('session_subscribed', (msg) => this._onSessionSubscribed(msg));
wsManager.onMessage('session_not_found', (msg) => this._onSessionNotFound(msg));
wsManager.onMessage('full_session_state', (msg) => this._onSessionJoined(msg));
wsManager.onMessage('error', (msg) => this._onSessionError(msg));
this._initializeConnection();
// Make update function available globally
window.updateSessionCompletionRatio = (sessionId, completionRatio) => {
this._updateSessionCompletionRatio(sessionId, completionRatio);
};
}
disconnectedCallback() {
@@ -48,8 +53,8 @@
wsManager.offMessage('available_session_properties', this._handleSessionProperties);
wsManager.offMessage('error', this._handleError);
wsManager.offMessage('session_created', this._onSessionCreated);
wsManager.offMessage('session_subscribed', this._onSessionSubscribed);
wsManager.offMessage('session_not_found', this._onSessionNotFound);
wsManager.offMessage('full_session_state', this._onSessionJoined);
wsManager.offMessage('error', this._onSessionError);
}
_initializeConnection() {
@@ -116,7 +121,7 @@
this._loading = false;
this._error = null;
notificationManager.success('Game options loaded');
notificationManager.success('Connected to Crossword server');
this.requestUpdate();
}
@@ -165,14 +170,26 @@
// Session storage management
_initializeSessionStorage() {
const savedSessionsData = this._getCookie('savedSessions');
if (savedSessionsData) {
try {
this._savedSessions = JSON.parse(savedSessionsData);
this._saveSessionsEnabled = true;
} catch (e) {
console.warn('Failed to parse saved sessions cookie:', e);
this._clearAllCookies();
// Check if the save setting is enabled
const saveSettingEnabled = this._getCookie('saveSessionsEnabled');
if (saveSettingEnabled === 'true') {
this._saveSessionsEnabled = true;
// Load saved sessions if the setting is enabled
const savedSessionsData = this._getCookie('savedSessions');
if (savedSessionsData) {
try {
this._savedSessions = JSON.parse(savedSessionsData);
// Ensure all sessions have a completionRatio field (for backward compatibility)
this._savedSessions = this._savedSessions.map(session => ({
...session,
completionRatio: session.completionRatio || 0
}));
} catch (e) {
console.warn('Failed to parse saved sessions cookie:', e);
this._clearAllCookies();
}
}
}
}
@@ -196,14 +213,25 @@
_clearAllCookies() {
this._deleteCookie('savedSessions');
this._deleteCookie('saveSessionsEnabled');
this._savedSessions = [];
this._saveSessionsEnabled = false;
this.requestUpdate();
}
_clearSessionsOnly() {
this._deleteCookie('savedSessions');
this._savedSessions = [];
this.requestUpdate();
}
_toggleSessionSaving() {
this._saveSessionsEnabled = !this._saveSessionsEnabled;
if (!this._saveSessionsEnabled) {
if (this._saveSessionsEnabled) {
// Save the setting preference when enabled
this._setCookie('saveSessionsEnabled', 'true');
} else {
// Clear everything when disabled
this._clearAllCookies();
}
this.requestUpdate();
@@ -219,6 +247,7 @@
this._savedSessions.unshift({
id: sessionId,
timestamp: Date.now(),
completionRatio: 0, // Default completion ratio
...sessionInfo
});
@@ -230,10 +259,25 @@
this.requestUpdate();
}
_updateSessionCompletionRatio(sessionId, completionRatio) {
if (!this._saveSessionsEnabled) return;
// Find and update the session
const sessionIndex = this._savedSessions.findIndex(s => s.id === sessionId);
if (sessionIndex !== -1) {
this._savedSessions[sessionIndex].completionRatio = completionRatio;
this._savedSessions[sessionIndex].timestamp = Date.now(); // Update timestamp
// Save updated sessions to cookie
this._setCookie('savedSessions', JSON.stringify(this._savedSessions));
this.requestUpdate();
}
}
_removeSession(sessionId) {
this._savedSessions = this._savedSessions.filter(s => s.id !== sessionId);
if (this._savedSessions.length === 0) {
this._clearAllCookies();
this._clearSessionsOnly();
} else {
this._setCookie('savedSessions', JSON.stringify(this._savedSessions));
}
@@ -250,7 +294,7 @@
}
}
_onSessionSubscribed(message) {
_onSessionJoined(message) {
if (message.session_id) {
this._saveSession(message.session_id, {
type: 'joined'
@@ -258,10 +302,17 @@
}
}
_onSessionNotFound(message) {
if (message.session_id) {
this._removeSession(message.session_id);
notificationManager.warning(`Session ${message.session_id.substring(0, 8)}... no longer exists and was removed from saved sessions`);
_onSessionError(message) {
// Check if it's a session not found error
if (message.error_message && message.error_message.includes('session') && message.error_message.includes('not found')) {
// Try to extract session ID from error message or use current session ID
// This is a fallback - we might not always have the exact session ID in error messages
const sessionIdMatch = message.error_message.match(/session\s+([a-f0-9-]+)/i);
if (sessionIdMatch) {
const sessionId = sessionIdMatch[1];
this._removeSession(sessionId);
notificationManager.warning(`Session ${sessionId.substring(0, 8)}... no longer exists and was removed from saved sessions`);
}
}
}
@@ -284,7 +335,7 @@
}
_clearSavedSessions() {
this._clearAllCookies();
this._clearSessionsOnly();
notificationManager.info('All saved sessions cleared');
}
@@ -366,6 +417,7 @@
<span class="session-id">${session.id.substring(0, 8)}...</span>
<span class="session-time">${this._formatTimestamp(session.timestamp)}</span>
${session.language ? html`<span class="session-lang">${session.language.toUpperCase()}</span>` : ''}
<span class="session-completion">${session.completionRatio || 0}% solved</span>
</div>
<div class="session-actions">
<button class="reconnect-btn" @click="${() => this._reconnectToSession(session.id)}">Rejoin</button>


@@ -338,6 +338,32 @@ crossword-grid { display: block; margin: 0 auto; }
inset 1px 1px 2px rgba(255,255,255,0.3);
}
/* Solved cells that are also highlighted - keep green background but yellow border */
.cell.solved.mode-highlighted {
background:
repeating-linear-gradient(87deg, transparent, transparent 3px, rgba(100,200,100,.1) 3px, rgba(100,200,100,.1) 5px),
repeating-linear-gradient(22deg, transparent, transparent 4px, rgba(100,200,100,.08) 4px, rgba(100,200,100,.08) 6px),
repeating-linear-gradient(59deg, transparent, transparent 3px, rgba(100,200,100,.06) 3px, rgba(100,200,100,.06) 5px),
repeating-linear-gradient(-11deg, transparent, transparent 4px, rgba(100,200,100,.04) 4px, rgba(100,200,100,.04) 6px),
repeating-radial-gradient(circle at 12% 18%, rgba(100,200,100,.06) 1px, transparent 1px),
repeating-radial-gradient(circle at 67% 77%, rgba(100,200,100,.07) 1px, transparent 1px),
repeating-radial-gradient(circle at 34% 51%, rgba(100,200,100,.03) 1.5px, transparent 1.5px),
repeating-radial-gradient(circle at 23% 67%, rgba(100,200,100,.015) 0.8px, transparent 0.8px),
repeating-radial-gradient(circle at 78% 22%, rgba(255,255,255,.03) 0.8px, transparent 0.8px),
radial-gradient(ellipse 800px 600px at 30% 40%, rgba(255,255,255,.2) 0%, transparent 40%),
radial-gradient(circle at 0% 0%, rgba(100,200,100,.015) 0%, transparent 70%),
radial-gradient(circle at 100% 100%, rgba(100,200,100,.015) 0%, transparent 70%),
linear-gradient(135deg, #d4f4d4 0%, #c8ead4 100%);
box-shadow:
inset 0 1px 2px rgba(255,255,255,0.8),
inset 0 0 0 1px #c8e6f0,
0 0.5px 1px rgba(0,0,0,0.05),
inset -1px -1px 2px rgba(100,200,100,0.08),
inset 1px 1px 2px rgba(255,255,255,0.3);
border-color: #a8d4e8;
}
@keyframes cell-bounce {
0%, 100% {
transform: scale(1);
@@ -382,6 +408,32 @@ crossword-grid { display: block; margin: 0 auto; }
inset 1px 1px 2px rgba(255,255,255,0.5);
}
/* Solved cells that are also selected - keep green background but yellow border */
.cell.solved.selected {
outline: none;
background:
repeating-linear-gradient(87deg, transparent, transparent 3px, rgba(100,200,100,.1) 3px, rgba(100,200,100,.1) 5px),
repeating-linear-gradient(22deg, transparent, transparent 4px, rgba(100,200,100,.08) 4px, rgba(100,200,100,.08) 6px),
repeating-linear-gradient(59deg, transparent, transparent 3px, rgba(100,200,100,.06) 3px, rgba(100,200,100,.06) 5px),
repeating-linear-gradient(-11deg, transparent, transparent 4px, rgba(100,200,100,.04) 4px, rgba(100,200,100,.04) 6px),
repeating-radial-gradient(circle at 12% 18%, rgba(100,200,100,.06) 1px, transparent 1px),
repeating-radial-gradient(circle at 67% 77%, rgba(100,200,100,.07) 1px, transparent 1px),
repeating-radial-gradient(circle at 34% 51%, rgba(100,200,100,.03) 1.5px, transparent 1.5px),
repeating-radial-gradient(circle at 23% 67%, rgba(100,200,100,.015) 0.8px, transparent 0.8px),
repeating-radial-gradient(circle at 78% 22%, rgba(255,255,255,.03) 0.8px, transparent 0.8px),
radial-gradient(ellipse 800px 600px at 30% 40%, rgba(255,255,255,.2) 0%, transparent 40%),
radial-gradient(circle at 0% 0%, rgba(100,200,100,.015) 0%, transparent 70%),
radial-gradient(circle at 100% 100%, rgba(100,200,100,.015) 0%, transparent 70%),
linear-gradient(135deg, #d4f4d4 0%, #c8ead4 100%) !important;
border-color: var(--ink-dark) !important;
box-shadow:
inset 0 1px 2px rgba(255,255,255,0.8),
inset 0 0 0 1.5px #ffc107,
0 0 8px rgba(255,193,7,0.25),
inset -1px -1px 2px rgba(100,200,100,0.08),
inset 1px 1px 2px rgba(255,255,255,0.3) !important;
}
.cell.selected.mode-highlighted {
background:
repeating-linear-gradient(87deg, transparent, transparent 3px, rgba(200,150,0,.15) 3px, rgba(200,150,0,.15) 5px),
@@ -858,7 +910,7 @@
.saved-sessions h3 {
margin: 0 0 1rem 0;
color: #5c6fc3;
color: #232842;
font-size: 1.1rem;
}
@@ -891,11 +943,16 @@
color: #222c55;
}
.session-time, .session-lang {
.session-time, .session-lang, .session-completion {
font-size: 0.8rem;
color: rgba(0, 0, 0, 0.7);
}
.session-completion {
font-weight: 600;
color: #4a7a9e;
}
.session-actions {
display: flex;
gap: 0.5rem;


@@ -1,6 +1,6 @@
[project]
name = "multiplayer-crosswords"
version = "0.1.0"
version = "0.2.0"
description = ""
authors = [
{name="Jonas Weinz"}
@@ -17,7 +17,7 @@ dependencies = [
]
[tool.poetry]
name = "multiplayer-crosswords"
version = "0.1.0"
version = "0.2.0"
description = ""
authors = [
"Jonas Weinz"