Add per-request AI logging, DB batch queue, WS entity updates, and UI polish

- log_thread.py: thread-safe ContextVar bridge so executor threads can log
  individual LLM calls and archive searches back to the event loop
- ai_log.py: init_thread_logging(), notify_entity_update(); WS now pushes
  entity_update messages when book data changes after any plugin or batch run
- batch.py: replace batch_pending.json with batch_queue SQLite table;
  run_batch_consumer() reads queue dynamically so new books can be added
  while batch is running; add_to_queue() deduplicates
- migrate.py: fix _migrate_v1 (clear-on-startup bug); add _migrate_v2 for
  batch_queue table
- _client.py / archive.py / identification.py: wrap each LLM API call and
  archive search with log_thread start/finish entries
- api.py: POST /api/batch returns {already_running, added}; notify_entity_update
  after identify pipeline
- models.default.yaml: strengthen ai_identify confidence-scoring instructions;
  warn against placeholder data
- detail-render.js: book log entries show clickable ID + spine thumbnail;
  book spine/title images open full-screen popup
- events.js: batch-start handles already_running+added; open-img-popup action
- init.js: entity_update WS handler; image popup close listeners
- overlays.css / index.html: full-screen image popup overlay
- eslint.config.js: add new globals; fix no-redeclare/no-unused-vars for
  multi-file global architecture; all lint errors resolved

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-11 12:10:54 +03:00
parent fd32be729f
commit b94f222c96
41 changed files with 2566 additions and 586 deletions

View File

@@ -30,14 +30,16 @@ functions:
rate_limit_seconds: 0
timeout: 30
# ── Book identification: raw_text → {title, author, year, isbn, publisher, confidence}
# ── Book identification: VLM result + archive results → ranked identification blocks
# is_vlm: true means the model also receives the book's spine and title-page images.
book_identifiers:
identify:
model: ai_identify
confidence_threshold: 0.8
auto_queue: false
rate_limit_seconds: 0
timeout: 30
timeout: 60
is_vlm: true
# ── Archive searchers: query → [{source, title, author, year, isbn, publisher}, ...]
archive_searchers:

View File

@@ -43,8 +43,32 @@ models:
model: "google/gemini-flash-1.5"
prompt: |
# ${RAW_TEXT} — text read from the book spine (multi-line)
# ${ARCHIVE_RESULTS} — JSON array of candidate records from library archives
# ${OUTPUT_FORMAT} — JSON schema injected by BookIdentifierPlugin
The following text was read from a book spine:
Text read from the book spine:
${RAW_TEXT}
Identify this book. Search for it if needed. Return ONLY valid JSON, no explanation:
Archive search results (may be empty):
${ARCHIVE_RESULTS}
Your task:
1. Search the web for this book if needed to find additional information.
2. Combine the spine text, archive results, and your web search into identification candidates.
3. Collapse candidates that are clearly the same book (same title + author + year + publisher) into one entry, listing all contributing sources.
4. Rank candidates by confidence (highest first). Assign a score 0.0-1.0.
5. Remove any candidates you believe are irrelevant or clearly wrong.
IMPORTANT — confidence scoring rules:
- The score must reflect how well the found information matches the spine text and recognized data.
- If the only available evidence is a title with no author, year, publisher, or corroborating archive results, the score must not exceed 0.5.
- Base confidence on: quality of spine text match, number of matching fields, archive result corroboration, and completeness of the identified record.
- A record with title + author + year that appears in multiple archive sources warrants a high score; a record with only a guessed title warrants a low score.
IMPORTANT — output format rules:
- The JSON schema below is a format specification only. Do NOT use it as a source of example data.
- Do NOT return placeholder values such as "The Great Gatsby", "Unknown Author", "Example Publisher", or any other generic example text unless that exact text literally appears on the spine.
- Return only real books that could plausibly match what is shown on this spine.
- If you cannot identify the book with reasonable confidence, return an empty array [].
Return ONLY valid JSON matching the schema below, no explanation:
${OUTPUT_FORMAT}

View File

@@ -1,3 +1,5 @@
# UI settings. Override in ui.user.yaml.
ui:
boundary_grab_px: 14 # pixel grab threshold for dragging boundary lines
spine_padding_pct: 0.30 # extra fraction of book width added on each side of spine crop
ai_log_max_entries: 100 # max AI request log entries kept in memory

View File

@@ -20,15 +20,18 @@ src/
config.py # Config loading and typed AppConfig
models.py # Typed dataclasses / mashumaro decoders
errors.py # Domain exceptions (NotFoundError, BadRequestError subtypes)
log_thread.py # Thread-safe logging context (ContextVar + event-loop bridge for executor threads)
logic/
__init__.py # dispatch_plugin() orchestrator + re-exports
boundaries.py # Boundary math, shelf/spine crop sources, boundary detector runner
identification.py # Status computation, text recognizer, book identifier runners
archive.py # Archive searcher runner (sync + background)
batch.py # Batch pipeline, process_book_sync
batch.py # Batch queue consumer (run_batch_consumer); queue persisted in batch_queue DB table
ai_log.py # AI request ring buffer + WebSocket pub-sub (log_start/log_finish/notify_entity_update); persisted to ai_log table
images.py # crop_save, prep_img_b64, serve_crop
migrate.py # DB migration; run_migration() called at startup
plugins/
__init__.py # Registry: load_plugins(), get_plugin(), get_manifest()
__init__.py # Registry: load_plugins(), get_plugin(), get_manifest(), get_all_text_recognizers(), get_all_book_identifiers(), get_all_archive_searchers()
rate_limiter.py # Thread-safe per-domain rate limiter
ai_compat/ # AI plugin implementations
archives/ # Archive plugin implementations
@@ -71,7 +74,7 @@ Categories:
| `credentials` | `base_url` + `api_key` per endpoint; no model or prompt |
| `models` | `credentials` ref + `model` string + optional `extra_body` + `prompt` |
| `functions` | Plugin definitions; dict key = plugin_id (unique across all categories) |
| `ui` | Frontend display settings |
| `ui` | Frontend display settings (`boundary_grab_px`, `spine_padding_pct`, `ai_log_max_entries`) |
Minimal setup — create `config/credentials.user.yaml`:
```yaml
@@ -88,9 +91,19 @@ credentials:
| `boundary_detectors` (`target=shelves`) | cabinet image | `{boundaries:[…], confidence:N}` | `cabinets.ai_shelf_boundaries` |
| `boundary_detectors` (`target=books`) | shelf image | `{boundaries:[…]}` | `shelves.ai_book_boundaries` |
| `text_recognizers` | spine image | `{raw_text, title, author, …}` | `books.raw_text` + `candidates` |
| `book_identifiers` | raw_text | `{title, author, …, confidence}` | `books.ai_*` + `candidates` |
| `book_identifiers` | raw_text + archive results + optional images | `[{title, author, …, score, sources}, …]` | `books.ai_blocks` + `books.ai_*` |
| `archive_searchers` | query string | `[{source, title, author, …}, …]` | `books.candidates` |
### Identification pipeline (`POST /api/books/{id}/identify`)
Single endpoint runs the full pipeline in sequence:
1. **VLM text recognizer** reads the spine image → `raw_text` and structured fields.
2. **All archive searchers** run in parallel with title+author and title-only queries.
3. Archive results are **deduplicated** by normalized full-field match (case-insensitive, punctuation removed, spaces collapsed).
4. **Main identifier model** receives `raw_text`, deduplicated archive results, and (if `is_vlm: true`) spine + title-page images. Returns ranked `IdentifyBlock` list.
5. `ai_blocks` stored persistently in the DB (never cleared; overwritten each pipeline run). Top block updates `ai_*` fields if score ≥ `confidence_threshold`.
`functions.*.yaml` key for `book_identifiers`: add `is_vlm: true` for models that accept images.
### Universal plugin endpoint
```
POST /api/{entity_type}/{entity_id}/plugin/{plugin_id}
@@ -108,14 +121,22 @@ All implement `search(query: str) -> list[CandidateRecord]`. Use shared `RATE_LI
### Auto-queue
- After `text_recognizer` completes → fires all `archive_searchers` with `auto_queue: true` in background thread pool.
- `POST /api/batch` runs `text_recognizers` then `archive_searchers` for all unidentified books.
- `POST /api/batch` adds all unidentified books to the `batch_queue` DB table; starts `run_batch_consumer()` if not already running. Calling again while running adds newly-unidentified books to the live queue.
## Database Schema (key fields)
| Table | Notable columns |
|-------|-----------------|
| `cabinets` | `shelf_boundaries` (JSON `[…]`), `ai_shelf_boundaries` (JSON `{pluginId:[…]}`) |
| `shelves` | `book_boundaries`, `ai_book_boundaries` (same format), `photo_filename` (optional override) |
| `books` | `raw_text`, `ai_title/author/year/isbn/publisher`, `candidates` (JSON `[{source,…}]`), `identification_status` |
| `books` | `raw_text`, `ai_title/author/year/isbn/publisher`, `candidates` (JSON `[{source,…}]`), `ai_blocks` (JSON `[{title,author,year,isbn,publisher,score,sources}]`), `identification_status` |
| `batch_queue` | `book_id` (PK), `added_at` — persistent batch processing queue; consumed in FIFO order by `run_batch_consumer()` |
`ai_blocks` are persistent: set by the identification pipeline, shown in the book detail panel as clickable cards. Hidden by default for `user_approved` books.
### DB Migration (`src/migrate.py`)
`run_migration()` is called at startup (after `init_db()`). Migrations:
- `_migrate_v1`: adds the `ai_blocks` column if absent; clears stale AI fields (runs once only, not on every startup).
- `_migrate_v2`: creates the `batch_queue` table if absent.
`identification_status`: `unidentified` → `ai_identified` → `user_approved`.
@@ -127,7 +148,12 @@ N interior boundaries → N+1 segments. `full = [0] + boundaries + [1]`. Segment
- Book K spine = shelf image cropped to `(x_start, *, x_end, *)` with composed crop if cabinet-based
## Frontend JS
No ES modules, no bundler. All files use global scope; load order in `index.html` is the dependency order. State lives in `state.js` (`S`, `_plugins`, `_bnd`, `_photoQueue`, etc.). Events delegated via `#app` in `events.js`.
No ES modules, no bundler. All files use global scope; load order in `index.html` is the dependency order. State lives in `state.js` (`S`, `_plugins`, `_bnd`, `_photoQueue`, `_aiLog`, `_aiLogWs`, etc.). Events delegated via `#app` in `events.js`.
`connectAiLogWs()` subscribes to `/ws/ai-log` on startup. Message types:
- `snapshot` — full log on connect (`_aiLog` initialized)
- `update` — single log entry added or updated (spinner count in header updated)
- `entity_update` — entity data changed (tree node updated via `walkTree`; detail panel or full render depending on selection)
## Tooling
```
@@ -150,8 +176,11 @@ PATCH /api/cabinets/{id}/boundaries # update shelf boundary
PATCH /api/shelves/{id}/boundaries # update book boundary list
GET /api/shelves/{id}/image # shelf image (override or cabinet crop)
GET /api/books/{id}/spine # book spine crop
POST /api/books/{id}/identify # full identification pipeline (VLM → archives → main model)
POST /api/books/{id}/process # full auto-queue pipeline (single book)
POST /api/batch / GET /api/batch/status # batch processing
WS /ws/batch # batch progress push (replaces polling)
WS /ws/ai-log # AI request log: snapshot + update per request + entity_update on book changes
POST /api/books/{id}/dismiss-field # dismiss a candidate suggestion
PATCH /api/{kind}/reorder # drag-to-reorder
POST /api/cabinets/{id}/crop / POST /api/shelves/{id}/crop # permanent crop

View File

@@ -19,9 +19,12 @@ const appGlobals = {
S: 'writable',
_plugins: 'writable',
_batchState: 'writable',
_batchPollTimer: 'writable',
_batchWs: 'writable',
_bnd: 'writable',
_photoQueue: 'writable',
_aiBlocksVisible: 'writable',
_aiLog: 'writable',
_aiLogWs: 'writable',
// helpers.js
esc: 'readonly',
@@ -46,6 +49,7 @@ const appGlobals = {
isLoading: 'readonly',
vPluginBtn: 'readonly',
vBatchBtn: 'readonly',
vAiIndicator: 'readonly',
candidateSugRows: 'readonly',
_STATUS_BADGE: 'readonly',
getBookStats: 'readonly',
@@ -56,6 +60,7 @@ const appGlobals = {
// detail-render.js
vDetailBody: 'readonly',
aiBlocksShown: 'readonly',
// canvas-crop.js
startCropMode: 'readonly',
@@ -72,7 +77,8 @@ const appGlobals = {
// init.js
render: 'readonly',
renderDetail: 'readonly',
startBatchPolling: 'readonly',
connectBatchWs: 'readonly',
connectAiLogWs: 'readonly',
loadTree: 'readonly',
// CDN (SortableJS loaded via <script> in index.html)
@@ -96,8 +102,15 @@ export default [
// Catch typos and missing globals
'no-undef': 'error',
// builtinGlobals:false — only catch intra-file re-declarations, not globals
// from appGlobals which are intentionally re-defined in their owning file.
'no-redeclare': ['error', { builtinGlobals: false }],
// Unused variables: allow leading-underscore convention for intentional ignores
'no-unused-vars': ['error', { argsIgnorePattern: '^_', varsIgnorePattern: '^_' }],
'no-unused-vars': [
'error',
{ argsIgnorePattern: '^_', varsIgnorePattern: '^_', caughtErrorsIgnorePattern: '^_' },
],
// Require strict equality
eqeqeq: ['error', 'always', { null: 'ignore' }],

View File

@@ -48,6 +48,7 @@ line-length = 120
[tool.flake8]
max-line-length = 120
extend-ignore = ["E203"]
exclude = "node_modules/*"
[tool.pyright]
pythonVersion = "3.14"

View File

@@ -8,9 +8,10 @@ No SQL here; no business logic here.
import asyncio
import dataclasses
import json
import time
from typing import Any, TypeVar
from fastapi import APIRouter, File, HTTPException, Request, UploadFile
from fastapi import APIRouter, File, HTTPException, Request, UploadFile, WebSocket, WebSocketDisconnect
from mashumaro.codecs import BasicDecoder
import db
@@ -55,8 +56,12 @@ async def _parse(decoder: BasicDecoder[_T], request: Request) -> _T:
@router.get("/api/config")
def api_config() -> dict[str, Any]:
    """Return UI settings plus the plugin manifest for the frontend."""
    cfg = get_config()
    # Propagate the configured ring-buffer size to the AI log on every fetch.
    logic.set_max_entries(cfg.ui.ai_log_max_entries)
    return {
        # NOTE(review): duplicate key below looks like an old/new diff pair;
        # the second "boundary_grab_px" entry (using cfg) wins at runtime — confirm.
        "boundary_grab_px": get_config().ui.boundary_grab_px,
        "boundary_grab_px": cfg.ui.boundary_grab_px,
        "spine_padding_pct": cfg.ui.spine_padding_pct,
        "ai_log_max_entries": cfg.ui.ai_log_max_entries,
        "plugins": plugin_registry.get_manifest(),
    }
@@ -333,8 +338,9 @@ async def book_photo(book_id: str, image: UploadFile = File(...)) -> dict[str, A
@router.get("/api/books/{book_id}/spine")
def book_spine(book_id: str) -> Any:
    """Serve the cropped spine image for a book, padded per UI config."""
    padding = get_config().ui.spine_padding_pct
    with db.connection() as c:
        # NOTE(review): the two calls below look like an old/new diff pair;
        # the padded variant on the second line supersedes the first — confirm.
        path, crop = book_spine_source(c, book_id)
        path, crop = book_spine_source(c, book_id, padding)
    return serve_crop(path, crop)
@@ -365,6 +371,26 @@ async def process_book(book_id: str) -> dict[str, Any]:
return dataclasses.asdict(book)
@router.post("/api/books/{book_id}/identify")
async def identify_book(book_id: str) -> dict[str, Any]:
    """Run the full identification pipeline (VLM -> archives -> main model) for a single book.

    Args:
        book_id: ID of the book to identify.

    Returns:
        The updated book row as a plain dict.

    Raises:
        HTTPException: 404 if the book does not exist.
    """
    with db.connection() as c:
        if not db.get_book(c, book_id):
            raise HTTPException(404, "Book not found")
    # get_running_loop() is the recommended API inside a coroutine;
    # get_event_loop() is deprecated for this use since Python 3.10.
    loop = asyncio.get_running_loop()
    started = time.time()
    entry_id = logic.log_start("identify_pipeline", "books", book_id, "pipeline", book_id)
    try:
        result = await loop.run_in_executor(logic.batch_executor, logic.run_identify_pipeline, book_id)
        logic.log_finish(entry_id, "ok", result.ai_title or "", started)
    except Exception as exc:
        logic.log_finish(entry_id, "error", str(exc), started)
        raise
    result_dict = dataclasses.asdict(result)
    # Push the fresh data to WebSocket subscribers so open UIs update live.
    logic.notify_entity_update("books", book_id, result_dict)
    return result_dict
# ── Universal plugin endpoint ─────────────────────────────────────────────────
@@ -393,14 +419,15 @@ async def run_plugin(entity_type: str, entity_id: str, plugin_id: str) -> dict[s
@router.post("/api/batch")
async def start_batch() -> dict[str, Any]:
    """Queue all unidentified books and start the batch consumer if idle."""
    # NOTE(review): this block interleaves old and new diff lines; the
    # queue-based flow below (add_to_queue / run_batch_consumer) appears to
    # supersede the run_batch path — confirm against the applied commit.
    if logic.batch_state["running"]:
        return {"already_running": True}
    with db.connection() as c:
        ids = db.get_unidentified_book_ids(c)
    if not ids:
        return {"started": False, "reason": "no_unidentified_books"}
    asyncio.create_task(logic.run_batch(ids))
    return {"started": True, "total": len(ids)}
    added = logic.add_to_queue(ids)
    if logic.batch_state["running"]:
        return {"already_running": True, "added": added}
    asyncio.create_task(logic.run_batch_consumer())
    return {"started": True, "added": added}
@router.get("/api/batch/status")
@@ -408,6 +435,48 @@ def batch_status() -> dict[str, Any]:
return dict(logic.batch_state)
@router.websocket("/ws/batch")
async def ws_batch(websocket: WebSocket) -> None:
    """Stream batch_state snapshots as JSON until the batch finishes or the client disconnects.

    Sends the current state immediately on connect, then pushes each subsequent
    update until running transitions to false.
    """
    await websocket.accept()
    q = logic.subscribe_batch()
    try:
        # Immediate snapshot so the client never waits for the first update.
        await websocket.send_json(dict(logic.batch_state))
        # NOTE(review): if the batch finishes between the loop condition and
        # q.get(), this relies on the publisher pushing one final state with
        # running=false to unblock the await — confirm against the publisher.
        while logic.batch_state["running"]:
            state = await q.get()
            await websocket.send_json(state)
            if not state["running"]:
                break
    except WebSocketDisconnect:
        pass
    finally:
        # Always drop the subscription so the pub-sub queue does not leak.
        logic.unsubscribe_batch(q)
@router.websocket("/ws/ai-log")
async def ws_ai_log(websocket: WebSocket) -> None:
    """Stream AI request log entries as JSON.

    Sends a snapshot of all current entries on connect, then pushes each new
    update message until the client disconnects.
    """
    await websocket.accept()
    q = logic.subscribe_log()
    try:
        # Snapshot first so the client starts from a complete picture.
        await websocket.send_json({"type": "snapshot", "entries": logic.get_snapshot()})
        while True:
            msg = await q.get()
            await websocket.send_json(msg)
    except WebSocketDisconnect:
        pass
    finally:
        # Always release the subscription so the pub-sub queue does not leak.
        logic.unsubscribe_log(q)
# ── Reorder ───────────────────────────────────────────────────────────────────
_REORDER_TABLES = {"rooms", "cabinets", "shelves", "books"}

View File

@@ -8,18 +8,21 @@ Usage:
poetry run serve
"""
import asyncio
from contextlib import asynccontextmanager
from fastapi import FastAPI, Request
from fastapi.responses import FileResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
import logic
import plugins as plugin_registry
from api import router
from config import get_config, load_config
from db import init_db
from files import IMAGES_DIR, init_dirs
from errors import BadRequestError, ConfigError, ImageReadError, NotFoundError
from migrate import run_migration
@asynccontextmanager
@@ -27,8 +30,22 @@ async def lifespan(app: FastAPI):
load_config()
init_dirs()
init_db()
run_migration()
plugin_registry.load_plugins(get_config())
cfg = get_config()
logic.load_from_db(cfg.ui.ai_log_max_entries)
logic.init_thread_logging(asyncio.get_running_loop())
pending = logic.get_pending_batch()
if pending:
asyncio.create_task(logic.run_batch_consumer())
yield
# Graceful shutdown: cancel the running batch task so uvicorn isn't blocked,
# then release executor threads (running threads finish naturally in the background).
task = logic.get_batch_task()
if task is not None and not task.done():
task.cancel()
logic.batch_executor.shutdown(wait=False, cancel_futures=True)
logic.archive_executor.shutdown(wait=False, cancel_futures=True)
app = FastAPI(lifespan=lifespan)

View File

@@ -53,6 +53,7 @@ class AIFunctionConfig:
max_image_px: int = 1600
confidence_threshold: float = 0.8
name: str = ""
is_vlm: bool = False
@dataclass
@@ -76,6 +77,8 @@ class FunctionsConfig:
@dataclass
class UIConfig:
    # Pixel grab threshold for dragging boundary lines in the canvas UI.
    boundary_grab_px: int = 14
    # Extra fraction of book width added on each side of the spine crop.
    # NOTE(review): default here is 0.10 while ui.yaml ships 0.30 — confirm intended.
    spine_padding_pct: float = 0.10
    # Max AI request log entries kept in the in-memory ring buffer.
    ai_log_max_entries: int = 100
@dataclass

115
src/db.py
View File

@@ -5,11 +5,13 @@ No file I/O, no config, no business logic. All SQL lives here.
import json
import sqlite3
import time
import uuid
from collections.abc import Iterator
from contextlib import contextmanager
from datetime import datetime
from pathlib import Path
from typing import Any
from mashumaro.codecs import BasicDecoder
@@ -67,7 +69,24 @@ CREATE TABLE IF NOT EXISTS books (
title_confidence REAL DEFAULT 0,
analyzed_at TEXT,
created_at TEXT NOT NULL,
candidates TEXT DEFAULT NULL
candidates TEXT DEFAULT NULL,
ai_blocks TEXT DEFAULT NULL
);
CREATE TABLE IF NOT EXISTS ai_log (
id TEXT PRIMARY KEY,
ts REAL NOT NULL,
plugin_id TEXT NOT NULL,
entity_type TEXT NOT NULL,
entity_id TEXT NOT NULL,
model TEXT NOT NULL,
request TEXT NOT NULL DEFAULT '',
status TEXT NOT NULL DEFAULT 'running',
response TEXT NOT NULL DEFAULT '',
duration_ms INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE IF NOT EXISTS batch_queue (
book_id TEXT PRIMARY KEY,
added_at REAL NOT NULL
);
"""
@@ -413,11 +432,12 @@ def create_book(db: sqlite3.Connection, shelf_id: str) -> BookRow:
"analyzed_at": None,
"created_at": now(),
"candidates": None,
"ai_blocks": None,
}
db.execute(
"INSERT INTO books VALUES(:id,:shelf_id,:position,:image_filename,:title,:author,:year,:isbn,:publisher,"
":notes,:raw_text,:ai_title,:ai_author,:ai_year,:ai_isbn,:ai_publisher,:identification_status,"
":title_confidence,:analyzed_at,:created_at,:candidates)",
":title_confidence,:analyzed_at,:created_at,:candidates,:ai_blocks)",
data,
)
return _book_dec.decode(data)
@@ -494,6 +514,10 @@ def set_book_candidates(db: sqlite3.Connection, book_id: str, candidates_json: s
db.execute("UPDATE books SET candidates=? WHERE id=?", [candidates_json, book_id])
def set_book_ai_blocks(db: sqlite3.Connection, book_id: str, ai_blocks_json: str) -> None:
    """Persist the serialized ai_blocks JSON for one book."""
    params = [ai_blocks_json, book_id]
    db.execute("UPDATE books SET ai_blocks=? WHERE id=?", params)
def get_book_rank(db: sqlite3.Connection, book_id: str) -> int:
"""0-based rank of book among its siblings sorted by position."""
row = db.execute("SELECT shelf_id FROM books WHERE id=?", [book_id]).fetchone()
@@ -513,3 +537,90 @@ def get_unidentified_book_ids(db: sqlite3.Connection) -> list[str]:
def reorder_entities(db: sqlite3.Connection, table: str, ids: list[str]) -> None:
    """Rewrite each row's position to match the order of ids (1-based)."""
    # table is interpolated, not bound: callers must pass a vetted table name.
    sql = f"UPDATE {table} SET position=? WHERE id=?"
    db.executemany(sql, [(pos, eid) for pos, eid in enumerate(ids, start=1)])
# ── AI log ────────────────────────────────────────────────────────────────────
def insert_ai_log_entry(
    db: sqlite3.Connection,
    entry_id: str,
    ts: float,
    plugin_id: str,
    entity_type: str,
    entity_id: str,
    model: str,
    request: str,
) -> None:
    """Record the start of an AI request; status stays at the schema default 'running'."""
    # OR IGNORE: a re-used entry_id must never raise or clobber the first row.
    sql = (
        "INSERT OR IGNORE INTO ai_log"
        " (id, ts, plugin_id, entity_type, entity_id, model, request) VALUES (?,?,?,?,?,?,?)"
    )
    db.execute(sql, (entry_id, ts, plugin_id, entity_type, entity_id, model, request))
def update_ai_log_entry(db: sqlite3.Connection, entry_id: str, status: str, response: str, duration_ms: int) -> None:
    """Finalize an AI log entry with its outcome, response summary, and elapsed time."""
    sql = "UPDATE ai_log SET status=?, response=?, duration_ms=? WHERE id=?"
    db.execute(sql, (status, response, duration_ms, entry_id))
def get_ai_log_entries(db: sqlite3.Connection, limit: int) -> list[dict[str, Any]]:
    """Return the most recent AI log entries, oldest first.

    Fetches the newest `limit` rows, then reverses so callers receive
    chronological order. Requires the connection's row_factory to yield
    mapping-capable rows (sqlite3.Row).
    """
    cols = "id, ts, plugin_id, entity_type, entity_id, model, request, status, response, duration_ms"
    newest_first = db.execute(
        f"SELECT {cols} FROM ai_log ORDER BY ts DESC LIMIT ?", (limit,)
    ).fetchall()
    entries = [dict(row) for row in newest_first]
    entries.reverse()
    return entries
# ── Batch queue ────────────────────────────────────────────────────────────────
def add_to_batch_queue(db: sqlite3.Connection, book_ids: list[str]) -> None:
    """Enqueue book IDs for batch processing, silently skipping duplicates.

    Args:
        db: Open database connection (must be writable).
        book_ids: Book IDs to enqueue.
    """
    # One timestamp for the whole call: all IDs were requested together.
    stamp = time.time()
    rows = [(book_id, stamp) for book_id in book_ids]
    db.executemany("INSERT OR IGNORE INTO batch_queue (book_id, added_at) VALUES (?,?)", rows)
def remove_from_batch_queue(db: sqlite3.Connection, book_id: str) -> None:
    """Delete one book ID from the batch queue; a miss is a silent no-op.

    Args:
        db: Open database connection (must be writable).
        book_id: Book ID to dequeue.
    """
    db.execute("DELETE FROM batch_queue WHERE book_id=?", (book_id,))
def get_batch_queue(db: sqlite3.Connection) -> list[str]:
    """Return all queued book IDs ordered by insertion time (oldest first).

    Rows enqueued by a single add_to_batch_queue() call share one added_at
    timestamp, so `rowid` is used as a tiebreaker to guarantee deterministic
    FIFO order; `ORDER BY added_at` alone leaves equal-timestamp ordering
    unspecified by SQLite.

    Args:
        db: Open database connection.

    Returns:
        List of book ID strings.
    """
    rows = db.execute("SELECT book_id FROM batch_queue ORDER BY added_at, rowid").fetchall()
    return [str(r[0]) for r in rows]
def clear_batch_queue(db: sqlite3.Connection) -> None:
    """Drop every pending entry from the batch queue.

    Args:
        db: Open database connection (must be writable).
    """
    db.execute("DELETE FROM batch_queue")

View File

@@ -154,6 +154,21 @@ class NoRawTextError(BadRequestError):
return f"Book {self.book_id!r} has no raw text; run text recognizer first"
class NoPipelinePluginError(BadRequestError):
    """Raised when the identification pipeline requires a plugin category with no registered plugins.

    Attributes:
        plugin_category: The plugin category (e.g. 'text_recognizer') that has no registered plugins.
    """

    def __init__(self, plugin_category: str) -> None:
        super().__init__()
        self.plugin_category = plugin_category

    def __str__(self) -> str:
        category = self.plugin_category
        return f"No {category!r} plugin configured; add one to functions.*.yaml"
class InvalidPluginEntityError(BadRequestError):
"""Raised when a plugin category does not support the requested entity type.

141
src/log_thread.py Normal file
View File

@@ -0,0 +1,141 @@
"""Thread-safe AI logging helpers for use from thread pool workers.
Provides start_entry() / finish_entry() that schedule log operations on the
event loop via call_soon_threadsafe, making them safe to call from executor
threads. Also provides a ContextVar so plugin/entity context flows through
asyncio.run_in_executor() calls automatically.
Initialized by logic/ai_log.py at app startup via set_app_loop().
Importable by both logic/ and plugins/ without circular dependencies.
"""
import concurrent.futures
import time
from collections.abc import Callable
from contextvars import ContextVar
from dataclasses import dataclass
from typing import Any
# asyncio is part of the standard library and always importable; the former
# try/except ImportError fallback (and its unused _AbstractEventLoop alias)
# was dead code and has been removed.
import asyncio
@dataclass
class _LogCtx:
    # Identity of the plugin run that owns subsequent log entries.
    plugin_id: str
    entity_type: str
    entity_id: str


# ContextVar propagated automatically into asyncio executor threads
# (run_in_executor copies the current contextvars context into the worker).
_ctx: ContextVar[_LogCtx | None] = ContextVar("_log_ctx", default=None)

# Module-level bridge state; initialized at startup by set_app_loop().
# All None until then, so start_entry()/finish_entry() degrade to no-ops.
_loop: asyncio.AbstractEventLoop | None = None
_log_start_fn: Callable[..., str] | None = None
_log_finish_fn: Callable[..., None] | None = None
def set_app_loop(
    loop: asyncio.AbstractEventLoop,
    log_start: Callable[..., str],
    log_finish: Callable[..., None],
) -> None:
    """Register the running event loop and the synchronous logging callables.

    Must be called once at app startup from the async context.

    Args:
        loop: The running asyncio event loop.
        log_start: Synchronous log_start function from logic.ai_log.
        log_finish: Synchronous log_finish function from logic.ai_log.
    """
    global _loop, _log_start_fn, _log_finish_fn
    _loop, _log_start_fn, _log_finish_fn = loop, log_start, log_finish
def set_log_ctx(plugin_id: str, entity_type: str, entity_id: str) -> None:
    """Bind the logging context used by subsequent start_entry() calls.

    Invoke before run_in_executor() so the context propagates into the
    executor thread, or directly inside a worker thread to set it locally.

    Args:
        plugin_id: Plugin ID to attribute log entries to.
        entity_type: Entity type (e.g. ``"books"``).
        entity_id: Entity ID.
    """
    new_ctx = _LogCtx(plugin_id=plugin_id, entity_type=entity_type, entity_id=entity_id)
    _ctx.set(new_ctx)
def start_entry(model: str, request_summary: str) -> str:
    """Start a log entry from a thread pool worker.

    Reads context from the ContextVar set by set_log_ctx(). Schedules
    log_start on the event loop and blocks briefly to obtain the entry ID.
    Returns empty string if context or loop is unavailable.

    Args:
        model: Model name used for the request.
        request_summary: Short human-readable description.

    Returns:
        Log entry ID string, or ``""`` if logging is unavailable.
    """
    ctx = _ctx.get()
    if ctx is None or _loop is None or _log_start_fn is None:
        return ""
    # concurrent.futures.Future (not asyncio.Future): it is thread-safe, so
    # this worker thread can block on fut.result() below.
    fut: concurrent.futures.Future[str] = concurrent.futures.Future()
    # Snapshot the callable and context fields into locals so the closure
    # does not observe later mutations of module/context state.
    fn = _log_start_fn
    pid, et, eid = ctx.plugin_id, ctx.entity_type, ctx.entity_id

    def _call() -> None:
        try:
            entry_id = fn(pid, et, eid, model, request_summary)
            fut.set_result(entry_id)
        except Exception as exc:  # noqa: BLE001
            fut.set_exception(exc)

    # call_soon_threadsafe is the only loop-scheduling API that may be
    # invoked from outside the loop's own thread.
    _loop.call_soon_threadsafe(_call)
    try:
        # Bounded wait: never hang a worker if the loop is stalled or closed.
        return fut.result(timeout=5)
    except Exception:
        return ""
def finish_entry(entry_id: str, status: str, response: str, started_at: float) -> None:
    """Finish a log entry from a thread pool worker (fire-and-forget).

    Schedules log_finish on the event loop. Does nothing if entry_id is empty
    or the loop is unavailable.

    Args:
        entry_id: ID returned by start_entry().
        status: ``"ok"`` or ``"error"``.
        response: Short summary of response or error message.
        started_at: ``time.time()`` value recorded before the request.
    """
    if not entry_id:
        return
    if _loop is None or _log_finish_fn is None:
        return
    finisher = _log_finish_fn
    _loop.call_soon_threadsafe(finisher, entry_id, status, response, started_at)
def timed_start(model: str, request_summary: str) -> tuple[str, float]:
    """Start an entry and capture the wall-clock start time in one call.

    Returns:
        Tuple of (entry_id, started_at) for passing to finish_entry().
    """
    begin = time.time()
    return start_entry(model, request_summary), begin

View File

@@ -2,13 +2,37 @@
import asyncio
import dataclasses
import time
from typing import Any
import log_thread
import plugins as plugin_registry
from errors import InvalidPluginEntityError, PluginNotFoundError, PluginTargetMismatchError
from models import PluginLookupResult
from logic.ai_log import (
get_snapshot,
init_thread_logging,
load_from_db,
log_finish,
log_start,
notify_entity_update,
set_max_entries,
subscribe_log,
unsubscribe_log,
)
from logic.archive import run_archive_searcher, run_archive_searcher_bg
from logic.batch import archive_executor, batch_executor, batch_state, process_book_sync, run_batch
from logic.batch import (
add_to_queue,
archive_executor,
batch_executor,
batch_state,
get_batch_task,
get_pending_batch,
process_book_sync,
run_batch_consumer,
subscribe_batch,
unsubscribe_batch,
)
from logic.boundaries import book_spine_source, bounds_for_index, run_boundary_detector, shelf_source
from logic.identification import (
AI_FIELDS,
@@ -17,6 +41,7 @@ from logic.identification import (
compute_status,
dismiss_field,
run_book_identifier,
run_identify_pipeline,
run_text_recognizer,
save_user_fields,
)
@@ -24,6 +49,7 @@ from logic.images import prep_img_b64, crop_save, serve_crop
__all__ = [
"AI_FIELDS",
"add_to_queue",
"apply_ai_result",
"archive_executor",
"batch_executor",
@@ -35,17 +61,31 @@ __all__ = [
"crop_save",
"dismiss_field",
"dispatch_plugin",
"get_batch_task",
"get_pending_batch",
"get_snapshot",
"init_thread_logging",
"load_from_db",
"log_finish",
"log_start",
"notify_entity_update",
"prep_img_b64",
"process_book_sync",
"run_archive_searcher",
"run_archive_searcher_bg",
"run_batch",
"run_batch_consumer",
"run_book_identifier",
"run_boundary_detector",
"run_identify_pipeline",
"run_text_recognizer",
"save_user_fields",
"serve_crop",
"set_max_entries",
"shelf_source",
"prep_img_b64",
"subscribe_batch",
"subscribe_log",
"unsubscribe_batch",
"unsubscribe_log",
]
@@ -58,6 +98,10 @@ async def dispatch_plugin(
) -> dict[str, Any]:
"""Validate plugin/entity compatibility, run the plugin, and trigger auto-queue follow-ups.
Sets the log context ContextVar before each run_in_executor call so that
AIClient and archive runner logging is attributed to the correct plugin and entity.
After a successful run, broadcasts an entity_update to WebSocket subscribers.
Args:
plugin_id: The plugin ID string (used in error reporting).
lookup: Discriminated tuple from plugins.get_plugin(); (None, None) if not found.
@@ -84,25 +128,65 @@ async def dispatch_plugin(
raise PluginTargetMismatchError(plugin.plugin_id, "shelves", plugin.target)
if entity_type == "shelves" and plugin.target != "books":
raise PluginTargetMismatchError(plugin.plugin_id, "books", plugin.target)
started = time.time()
entry_id = log_start(plugin_id, entity_type, entity_id, plugin.model, entity_id)
log_thread.set_log_ctx(plugin_id, entity_type, entity_id)
try:
result = await loop.run_in_executor(None, run_boundary_detector, plugin, entity_type, entity_id)
return dataclasses.asdict(result)
log_finish(entry_id, "ok", "done", started)
except Exception as exc:
log_finish(entry_id, "error", str(exc), started)
raise
result_dict = dataclasses.asdict(result)
notify_entity_update(entity_type, entity_id, result_dict)
return result_dict
case ("text_recognizer", plugin):
if entity_type != "books":
raise InvalidPluginEntityError("text_recognizer", entity_type)
started = time.time()
entry_id = log_start(plugin_id, entity_type, entity_id, plugin.model, entity_id)
log_thread.set_log_ctx(plugin_id, entity_type, entity_id)
try:
result = await loop.run_in_executor(None, run_text_recognizer, plugin, entity_id)
log_finish(entry_id, "ok", result.raw_text[:120] if result.raw_text else "", started)
except Exception as exc:
log_finish(entry_id, "error", str(exc), started)
raise
for ap in plugin_registry.get_auto_queue("archive_searchers"):
loop.run_in_executor(archive_executor, run_archive_searcher_bg, ap, entity_id)
return dataclasses.asdict(result)
result_dict = dataclasses.asdict(result)
notify_entity_update(entity_type, entity_id, result_dict)
return result_dict
case ("book_identifier", plugin):
if entity_type != "books":
raise InvalidPluginEntityError("book_identifier", entity_type)
started = time.time()
entry_id = log_start(plugin_id, entity_type, entity_id, plugin.model, entity_id)
log_thread.set_log_ctx(plugin_id, entity_type, entity_id)
try:
result = await loop.run_in_executor(None, run_book_identifier, plugin, entity_id)
return dataclasses.asdict(result)
log_finish(entry_id, "ok", result.ai_title or "", started)
except Exception as exc:
log_finish(entry_id, "error", str(exc), started)
raise
result_dict = dataclasses.asdict(result)
notify_entity_update(entity_type, entity_id, result_dict)
return result_dict
case ("archive_searcher", plugin):
if entity_type != "books":
raise InvalidPluginEntityError("archive_searcher", entity_type)
started = time.time()
entry_id = log_start(plugin_id, entity_type, entity_id, "", entity_id)
log_thread.set_log_ctx(plugin_id, entity_type, entity_id)
try:
result = await loop.run_in_executor(archive_executor, run_archive_searcher, plugin, entity_id)
return dataclasses.asdict(result)
log_finish(entry_id, "ok", "done", started)
except Exception as exc:
log_finish(entry_id, "error", str(exc), started)
raise
result_dict = dataclasses.asdict(result)
notify_entity_update(entity_type, entity_id, result_dict)
return result_dict

190
src/logic/ai_log.py Normal file
View File

@@ -0,0 +1,190 @@
"""AI request log: ring buffer with WebSocket pub-sub for live UI updates.
Entries are persisted to the ai_log table so they survive service restarts.
Call load_from_db() once at startup after init_db() to populate the ring buffer.
Call init_thread_logging() once at startup to enable logging from executor threads.
"""
import asyncio
import time
from collections import deque
from typing import Any
import db
import log_thread
from models import AiLogEntry
# Ring buffer; max size set at runtime by set_max_entries().
_log: deque[AiLogEntry] = deque(maxlen=100)
# Live WebSocket subscribers; each queue receives update dicts from _notify().
_log_subs: set[asyncio.Queue[dict[str, Any]]] = set()
# Monotonic entry-ID counter, boxed in a one-element list so functions can mutate it in place.
_next_id: list[int] = [0]
def set_max_entries(n: int) -> None:
    """Resize the ring buffer.

    Existing entries are carried over; if the new size is smaller, only the
    most recent entries survive (deque semantics).

    Args:
        n: Maximum number of entries to retain.
    """
    global _log
    resized: deque[AiLogEntry] = deque(_log, maxlen=n)
    _log = resized
def subscribe_log() -> asyncio.Queue[dict[str, Any]]:
    """Register a subscriber for AI log updates.

    Returns:
        Queue that will receive update messages as dicts with keys
        ``type`` (``"snapshot"`` or ``"update"``) and either ``entries``
        or ``entry``.
    """
    queue: asyncio.Queue[dict[str, Any]] = asyncio.Queue()
    _log_subs.add(queue)
    return queue
def unsubscribe_log(q: asyncio.Queue[dict[str, Any]]) -> None:
    """Remove a subscriber queue.

    Safe to call with a queue that was never (or is no longer) subscribed;
    discard() is a no-op in that case.

    Args:
        q: Queue previously returned by subscribe_log().
    """
    _log_subs.discard(q)
def get_snapshot() -> list[AiLogEntry]:
    """Return a copy of the current log for snapshot delivery on WS connect.

    The returned list is a shallow copy: entries are the live AiLogEntry dicts,
    so callers should not mutate them.

    Returns:
        List of AiLogEntry dicts, oldest first.
    """
    return list(_log)
def load_from_db(limit: int = 100) -> None:
    """Populate the in-memory ring buffer from the database.

    Call once at startup after init_db(). Does not push WS notifications.
    Any numeric IDs loaded from the DB advance _next_id to avoid collisions.

    Args:
        limit: Maximum number of entries to load (most recent).
    """
    with db.connection() as conn:
        persisted = db.get_ai_log_entries(conn, limit)
    for row in persisted:
        loaded: AiLogEntry = {
            "id": str(row["id"]),
            "ts": float(str(row["ts"])),
            "plugin_id": str(row["plugin_id"]),
            "entity_type": str(row["entity_type"]),
            "entity_id": str(row["entity_id"]),
            "model": str(row["model"]),
            "request": str(row["request"]),
            "status": str(row["status"]),
            "response": str(row["response"]),
            "duration_ms": int(str(row["duration_ms"])),
        }
        _log.append(loaded)
        # Advance the ID counter past any numeric IDs already used in the DB.
        try:
            numeric_id = int(loaded["id"])
        except ValueError:
            continue
        if numeric_id >= _next_id[0]:
            _next_id[0] = numeric_id + 1
def log_start(plugin_id: str, entity_type: str, entity_id: str, model: str, request_summary: str) -> str:
    """Record the start of an AI request and return its log entry ID.

    Must be called from the asyncio event loop thread. Persists the entry to DB.

    Args:
        plugin_id: Plugin that is running.
        entity_type: Entity type (e.g. ``"books"``).
        entity_id: Entity ID.
        model: Model name used for the request.
        request_summary: Short human-readable description of the request.

    Returns:
        Opaque string ID for the log entry, to be passed to log_finish().
    """
    _next_id[0] += 1
    new_id = str(_next_id[0])
    now_ts = time.time()
    started: AiLogEntry = {
        "id": new_id,
        "ts": now_ts,
        "plugin_id": plugin_id,
        "entity_type": entity_type,
        "entity_id": entity_id,
        "model": model,
        "request": request_summary,
        "status": "running",
        "response": "",
        "duration_ms": 0,
    }
    _log.append(started)
    # Push the fresh entry to all live WS subscribers (copy so later mutation
    # of the ring-buffer entry does not race the queued message).
    _notify({"type": "update", "entry": dict(started)})
    try:
        with db.transaction() as conn:
            db.insert_ai_log_entry(conn, new_id, now_ts, plugin_id, entity_type, entity_id, model, request_summary)
    except Exception:
        pass  # log persistence is best-effort
    return new_id
def log_finish(entry_id: str, status: str, response: str, started_at: float) -> None:
    """Update a log entry with the result of an AI request.

    Must be called from the asyncio event loop thread. Persists the update to DB.
    If the entry has already been evicted from the ring buffer, only the DB row
    is updated (no WS notification).

    Args:
        entry_id: ID returned by log_start().
        status: ``"ok"`` or ``"error"``.
        response: Short summary of the response or error message.
        started_at: ``time.time()`` value recorded before the request.
    """
    elapsed_ms = int(1000 * (time.time() - started_at))
    target = next((e for e in _log if e["id"] == entry_id), None)
    if target is not None:
        target["status"] = status
        target["response"] = response
        target["duration_ms"] = elapsed_ms
        _notify({"type": "update", "entry": dict(target)})
    try:
        with db.transaction() as conn:
            db.update_ai_log_entry(conn, entry_id, status, response, elapsed_ms)
    except Exception:
        pass  # log persistence is best-effort
def init_thread_logging(loop: asyncio.AbstractEventLoop) -> None:
    """Enable log_start / log_finish calls from executor threads.

    Must be called once at app startup after the event loop is running.
    Stores the loop and function references in log_thread for use from workers.

    Args:
        loop: The running asyncio event loop.
    """
    # log_thread holds the loop plus these callables so executor threads can
    # schedule log writes back onto the event loop thread.
    log_thread.set_app_loop(loop, log_start, log_finish)
def notify_entity_update(entity_type: str, entity_id: str, data: dict[str, Any]) -> None:
    """Broadcast an entity update to all AI-log WebSocket subscribers.

    Must be called from the asyncio event loop thread.

    Args:
        entity_type: Entity type string (e.g. ``"books"``).
        entity_id: Entity ID.
        data: Dict representation of the updated entity row.
    """
    payload: dict[str, Any] = {
        "type": "entity_update",
        "entity_type": entity_type,
        "entity_id": entity_id,
        "data": data,
    }
    _notify(payload)
def _notify(msg: dict[str, Any]) -> None:
    # Fan the message out to every subscriber. Queues created by
    # subscribe_log() are unbounded, so put_nowait cannot raise QueueFull.
    for q in _log_subs:
        q.put_nowait(msg)

View File

@@ -1,8 +1,10 @@
"""Archive search plugin runner."""
import json
import time
import db
import log_thread
from errors import BookNotFoundError
from models import ArchiveSearcherPlugin, BookRow, CandidateRecord
from logic.identification import build_query
@@ -11,6 +13,9 @@ from logic.identification import build_query
def run_archive_searcher(plugin: ArchiveSearcherPlugin, book_id: str) -> BookRow:
"""Run an archive search for a book and merge results into the candidates list.
Sets the log context for this thread so individual HTTP requests logged inside
the plugin are attributed to the correct plugin and entity.
Args:
plugin: The archive searcher plugin to execute.
book_id: ID of the book to search for.
@@ -21,6 +26,7 @@ def run_archive_searcher(plugin: ArchiveSearcherPlugin, book_id: str) -> BookRow
Raises:
BookNotFoundError: If book_id does not exist.
"""
log_thread.set_log_ctx(plugin.plugin_id, "books", book_id)
with db.transaction() as c:
book = db.get_book(c, book_id)
if not book:
@@ -28,7 +34,14 @@ def run_archive_searcher(plugin: ArchiveSearcherPlugin, book_id: str) -> BookRow
query = build_query(book)
if not query:
return book
started = time.time()
entry_id = log_thread.start_entry("", f"search: {query[:80]}")
try:
results: list[CandidateRecord] = plugin.search(query)
log_thread.finish_entry(entry_id, "ok", f"{len(results)} result(s)", started)
except Exception as exc:
log_thread.finish_entry(entry_id, "error", str(exc), started)
raise
existing: list[CandidateRecord] = json.loads(book.candidates or "[]")
existing = [cd for cd in existing if cd.get("source") != plugin.plugin_id]
existing.extend(results)

View File

@@ -1,66 +1,168 @@
"""Batch processing pipeline: auto-queue text recognition and archive search."""
import asyncio
import dataclasses
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Any
import db
import plugins as plugin_registry
from logic.ai_log import log_finish, log_start, notify_entity_update
from logic.identification import run_identify_pipeline
from models import BatchState
from logic.identification import run_text_recognizer
from logic.archive import run_archive_searcher
batch_state: BatchState = {"running": False, "total": 0, "done": 0, "errors": 0, "current": ""}
batch_executor = ThreadPoolExecutor(max_workers=1)
archive_executor = ThreadPoolExecutor(max_workers=8)
# WebSocket subscribers: each is a queue that receives batch_state snapshots.
_batch_subs: set[asyncio.Queue[dict[str, Any]]] = set()
# Tracked asyncio task for the running batch (for cancellation on shutdown).
_batch_task: asyncio.Task[None] | None = None
def subscribe_batch() -> asyncio.Queue[dict[str, Any]]:
    """Register a new subscriber for batch state updates.

    Returns:
        A queue that will receive a dict snapshot after each state change.
    """
    sub: asyncio.Queue[dict[str, Any]] = asyncio.Queue()
    _batch_subs.add(sub)
    return sub
def unsubscribe_batch(q: asyncio.Queue[dict[str, Any]]) -> None:
    """Remove a subscriber queue from batch state notifications.

    Safe to call with an unknown queue; discard() is a no-op then.

    Args:
        q: Queue previously returned by subscribe_batch().
    """
    _batch_subs.discard(q)
def get_batch_task() -> "asyncio.Task[None] | None":
    """Return the currently running batch asyncio task, or None.

    Used e.g. for cancellation on shutdown.

    Returns:
        The running Task, or None if no batch is active.
    """
    return _batch_task
def get_pending_batch() -> list[str]:
    """Return pending book IDs from the database batch queue.

    Used at startup to resume an interrupted batch.

    Returns:
        List of book IDs in queue order, or [] if queue is empty.
    """
    with db.connection() as conn:
        return db.get_batch_queue(conn)
def add_to_queue(book_ids: list[str]) -> int:
    """Add books to the DB batch queue, skipping duplicates.

    Both the duplicate check and the insert happen inside one transaction to
    avoid a read-then-write race with a concurrently running consumer.
    Duplicates *within* book_ids are also filtered, so the insert cannot hit
    the batch_queue PRIMARY KEY twice with the same ID.

    Args:
        book_ids: Candidate book IDs to enqueue (may contain duplicates).

    Returns:
        Number of books actually added (not already in queue).
    """
    with db.transaction() as c:
        queued = set(db.get_batch_queue(c))
        new_ids: list[str] = []
        for bid in book_ids:
            if bid not in queued:
                queued.add(bid)  # also dedupes repeats inside book_ids
                new_ids.append(bid)
        if new_ids:
            db.add_to_batch_queue(c, new_ids)
    return len(new_ids)
def _notify_subs() -> None:
    # Snapshot the mutable batch_state into a plain dict so subscribers see a
    # stable copy of the current values.
    snapshot: dict[str, Any] = {
        key: batch_state[key] for key in ("running", "total", "done", "errors", "current")
    }
    for sub in _batch_subs:
        sub.put_nowait(snapshot)
def process_book_sync(book_id: str) -> None:
"""Run the full auto-queue pipeline for a single book synchronously.
"""Run the full identification pipeline for a single book synchronously.
Runs all auto_queue text_recognizers (if book has no raw_text yet), then all
auto_queue archive_searchers. Exceptions from individual plugins are suppressed.
Exceptions from the pipeline propagate to the caller.
Args:
book_id: ID of the book to process.
Raises:
Any exception raised by run_identify_pipeline.
"""
with db.connection() as c:
book = db.get_book(c, book_id)
has_text = bool((book.raw_text if book else "").strip())
if not has_text:
for p in plugin_registry.get_auto_queue("text_recognizers"):
try:
run_text_recognizer(p, book_id)
except Exception:
pass
for p in plugin_registry.get_auto_queue("archive_searchers"):
try:
run_archive_searcher(p, book_id)
except Exception:
pass
run_identify_pipeline(book_id)
async def run_batch(book_ids: list[str]) -> None:
"""Process a list of books through the auto-queue pipeline sequentially.
async def run_batch_consumer() -> None:
"""Process books from the DB batch queue until the queue is empty.
Updates batch_state throughout execution. Exceptions from individual books
are counted in batch_state['errors'] and do not abort the run.
Args:
book_ids: List of book IDs to process.
Reads pending book IDs from the database queue. Each book is processed
sequentially via process_book_sync in the batch_executor. New books may
be added to the queue while this consumer is running and will be picked up
automatically. Batch state is broadcast to WebSocket subscribers after each
book. Individual book errors are counted but do not abort the run.
"""
global _batch_task
_batch_task = asyncio.current_task()
loop = asyncio.get_event_loop()
batch_state["running"] = True
batch_state["total"] = len(book_ids)
batch_state["done"] = 0
batch_state["errors"] = 0
for bid in book_ids:
with db.connection() as c:
pending = db.get_batch_queue(c)
batch_state["total"] = len(pending)
_notify_subs()
try:
while True:
with db.connection() as c:
pending = db.get_batch_queue(c)
if not pending:
break
bid = pending[0]
batch_state["current"] = bid
batch_state["total"] = batch_state["done"] + len(pending)
_notify_subs()
wall_start = time.time()
entry_id = log_start("identify_pipeline", "books", bid, "pipeline", bid)
try:
await loop.run_in_executor(batch_executor, process_book_sync, bid)
except Exception:
log_finish(entry_id, "ok", "", wall_start)
# Push entity update so connected clients see the new book data.
with db.connection() as c:
book = db.get_book(c, bid)
if book is not None:
notify_entity_update("books", bid, dataclasses.asdict(book))
except asyncio.CancelledError:
log_finish(entry_id, "error", "cancelled", wall_start)
raise
except Exception as exc:
log_finish(entry_id, "error", str(exc), wall_start)
batch_state["errors"] += 1
with db.transaction() as c:
db.remove_from_batch_queue(c, bid)
batch_state["done"] += 1
_notify_subs()
finally:
batch_state["running"] = False
batch_state["current"] = ""
_notify_subs()
_batch_task = None

View File

@@ -64,15 +64,22 @@ def shelf_source(c: sqlite3.Connection, shelf_id: str) -> tuple[Path, tuple[floa
return IMAGES_DIR / cab.photo_filename, (0.0, y0, 1.0, y1)
def book_spine_source(c: sqlite3.Connection, book_id: str) -> tuple[Path, tuple[float, float, float, float]]:
def book_spine_source(
c: sqlite3.Connection,
book_id: str,
padding_pct: float = 0.0,
) -> tuple[Path, tuple[float, float, float, float]]:
"""Return the image path and crop fractions for a book's spine image.
Composes the shelf's image source with the book's horizontal position within
the shelf's book boundaries.
the shelf's book boundaries, then expands the x-extent by padding_pct of
the book width on each side to account for book inclination.
Args:
c: Open database connection.
book_id: ID of the book to resolve.
padding_pct: Fraction of book width to add on each horizontal side
(e.g. 0.10 adds 10% on left and right). Clamped to image edges.
Returns:
(image_path, crop_frac) — always returns a crop (never None).
@@ -93,6 +100,11 @@ def book_spine_source(c: sqlite3.Connection, book_id: str) -> tuple[Path, tuple[
idx = db.get_book_rank(c, book_id)
x0, x1 = bounds_for_index(shelf.book_boundaries, idx)
if padding_pct > 0.0:
pad = (x1 - x0) * padding_pct
x0 = max(0.0, x0 - pad)
x1 = min(1.0, x1 + pad)
if base_crop is None:
return base_path, (x0, 0.0, x1, 1.0)
else:

View File

@@ -1,17 +1,24 @@
"""Book identification logic: status computation, AI result application, plugin runners."""
import json
import re
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
import db
import log_thread
from config import get_config
from db import now
from errors import BookNotFoundError, NoRawTextError
from errors import BookNotFoundError, NoPipelinePluginError, NoRawTextError
from logic.boundaries import book_spine_source
from logic.images import prep_img_b64
from models import (
AIIdentifyResult,
ArchiveSearcherPlugin,
BookIdentifierPlugin,
BookRow,
CandidateRecord,
IdentifyBlock,
TextRecognizeResult,
TextRecognizerPlugin,
)
@@ -19,6 +26,9 @@ from models import (
# Book fields the AI pipeline may fill in (mirrored as ai_* columns on books).
AI_FIELDS = ("title", "author", "year", "isbn", "publisher")
# NOTE(review): appears to be the fields required for the approved status —
# confirm against compute_status().
_APPROVED_REQUIRED = ("title", "author", "year")
# Thread-pool width and overall deadline (seconds) for the parallel
# archive-search step of run_identify_pipeline.
_ARCHIVE_PIPELINE_WORKERS = 8
_ARCHIVE_PIPELINE_TIMEOUT = 60.0
def compute_status(book: BookRow) -> str:
"""Return the identification_status string derived from current book field values.
@@ -173,7 +183,8 @@ def run_text_recognizer(plugin: TextRecognizerPlugin, book_id: str) -> BookRow:
book = db.get_book(c, book_id)
if not book:
raise BookNotFoundError(book_id)
spine_path, spine_crop = book_spine_source(c, book_id)
padding = get_config().ui.spine_padding_pct
spine_path, spine_crop = book_spine_source(c, book_id, padding)
b64, mt = prep_img_b64(spine_path, spine_crop, max_px=plugin.max_image_px)
result: TextRecognizeResult = plugin.recognize(b64, mt)
raw_text = result.get("raw_text") or ""
@@ -198,9 +209,10 @@ def run_text_recognizer(plugin: TextRecognizerPlugin, book_id: str) -> BookRow:
def run_book_identifier(plugin: BookIdentifierPlugin, book_id: str) -> BookRow:
"""Identify a book using AI and update ai_* fields and candidates.
"""Identify a book using the AI identifier plugin and update ai_blocks and ai_* fields.
Requires raw_text to have been populated by a text recognizer first.
Standalone mode: passes empty archive results and no images.
For the full multi-step pipeline use run_identify_pipeline instead.
Args:
plugin: The book identifier plugin to execute.
@@ -220,26 +232,242 @@ def run_book_identifier(plugin: BookIdentifierPlugin, book_id: str) -> BookRow:
raw_text = (book.raw_text or "").strip()
if not raw_text:
raise NoRawTextError(book_id)
result: AIIdentifyResult = plugin.identify(raw_text)
# apply_ai_result manages its own transaction
apply_ai_result(book_id, result, plugin.confidence_threshold)
with db.transaction() as c:
blocks: list[IdentifyBlock] = plugin.identify(raw_text, [], [])
db.set_book_ai_blocks(c, book_id, json.dumps(blocks, ensure_ascii=False))
top_score = float(blocks[0].get("score") or 0.0) if blocks else 0.0
if blocks and top_score >= plugin.confidence_threshold:
top = blocks[0]
db.set_book_ai_fields(
c,
book_id,
top.get("title") or "",
top.get("author") or "",
top.get("year") or "",
top.get("isbn") or "",
top.get("publisher") or "",
)
db.set_book_confidence(c, book_id, top_score, now())
book = db.get_book(c, book_id)
if not book:
raise BookNotFoundError(book_id)
cand: CandidateRecord = {
"source": plugin.plugin_id,
"title": (result.get("title") or "").strip(),
"author": (result.get("author") or "").strip(),
"year": (result.get("year") or "").strip(),
"isbn": (result.get("isbn") or "").strip(),
"publisher": (result.get("publisher") or "").strip(),
}
existing: list[CandidateRecord] = json.loads(book.candidates or "[]")
existing = [cd for cd in existing if cd.get("source") != plugin.plugin_id]
existing.append(cand)
db.set_book_candidates(c, book_id, json.dumps(existing))
db.set_book_status(c, book_id, compute_status(book))
updated = db.get_book(c, book_id)
if not updated:
raise BookNotFoundError(book_id)
return updated
# ── Identification pipeline ───────────────────────────────────────────────────
def _normalize_field(value: str) -> str:
"""Lowercase, strip punctuation, and collapse spaces for candidate deduplication.
Args:
value: Raw field string.
Returns:
Normalized string.
"""
v = value.lower()
v = re.sub(r"[^\w\s]", "", v)
return " ".join(v.split())
def _candidate_key(c: CandidateRecord) -> tuple[str, str, str, str, str]:
    # Normalized 5-field dedup key: (title, author, year, isbn, publisher).
    title = _normalize_field(c.get("title") or "")
    author = _normalize_field(c.get("author") or "")
    year = _normalize_field(c.get("year") or "")
    isbn = _normalize_field(c.get("isbn") or "")
    publisher = _normalize_field(c.get("publisher") or "")
    return (title, author, year, isbn, publisher)
def _deduplicate_candidates(candidates: list[CandidateRecord]) -> list[CandidateRecord]:
    """Merge candidates that are identical after normalization, unioning their sources.

    Two candidates match if title, author, year, isbn, and publisher all match
    case-insensitively with punctuation removed and spaces normalized. Candidates
    differing in any field (e.g. same title+author but different year) are kept separate.

    Args:
        candidates: Raw candidate list from multiple archive sources.

    Returns:
        Deduplicated list; first occurrence order preserved; sources merged with ', '.
    """
    seen: dict[tuple[str, str, str, str, str], CandidateRecord] = {}
    for cand in candidates:
        key = _candidate_key(cand)
        if key in seen:
            existing_src = seen[key].get("source") or ""
            new_src = cand.get("source") or ""
            # Compare against the parsed source list rather than via substring:
            # a raw `new_src in existing_src` test would wrongly skip e.g.
            # "library" when "openlibrary" is already recorded.
            merged_sources = [s for s in existing_src.split(", ") if s]
            if new_src and new_src not in merged_sources:
                seen[key]["source"] = f"{existing_src}, {new_src}" if existing_src else new_src
        else:
            seen[key] = {
                "source": cand.get("source") or "",
                "title": cand.get("title") or "",
                "author": cand.get("author") or "",
                "year": cand.get("year") or "",
                "isbn": cand.get("isbn") or "",
                "publisher": cand.get("publisher") or "",
            }
    return list(seen.values())
def _get_book_images(book_id: str, max_image_px: int) -> list[tuple[str, str]]:
    """Collect spine and title-page images for a book, encoded as base64.

    Silently skips images that cannot be loaded.

    Args:
        book_id: ID of the book.
        max_image_px: Maximum pixel dimension for downscaling.

    Returns:
        List of (base64_string, mime_type) tuples; may be empty.
    """
    collected: list[tuple[str, str]] = []
    pad = get_config().ui.spine_padding_pct
    with db.connection() as conn:
        # Spine crop first; best-effort — a missing/corrupt image is skipped.
        try:
            spine_path, spine_crop = book_spine_source(conn, book_id, pad)
            spine_b64, spine_mime = prep_img_b64(spine_path, spine_crop, max_px=max_image_px)
            collected.append((spine_b64, spine_mime))
        except Exception:
            pass
        # Title-page photo, if one is attached to the book row.
        row = db.get_book(conn, book_id)
        if row and row.image_filename:
            from files import IMAGES_DIR

            try:
                tp_b64, tp_mime = prep_img_b64(IMAGES_DIR / row.image_filename, max_px=max_image_px)
                collected.append((tp_b64, tp_mime))
            except Exception:
                pass
    return collected
def _search_with_log(searcher: ArchiveSearcherPlugin, query: str, book_id: str) -> list[CandidateRecord]:
    """Run one archive search call with thread-safe logging."""
    # Attribute any HTTP-level logging inside the plugin to this plugin/book.
    log_thread.set_log_ctx(searcher.plugin_id, "books", book_id)
    t0 = time.time()
    log_id = log_thread.start_entry("", f"search: {query[:80]}")
    try:
        found = searcher.search(query)
    except Exception as exc:
        log_thread.finish_entry(log_id, "error", str(exc), t0)
        raise
    log_thread.finish_entry(log_id, "ok", f"{len(found)} result(s)", t0)
    return found
def run_identify_pipeline(book_id: str) -> BookRow:
    """Run the full identification pipeline: VLM recognition -> archives -> main model.

    Steps:
      1. VLM text recognizer reads the spine image -> raw_text and structured fields.
      2. All archive searchers run in parallel using title+author and title-only queries.
      3. Archive results are deduplicated by normalized full-field match.
      4. The main identifier model receives raw_text, deduplicated archive results, and
         (if is_vlm is True) the spine and title-page images.
      5. The model returns ranked IdentifyBlock list stored in books.ai_blocks (never cleared).
      6. The top block (if score >= confidence_threshold) updates books.ai_* fields.

    The archive-search step is best-effort: individual search failures and an
    overall _ARCHIVE_PIPELINE_TIMEOUT deadline reduce the result set instead of
    aborting the pipeline.

    Args:
        book_id: ID of the book to identify.

    Returns:
        Updated BookRow after completing the pipeline.

    Raises:
        BookNotFoundError: If book_id does not exist.
        NoPipelinePluginError: If no text_recognizer or book_identifier is configured.
    """
    import plugins as plugin_registry
    from concurrent.futures import TimeoutError as FuturesTimeoutError

    with db.connection() as c:
        if not db.get_book(c, book_id):
            raise BookNotFoundError(book_id)
    recognizers = plugin_registry.get_all_text_recognizers()
    if not recognizers:
        raise NoPipelinePluginError("text_recognizer")
    recognizer = recognizers[0]
    identifiers = plugin_registry.get_all_book_identifiers()
    if not identifiers:
        raise NoPipelinePluginError("book_identifier")
    identifier = identifiers[0]
    # Step 1: VLM recognition — set log context so AIClient.call() attributes the LLM call
    log_thread.set_log_ctx(recognizer.plugin_id, "books", book_id)
    book = run_text_recognizer(recognizer, book_id)
    raw_text = (book.raw_text or "").strip()
    candidates: list[CandidateRecord] = json.loads(book.candidates or "[]")
    vlm_cand = next((cand for cand in candidates if cand.get("source") == recognizer.plugin_id), None)
    title = (vlm_cand.get("title") or "").strip() if vlm_cand else ""
    author = (vlm_cand.get("author") or "").strip() if vlm_cand else ""
    # Build search queries in decreasing specificity; fall back to raw text.
    queries: list[str] = []
    if title and author:
        queries.append(f"{author} {title}")
    if title:
        queries.append(title)
    if not queries and raw_text:
        queries.append(raw_text[:200])
    # Step 2: Parallel archive search — each call sets its own log context via _search_with_log
    searchers = plugin_registry.get_all_archive_searchers()
    all_archive: list[CandidateRecord] = []
    if searchers and queries:
        unique_queries = list(dict.fromkeys(queries))
        with ThreadPoolExecutor(max_workers=_ARCHIVE_PIPELINE_WORKERS) as pool:
            futs = {
                pool.submit(_search_with_log, s, q, book_id): s.plugin_id for s in searchers for q in unique_queries
            }
            try:
                for fut in as_completed(futs, timeout=_ARCHIVE_PIPELINE_TIMEOUT):
                    try:
                        all_archive.extend(fut.result())
                    except Exception:
                        pass  # one failed search must not abort the pipeline
            except FuturesTimeoutError:
                # Deadline hit: cancel whatever has not started and continue
                # with the partial results instead of raising out of the pipeline.
                for fut in futs:
                    fut.cancel()
    # Step 3: Deduplicate
    deduped = _deduplicate_candidates(all_archive)
    # Step 4: Collect images if identifier is a VLM
    images: list[tuple[str, str]] = []
    if identifier.is_vlm:
        images = _get_book_images(book_id, identifier.max_image_px)
    # Step 5: Call main identifier — set log context so AIClient.call() logs the LLM call
    log_thread.set_log_ctx(identifier.plugin_id, "books", book_id)
    blocks: list[IdentifyBlock] = identifier.identify(raw_text, deduped, images)
    # Step 6: Persist results (ai_blocks are never removed; overwritten each pipeline run)
    with db.transaction() as c:
        db.set_book_ai_blocks(c, book_id, json.dumps(blocks, ensure_ascii=False))
        top_score = float(blocks[0].get("score") or 0.0) if blocks else 0.0
        if blocks and top_score >= identifier.confidence_threshold:
            top = blocks[0]
            db.set_book_ai_fields(
                c,
                book_id,
                top.get("title") or "",
                top.get("author") or "",
                top.get("year") or "",
                top.get("isbn") or "",
                top.get("publisher") or "",
            )
            db.set_book_confidence(c, book_id, top_score, now())
        updated_book = db.get_book(c, book_id)
        if not updated_book:
            raise BookNotFoundError(book_id)
        db.set_book_status(c, book_id, compute_status(updated_book))
        final = db.get_book(c, book_id)
        if not final:
            raise BookNotFoundError(book_id)
        return final

72
src/migrate.py Normal file
View File

@@ -0,0 +1,72 @@
"""Database migration functions.
Each migration is idempotent and safe to run on a database that has already been migrated.
Run via run_migration() called from app startup after init_db().
"""
import sqlite3
from db import DB_PATH
def run_migration() -> None:
    """Apply all pending schema migrations in order.

    Currently applies:
      - v1: Add ai_blocks column to books; clear AI-derived data while preserving user data.
      - v2: Add batch_queue table for persistent batch processing queue.

    Migrations are idempotent — running them on an already-migrated database is a no-op.
    """
    conn = sqlite3.connect(DB_PATH)
    conn.row_factory = sqlite3.Row
    conn.execute("PRAGMA foreign_keys = ON")
    try:
        # Run every migration step in order inside a single commit.
        for step in (_migrate_v1, _migrate_v2):
            step(conn)
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()
def _migrate_v1(c: sqlite3.Connection) -> None:
"""Add ai_blocks column and clear stale AI data from all books (first run only).
- Adds ai_blocks TEXT DEFAULT NULL column if it does not exist.
- On first run only (when the column is absent): clears raw_text, ai_*, title_confidence,
analyzed_at, candidates, ai_blocks from all books (these are regenerated by the new pipeline).
- For user_approved books: copies user fields back to ai_* so that
compute_status() still returns 'user_approved' after the ai_* clear.
This migration assumes the database already has the base books schema.
It is a no-op if ai_blocks already exists.
"""
cols = {row["name"] for row in c.execute("PRAGMA table_info(books)")}
if "ai_blocks" not in cols:
c.execute("ALTER TABLE books ADD COLUMN ai_blocks TEXT DEFAULT NULL")
# Clear AI-derived fields only when first adding the column.
c.execute(
"UPDATE books SET "
"raw_text='', ai_title='', ai_author='', ai_year='', ai_isbn='', ai_publisher='', "
"title_confidence=0, analyzed_at=NULL, candidates=NULL, ai_blocks=NULL"
)
# For user_approved books, restore ai_* = user fields so status stays user_approved.
c.execute(
"UPDATE books SET "
"ai_title=title, ai_author=author, ai_year=year, ai_isbn=isbn, ai_publisher=publisher "
"WHERE identification_status='user_approved'"
)
def _migrate_v2(c: sqlite3.Connection) -> None:
"""Add batch_queue table for persistent batch processing queue.
Replaces data/batch_pending.json with a DB table so batch state survives
across restarts alongside all other persistent data.
"""
c.execute("CREATE TABLE IF NOT EXISTS batch_queue (" "book_id TEXT PRIMARY KEY," "added_at REAL NOT NULL" ")")

View File

@@ -29,6 +29,16 @@ class AIIdentifyResult(TypedDict, total=False):
confidence: float
class IdentifyBlock(TypedDict, total=False):
    """One ranked identification hypothesis from a book identifier plugin.

    All keys are optional (total=False). ``score`` is compared against the
    plugin's confidence_threshold when deciding whether to fill the ai_*
    book fields. NOTE(review): ``sources`` presumably lists the supporting
    source IDs — confirm against the identifier plugins that produce blocks.
    """

    title: str
    author: str
    year: str
    isbn: str
    publisher: str
    score: float
    sources: list[str]
# ── Candidate + AI config ─────────────────────────────────────────────────────
@@ -48,6 +58,7 @@ class AIConfig(TypedDict):
max_image_px: int
confidence_threshold: float
extra_body: dict[str, Any]
is_vlm: bool
# ── Application state ─────────────────────────────────────────────────────────
@@ -61,6 +72,19 @@ class BatchState(TypedDict):
current: str
class AiLogEntry(TypedDict):
    """A single AI request log record, mirrored in the ai_log DB table."""

    id: str  # numeric string assigned by the log's ID counter
    ts: float  # epoch seconds at request start
    plugin_id: str
    entity_type: str  # e.g. "books"
    entity_id: str
    model: str
    request: str  # short human-readable request summary
    status: str  # "running" | "ok" | "error"
    response: str  # response summary or error message
    duration_ms: int
# ── Plugin manifest ───────────────────────────────────────────────────────────
@@ -84,6 +108,9 @@ class BoundaryDetectorPlugin(Protocol):
auto_queue: bool
target: str
@property
def model(self) -> str: ...
@property
def max_image_px(self) -> int: ...
@@ -95,6 +122,9 @@ class TextRecognizerPlugin(Protocol):
name: str
auto_queue: bool
@property
def model(self) -> str: ...
@property
def max_image_px(self) -> int: ...
@@ -106,10 +136,24 @@ class BookIdentifierPlugin(Protocol):
name: str
auto_queue: bool
@property
def model(self) -> str: ...
@property
def max_image_px(self) -> int: ...
@property
def confidence_threshold(self) -> float: ...
def identify(self, raw_text: str) -> AIIdentifyResult: ...
@property
def is_vlm(self) -> bool: ...
def identify(
self,
raw_text: str,
archive_results: list["CandidateRecord"],
images: list[tuple[str, str]],
) -> list["IdentifyBlock"]: ...
class ArchiveSearcherPlugin(Protocol):
@@ -197,6 +241,7 @@ class BookRow:
analyzed_at: str | None
created_at: str
candidates: str | None
ai_blocks: str | None
# ── API request payload dataclasses ──────────────────────────────────────────

View File

@@ -70,6 +70,7 @@ def _build_ai_cfg(model_cfg: ModelConfig, cred_cfg: CredentialConfig, func: AIFu
max_image_px=func.max_image_px,
confidence_threshold=func.confidence_threshold,
extra_body=model_cfg.extra_body,
is_vlm=func.is_vlm,
)
@@ -227,6 +228,21 @@ def get_auto_queue(
return []
def get_all_text_recognizers() -> list[TextRecognizerPlugin]:
"""Return all registered text recognizer plugins."""
return list(_text_recognizers.values())
def get_all_book_identifiers() -> list[BookIdentifierPlugin]:
"""Return all registered book identifier plugins."""
return list(_book_identifiers.values())
def get_all_archive_searchers() -> list[ArchiveSearcherPlugin]:
"""Return all registered archive searcher plugins."""
return list(_archive_searchers.values())
def get_plugin(plugin_id: str) -> PluginLookupResult:
"""Find a plugin by ID across all categories. Returns a discriminated (category, plugin) tuple."""
if plugin_id in _boundary_detectors:

View File

@@ -2,12 +2,14 @@
Caches openai.OpenAI instances per (base_url, api_key) to avoid re-creating on each call.
AIClient wraps the raw API call: fills prompt template, encodes images, parses JSON response.
Individual LLM API calls are logged via log_thread if a log context is set.
"""
import json
import re
import time
from string import Template
from typing import Any, cast
from typing import Any, Literal, cast, overload
import openai
from openai.types.chat import ChatCompletionMessageParam
@@ -17,6 +19,7 @@ from openai.types.chat.chat_completion_content_part_image_param import (
)
from openai.types.chat.chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
import log_thread
from models import AIConfig
# Module-level cache of openai.OpenAI instances keyed by (base_url, api_key)
@@ -48,6 +51,24 @@ def _parse_json(text: str) -> dict[str, Any]:
return cast(dict[str, Any], result)
def _parse_json_list(text: str) -> list[Any]:
"""Extract and parse the first JSON array found in text.
Raises ValueError if no JSON array is found or the JSON is malformed.
"""
text = text.strip()
m = re.search(r"\[.*\]", text, re.DOTALL)
if not m:
raise ValueError(f"No JSON array found in AI response: {text[:200]!r}")
try:
result = json.loads(m.group())
except json.JSONDecodeError as exc:
raise ValueError(f"Failed to parse AI response as JSON: {exc}") from exc
if not isinstance(result, list):
raise ValueError(f"Expected JSON array, got {type(result).__name__}")
return cast(list[Any], result)
ContentPart = ChatCompletionContentPartImageParam | ChatCompletionContentPartTextParam
@@ -62,16 +83,41 @@ class AIClient:
self.cfg = cfg
self.output_format = output_format
@overload
def call(
self,
prompt_template: str,
images: list[tuple[str, str]],
text_vars: dict[str, str] | None = None,
) -> dict[str, Any]:
output_is_list: Literal[False] = False,
) -> dict[str, Any]: ...
@overload
def call(
self,
prompt_template: str,
images: list[tuple[str, str]],
text_vars: dict[str, str] | None,
output_is_list: Literal[True],
) -> list[Any]: ...
def call(
self,
prompt_template: str,
images: list[tuple[str, str]],
text_vars: dict[str, str] | None = None,
output_is_list: bool = False,
) -> dict[str, Any] | list[Any]:
"""Substitute template vars, call API with optional images, return parsed JSON.
images: list of (base64_str, mime_type) tuples.
text_vars: extra ${KEY} substitutions beyond ${OUTPUT_FORMAT}.
Args:
prompt_template: Prompt string with ${KEY} placeholders.
images: List of (base64_str, mime_type) tuples.
text_vars: Extra ${KEY} substitutions beyond ${OUTPUT_FORMAT}.
output_is_list: If True, parse the response as a JSON array instead of object.
Returns:
Parsed JSON — dict if output_is_list is False, list otherwise.
"""
vars_: dict[str, str] = {"OUTPUT_FORMAT": self.output_format}
if text_vars:
@@ -87,8 +133,17 @@ class AIClient:
]
parts.append(ChatCompletionContentPartTextParam(type="text", text=prompt))
messages: list[ChatCompletionMessageParam] = [{"role": "user", "content": parts}]
started = time.time()
entry_id = log_thread.start_entry(self.cfg["model"], prompt[:120])
try:
r = client.chat.completions.create(
model=self.cfg["model"], max_tokens=2048, messages=messages, extra_body=self.cfg["extra_body"]
model=self.cfg["model"], max_tokens=4096, messages=messages, extra_body=self.cfg["extra_body"]
)
raw = r.choices[0].message.content or ""
log_thread.finish_entry(entry_id, "ok", raw[:120], started)
except Exception as exc:
log_thread.finish_entry(entry_id, "error", str(exc), started)
raise
if output_is_list:
return _parse_json_list(raw)
return _parse_json(raw)

View File

@@ -1,23 +1,38 @@
"""Book identifier plugin — raw spine text → bibliographic metadata.
"""Book identifier plugin — VLM result + archive candidates → ranked identification blocks.
Input: raw_text string (from text_recognizer).
Output: {"title": "...", "author": "...", "year": "...", "isbn": "...",
"publisher": "...", "confidence": 0.95}
confidence — float 0-1; results below confidence_threshold are discarded by logic.py.
Result added to books.candidates and books.ai_* fields.
Input: raw_text string (from text_recognizer), archive_results (deduplicated candidates),
images (list of (b64, mime) pairs if is_vlm).
Output: list of IdentifyBlock dicts ranked by descending confidence score.
Result stored as books.ai_blocks JSON.
"""
from models import AIConfig, AIIdentifyResult
import json
from typing import Any, TypeGuard
from models import AIConfig, CandidateRecord, IdentifyBlock
from ._client import AIClient
def _is_str_dict(v: object) -> TypeGuard[dict[str, Any]]:
return isinstance(v, dict)
def _is_any_list(v: object) -> TypeGuard[list[Any]]:
return isinstance(v, list)
class BookIdentifierPlugin:
"""Identifies a book from spine text using a VLM with web-search capability."""
"""Identifies a book by combining VLM spine text with archive search results."""
category = "book_identifiers"
OUTPUT_FORMAT = (
'{"title": "...", "author": "...", "year": "...", ' '"isbn": "...", "publisher": "...", "confidence": 0.95}'
'[{"title": "The Master and Margarita", "author": "Mikhail Bulgakov", '
'"year": "1967", "isbn": "", "publisher": "YMCA Press", '
'"score": 0.95, "sources": ["rusneb", "openlibrary"]}, '
'{"title": "Master i Margarita", "author": "M. Bulgakov", '
'"year": "2005", "isbn": "978-5-17-123456-7", "publisher": "AST", '
'"score": 0.72, "sources": ["web"]}]'
)
def __init__(
@@ -36,21 +51,67 @@ class BookIdentifierPlugin:
self._client = AIClient(ai_config, self.OUTPUT_FORMAT)
self._prompt_text = prompt_text
def identify(self, raw_text: str) -> AIIdentifyResult:
"""Returns AIIdentifyResult with title/author/year/isbn/publisher/confidence."""
raw = self._client.call(self._prompt_text, [], text_vars={"RAW_TEXT": raw_text})
result = AIIdentifyResult(
title=str(raw.get("title") or ""),
author=str(raw.get("author") or ""),
year=str(raw.get("year") or ""),
isbn=str(raw.get("isbn") or ""),
publisher=str(raw.get("publisher") or ""),
def identify(
self,
raw_text: str,
archive_results: list[CandidateRecord],
images: list[tuple[str, str]],
) -> list[IdentifyBlock]:
"""Call the AI model to produce ranked identification blocks.
Args:
raw_text: Verbatim text read from the book spine.
archive_results: Deduplicated candidates from archive searchers.
images: (base64, mime_type) pairs; non-empty only when is_vlm is True.
Returns:
List of IdentifyBlock dicts ranked by descending score.
"""
archive_json = json.dumps(archive_results, ensure_ascii=False)
raw = self._client.call(
self._prompt_text,
images,
text_vars={"RAW_TEXT": raw_text, "ARCHIVE_RESULTS": archive_json},
output_is_list=True,
)
conf = raw.get("confidence")
if conf is not None:
result["confidence"] = float(conf)
return result
blocks: list[IdentifyBlock] = []
for item in raw:
if not _is_str_dict(item):
continue
sources: list[str] = []
sources_val = item.get("sources")
if _is_any_list(sources_val):
for sv in sources_val:
if isinstance(sv, str):
sources.append(sv)
block = IdentifyBlock(
title=str(item.get("title") or "").strip(),
author=str(item.get("author") or "").strip(),
year=str(item.get("year") or "").strip(),
isbn=str(item.get("isbn") or "").strip(),
publisher=str(item.get("publisher") or "").strip(),
score=float(item.get("score") or 0.0),
sources=sources,
)
blocks.append(block)
return sorted(blocks, key=lambda b: b.get("score", 0.0), reverse=True)
@property
def model(self) -> str:
"""AI model name used for identification."""
return self._client.cfg["model"]
@property
def max_image_px(self) -> int:
"""Maximum pixel dimension for images passed to the AI model."""
return self._client.cfg["max_image_px"]
@property
def confidence_threshold(self) -> float:
"""Minimum score threshold for the top block to set ai_* fields."""
return self._client.cfg["confidence_threshold"]
@property
def is_vlm(self) -> bool:
"""True if images should be included in the request."""
return self._client.cfg["is_vlm"]

View File

@@ -41,6 +41,10 @@ class BoundaryDetectorBooksPlugin:
boundaries: list[float] = [float(b) for b in raw_bounds if isinstance(b, (int, float))]
return BoundaryDetectResult(boundaries=boundaries)
@property
def model(self) -> str:
return self._client.cfg["model"]
@property
def max_image_px(self) -> int:
return self._client.cfg["max_image_px"]

View File

@@ -46,6 +46,10 @@ class BoundaryDetectorShelvesPlugin:
result["confidence"] = float(conf)
return result
@property
def model(self) -> str:
return self._client.cfg["model"]
@property
def max_image_px(self) -> int:
return self._client.cfg["max_image_px"]

View File

@@ -51,6 +51,10 @@ class TextRecognizerPlugin:
other=str(raw.get("other") or ""),
)
@property
def model(self) -> str:
return self._client.cfg["model"]
@property
def max_image_px(self) -> int:
return self._client.cfg["max_image_px"]

View File

@@ -1,29 +1,37 @@
/*
* layout.css
* Top-level layout: sticky header bar, two-column desktop layout
* (300px sidebar + flex main panel), mobile single-column default,
* Top-level layout: global header spanning full width, two-column desktop
* layout (300px sidebar + flex main panel), mobile single-column default,
* and the contenteditable header span used for inline entity renaming.
*
* Breakpoint: ≥768px = desktop two-column; <768px = mobile accordion.
*/
/* ── Header ── */
/* ── Page wrapper (header + content area) ── */
.page-wrap{display:flex;flex-direction:column;min-height:100vh}
/* ── Global header ── */
.hdr{background:#1e3a5f;color:white;padding:10px 14px;display:flex;align-items:center;gap:8px;position:sticky;top:0;z-index:100;box-shadow:0 2px 6px rgba(0,0,0,.3);flex-shrink:0}
.hdr h1{flex:1;font-size:.96rem;font-weight:600}
.hdr h1{font-size:.96rem;font-weight:600}
.hbtn{background:none;border:none;color:white;min-width:34px;min-height:34px;border-radius:50%;cursor:pointer;font-size:1rem;display:flex;align-items:center;justify-content:center;flex-shrink:0}
.hbtn:active{background:rgba(255,255,255,.2)}
/* ── AI active indicator (in global header) ── */
.ai-indicator{display:inline-flex;align-items:center;gap:5px;font-size:.75rem;color:rgba(255,255,255,.9);padding:2px 8px;border-radius:10px;background:rgba(255,255,255,.12)}
.ai-dot{width:7px;height:7px;border-radius:50%;background:#f59e0b;animation:pulse 1.2s ease-in-out infinite}
@keyframes pulse{0%,100%{opacity:1;transform:scale(1)}50%{opacity:.5;transform:scale(.8)}}
/* ── Mobile layout (default) ── */
.layout{display:flex;flex-direction:column;min-height:100vh}
.layout{display:flex;flex-direction:column;flex:1}
.sidebar{flex:1}
.main-panel{display:none}
/* ── Desktop layout ── */
@media(min-width:768px){
body{overflow:hidden}
.layout{flex-direction:row;height:100vh;overflow:hidden}
.page-wrap{height:100vh;overflow:hidden}
.layout{flex-direction:row;flex:1;overflow:hidden}
.sidebar{width:300px;display:flex;flex-direction:column;border-right:1px solid #cbd5e1;overflow:hidden;flex-shrink:0}
.sidebar .hdr{padding:9px 12px}
.sidebar-body{flex:1;overflow-y:auto;padding:8px 10px 16px}
.main-panel{flex:1;display:flex;flex-direction:column;overflow:hidden;background:#e8eef5}
.main-hdr{background:#1e3a5f;color:white;padding:9px 14px;display:flex;align-items:center;gap:8px;flex-shrink:0}
@@ -31,6 +39,12 @@
.main-body{flex:1;overflow:auto;padding:14px}
}
/* ── Root detail panel ── */
.det-root{max-width:640px}
.ai-log-entry{border-bottom:1px solid #f1f5f9;padding:0 2px}
.ai-log-entry:last-child{border-bottom:none}
.ai-log-entry summary::-webkit-details-marker{display:none}
/* ── Detail header editable name ── */
.hdr-edit{display:block;outline:none;cursor:text;border-radius:3px;padding:1px 4px;white-space:nowrap;overflow:hidden;text-overflow:ellipsis}
.hdr-edit:focus{background:rgba(255,255,255,.15);white-space:normal;overflow:visible}

View File

@@ -29,3 +29,10 @@
.pq-skip-btn{background:rgba(255,255,255,.1);color:#cbd5e1;border:none;border-radius:8px;padding:12px 18px;font-size:.85rem;cursor:pointer;min-width:70px}
.pq-skip-btn:active{background:rgba(255,255,255,.2)}
.pq-processing{position:absolute;inset:0;background:rgba(15,23,42,.88);display:flex;align-items:center;justify-content:center;flex-direction:column;gap:10px;font-size:.9rem}
/* ── Image popup ── */
.img-popup{display:none;position:fixed;inset:0;background:rgba(0,0,0,.75);z-index:500;align-items:center;justify-content:center}
.img-popup.open{display:flex}
.img-popup-inner{position:relative;max-width:90vw;max-height:90vh}
.img-popup-inner img{max-width:90vw;max-height:90vh;object-fit:contain;border-radius:4px;display:block}
.img-popup-close{position:absolute;top:-14px;right:-14px;background:#fff;border:none;border-radius:50%;width:28px;height:28px;cursor:pointer;font-size:18px;line-height:28px;text-align:center;padding:0;box-shadow:0 2px 6px rgba(0,0,0,.3)}

View File

@@ -33,6 +33,15 @@
<!-- Slide-in toast notification; text set by toast() in js/helpers.js -->
<div class="toast" id="toast"></div>
<!-- Full-screen image popup: shown when user clicks a book spine or title-page image.
Closed by clicking outside or the × button. -->
<div id="img-popup" class="img-popup">
<div class="img-popup-inner">
<button class="img-popup-close" id="img-popup-close">×</button>
<img id="img-popup-img" src="" alt="">
</div>
</div>
<!-- SortableJS: drag-and-drop reordering for rooms, cabinets, shelves, and books -->
<script src="https://cdn.jsdelivr.net/npm/sortablejs@1.15.2/Sortable.min.js"></script>
@@ -73,7 +82,7 @@
with all action cases; accordion expand helpers. -->
<script src="js/events.js"></script>
<!-- render(), renderDetail(), loadConfig(), startBatchPolling(), loadTree(),
<!-- render(), renderDetail(), loadConfig(), connectBatchWs(), loadTree(),
and the bootstrap Promise.all([loadConfig(), loadTree()]) call. -->
<script src="js/init.js"></script>

View File

@@ -7,16 +7,22 @@
* Depends on: nothing
*/
/* exported req */
// ── API ──────────────────────────────────────────────────────────────────────
async function req(method, url, body = null, isForm = false) {
const opts = {method};
const opts = { method };
if (body) {
if (isForm) { opts.body = body; }
else { opts.headers = {'Content-Type':'application/json'}; opts.body = JSON.stringify(body); }
if (isForm) {
opts.body = body;
} else {
opts.headers = { 'Content-Type': 'application/json' };
opts.body = JSON.stringify(body);
}
}
const r = await fetch(url, opts);
if (!r.ok) {
const e = await r.json().catch(() => ({detail:'Request failed'}));
const e = await r.json().catch(() => ({ detail: 'Request failed' }));
throw new Error(e.detail || 'Request failed');
}
return r.json();

View File

@@ -16,10 +16,16 @@
* setupDetailCanvas(), drawBnd(), clearSegHover()
*/
/* exported parseBounds, parseBndPluginResults, setupDetailCanvas, drawBnd */
// ── Boundary parsing helpers ─────────────────────────────────────────────────
function parseBounds(json) {
if (!json) return [];
try { return JSON.parse(json) || []; } catch { return []; }
try {
return JSON.parse(json) || [];
} catch {
return [];
}
}
function parseBndPluginResults(json) {
@@ -28,11 +34,19 @@ function parseBndPluginResults(json) {
const v = JSON.parse(json);
if (Array.isArray(v) || !v || typeof v !== 'object') return {};
return v;
} catch { return {}; }
} catch {
return {};
}
}
const SEG_FILLS = ['rgba(59,130,246,.14)','rgba(16,185,129,.14)','rgba(245,158,11,.14)','rgba(239,68,68,.14)','rgba(168,85,247,.14)'];
const SEG_STROKES = ['#3b82f6','#10b981','#f59e0b','#ef4444','#a855f7'];
const SEG_FILLS = [
'rgba(59,130,246,.14)',
'rgba(16,185,129,.14)',
'rgba(245,158,11,.14)',
'rgba(239,68,68,.14)',
'rgba(168,85,247,.14)',
];
const SEG_STROKES = ['#3b82f6', '#10b981', '#f59e0b', '#ef4444', '#a855f7'];
// ── Canvas setup ─────────────────────────────────────────────────────────────
function setupDetailCanvas() {
@@ -40,7 +54,7 @@ function setupDetailCanvas() {
const img = document.getElementById('bnd-img');
const canvas = document.getElementById('bnd-canvas');
if (!wrap || !img || !canvas || !S.selected) return;
const {type, id} = S.selected;
const { type, id } = S.selected;
const node = findNode(id);
if (!node || (type !== 'cabinet' && type !== 'shelf')) return;
@@ -48,16 +62,26 @@ function setupDetailCanvas() {
const boundaries = parseBounds(type === 'cabinet' ? node.shelf_boundaries : node.book_boundaries);
const pluginResults = parseBndPluginResults(type === 'cabinet' ? node.ai_shelf_boundaries : node.ai_book_boundaries);
const pluginIds = Object.keys(pluginResults);
const segments = type === 'cabinet'
? node.shelves.map((s,i) => ({id:s.id, label:s.name||`Shelf ${i+1}`}))
: node.books.map((b,i) => ({id:b.id, label:b.title||`Book ${i+1}`}));
const segments =
type === 'cabinet'
? node.shelves.map((s, i) => ({ id: s.id, label: s.name || `Shelf ${i + 1}` }))
: node.books.map((b, i) => ({ id: b.id, label: b.title || `Book ${i + 1}` }));
const hasChildren = type === 'cabinet' ? node.shelves.length > 0 : node.books.length > 0;
const prevSel = (_bnd?.nodeId === id) ? _bnd.selectedPlugin
: (hasChildren ? null : pluginIds[0] ?? null);
const prevSel = _bnd?.nodeId === id ? _bnd.selectedPlugin : hasChildren ? null : (pluginIds[0] ?? null);
_bnd = {wrap, img, canvas, axis, boundaries:[...boundaries],
pluginResults, selectedPlugin: prevSel, segments, nodeId:id, nodeType:type};
_bnd = {
wrap,
img,
canvas,
axis,
boundaries: [...boundaries],
pluginResults,
selectedPlugin: prevSel,
segments,
nodeId: id,
nodeType: type,
};
function sizeAndDraw() {
canvas.width = img.offsetWidth;
@@ -78,8 +102,9 @@ function setupDetailCanvas() {
// ── Draw ─────────────────────────────────────────────────────────────────────
function drawBnd(dragIdx = -1, dragVal = null) {
if (!_bnd || S._cropMode) return;
const {canvas, axis, boundaries, segments} = _bnd;
const W = canvas.width, H = canvas.height;
const { canvas, axis, boundaries, segments } = _bnd;
const W = canvas.width,
H = canvas.height;
if (!W || !H) return;
const ctx = canvas.getContext('2d');
ctx.clearRect(0, 0, W, H);
@@ -94,11 +119,12 @@ function drawBnd(dragIdx = -1, dragVal = null) {
// Draw segments
for (let i = 0; i < full.length - 1; i++) {
const a = full[i], b = full[i + 1];
const a = full[i],
b = full[i + 1];
const ci = i % SEG_FILLS.length;
ctx.fillStyle = SEG_FILLS[ci];
if (axis === 'y') ctx.fillRect(0, a*H, W, (b-a)*H);
else ctx.fillRect(a*W, 0, (b-a)*W, H);
if (axis === 'y') ctx.fillRect(0, a * H, W, (b - a) * H);
else ctx.fillRect(a * W, 0, (b - a) * W, H);
// Label
const seg = segments[i];
if (seg) {
@@ -106,10 +132,13 @@ function drawBnd(dragIdx = -1, dragVal = null) {
ctx.fillStyle = 'rgba(0,0,0,.5)';
const lbl = seg.label.slice(0, 24);
if (axis === 'y') {
ctx.fillText(lbl, 4, a*H + 14);
ctx.fillText(lbl, 4, a * H + 14);
} else {
ctx.save(); ctx.translate(a*W + 12, 14); ctx.rotate(Math.PI/2);
ctx.fillText(lbl, 0, 0); ctx.restore();
ctx.save();
ctx.translate(a * W + 12, 14);
ctx.rotate(Math.PI / 2);
ctx.fillText(lbl, 0, 0);
ctx.restore();
}
}
}
@@ -118,26 +147,36 @@ function drawBnd(dragIdx = -1, dragVal = null) {
ctx.setLineDash([5, 3]);
ctx.lineWidth = 2;
for (let i = 0; i < boundaries.length; i++) {
const val = (dragIdx === i && dragVal !== null) ? full[i+1] : boundaries[i];
const val = dragIdx === i && dragVal !== null ? full[i + 1] : boundaries[i];
ctx.strokeStyle = '#1e3a5f';
ctx.beginPath();
if (axis === 'y') { ctx.moveTo(0, val*H); ctx.lineTo(W, val*H); }
else { ctx.moveTo(val*W, 0); ctx.lineTo(val*W, H); }
if (axis === 'y') {
ctx.moveTo(0, val * H);
ctx.lineTo(W, val * H);
} else {
ctx.moveTo(val * W, 0);
ctx.lineTo(val * W, H);
}
ctx.stroke();
}
// Draw plugin boundary suggestions (dashed, non-interactive)
const {pluginResults, selectedPlugin} = _bnd;
const { pluginResults, selectedPlugin } = _bnd;
const pluginIds = Object.keys(pluginResults);
if (selectedPlugin && pluginIds.length) {
ctx.setLineDash([3, 6]);
ctx.lineWidth = 1.5;
const drawPluginBounds = (bounds, color) => {
ctx.strokeStyle = color;
for (const ab of (bounds || [])) {
for (const ab of bounds || []) {
ctx.beginPath();
if (axis === 'y') { ctx.moveTo(0, ab*H); ctx.lineTo(W, ab*H); }
else { ctx.moveTo(ab*W, 0); ctx.lineTo(ab*W, H); }
if (axis === 'y') {
ctx.moveTo(0, ab * H);
ctx.lineTo(W, ab * H);
} else {
ctx.moveTo(ab * W, 0);
ctx.lineTo(ab * W, H);
}
ctx.stroke();
}
};
@@ -151,7 +190,8 @@ function drawBnd(dragIdx = -1, dragVal = null) {
}
// ── Drag machinery ───────────────────────────────────────────────────────────
let _dragIdx = -1, _dragging = false;
let _dragIdx = -1,
_dragging = false;
function fracFromEvt(e) {
const r = _bnd.canvas.getBoundingClientRect();
@@ -161,27 +201,40 @@ function fracFromEvt(e) {
}
function nearestBnd(frac) {
const {boundaries, canvas, axis} = _bnd;
const { boundaries, canvas, axis } = _bnd;
const r = canvas.getBoundingClientRect();
const dim = axis === 'y' ? r.height : r.width;
const thresh = (window._grabPx ?? 14) / dim;
let best = -1, bestD = thresh;
boundaries.forEach((b,i) => { const d=Math.abs(b-frac); if(d<bestD){bestD=d;best=i;} });
let best = -1,
bestD = thresh;
boundaries.forEach((b, i) => {
const d = Math.abs(b - frac);
if (d < bestD) {
bestD = d;
best = i;
}
});
return best;
}
function snapToAi(frac) {
if (!_bnd?.selectedPlugin) return frac;
const {pluginResults, selectedPlugin} = _bnd;
const snapBounds = selectedPlugin === 'all'
? Object.values(pluginResults).flat()
: (pluginResults[selectedPlugin] || []);
const { pluginResults, selectedPlugin } = _bnd;
const snapBounds =
selectedPlugin === 'all' ? Object.values(pluginResults).flat() : pluginResults[selectedPlugin] || [];
if (!snapBounds.length) return frac;
const r = _bnd.canvas.getBoundingClientRect();
const dim = _bnd.axis === 'y' ? r.height : r.width;
const thresh = (window._grabPx ?? 14) / dim;
let best = frac, bestD = thresh;
snapBounds.forEach(ab => { const d = Math.abs(ab - frac); if (d < bestD) { bestD = d; best = ab; } });
let best = frac,
bestD = thresh;
snapBounds.forEach((ab) => {
const d = Math.abs(ab - frac);
if (d < bestD) {
bestD = d;
best = ab;
}
});
return best;
}
@@ -190,7 +243,8 @@ function bndPointerDown(e) {
const frac = fracFromEvt(e);
const idx = nearestBnd(frac);
if (idx >= 0) {
_dragIdx = idx; _dragging = true;
_dragIdx = idx;
_dragging = true;
_bnd.canvas.setPointerCapture(e.pointerId);
e.stopPropagation();
}
@@ -200,8 +254,7 @@ function bndPointerMove(e) {
if (!_bnd || S._cropMode) return;
const frac = fracFromEvt(e);
const near = nearestBnd(frac);
_bnd.canvas.style.cursor = (near >= 0 || _dragging)
? (_bnd.axis==='y' ? 'ns-resize' : 'ew-resize') : 'default';
_bnd.canvas.style.cursor = near >= 0 || _dragging ? (_bnd.axis === 'y' ? 'ns-resize' : 'ew-resize') : 'default';
if (_dragging && _dragIdx >= 0) drawBnd(_dragIdx, frac);
}
@@ -209,22 +262,24 @@ async function bndPointerUp(e) {
if (!_dragging || !_bnd || S._cropMode) return;
const frac = fracFromEvt(e);
_dragging = false;
const {boundaries, nodeId, nodeType} = _bnd;
const { boundaries, nodeId, nodeType } = _bnd;
const full = [0, ...boundaries, 1];
const clamped = Math.max(full[_dragIdx]+0.005, Math.min(full[_dragIdx+2]-0.005, frac));
const clamped = Math.max(full[_dragIdx] + 0.005, Math.min(full[_dragIdx + 2] - 0.005, frac));
boundaries[_dragIdx] = Math.round(snapToAi(clamped) * 10000) / 10000;
_bnd.boundaries = [...boundaries];
_dragIdx = -1;
drawBnd();
const url = nodeType==='cabinet' ? `/api/cabinets/${nodeId}/boundaries` : `/api/shelves/${nodeId}/boundaries`;
const url = nodeType === 'cabinet' ? `/api/cabinets/${nodeId}/boundaries` : `/api/shelves/${nodeId}/boundaries`;
try {
await req('PATCH', url, {boundaries});
await req('PATCH', url, { boundaries });
const node = findNode(nodeId);
if (node) {
if (nodeType==='cabinet') node.shelf_boundaries = JSON.stringify(boundaries);
if (nodeType === 'cabinet') node.shelf_boundaries = JSON.stringify(boundaries);
else node.book_boundaries = JSON.stringify(boundaries);
}
} catch(err) { toast('Save failed: ' + err.message); }
} catch (err) {
toast('Save failed: ' + err.message);
}
}
async function bndClick(e) {
@@ -232,40 +287,59 @@ async function bndClick(e) {
if (!e.ctrlKey || !e.altKey) return;
e.preventDefault();
const frac = snapToAi(fracFromEvt(e));
const {boundaries, nodeId, nodeType} = _bnd;
const newBounds = [...boundaries, frac].sort((a,b)=>a-b);
const { boundaries, nodeId, nodeType } = _bnd;
const newBounds = [...boundaries, frac].sort((a, b) => a - b);
_bnd.boundaries = newBounds;
const url = nodeType==='cabinet' ? `/api/cabinets/${nodeId}/boundaries` : `/api/shelves/${nodeId}/boundaries`;
const url = nodeType === 'cabinet' ? `/api/cabinets/${nodeId}/boundaries` : `/api/shelves/${nodeId}/boundaries`;
try {
await req('PATCH', url, {boundaries: newBounds});
await req('PATCH', url, { boundaries: newBounds });
if (nodeType === 'cabinet') {
const s = await req('POST', `/api/cabinets/${nodeId}/shelves`, null);
S.tree.forEach(r=>r.cabinets.forEach(c=>{ if(c.id===nodeId){
c.shelf_boundaries=JSON.stringify(newBounds); c.shelves.push({...s,books:[]});
}}));
S.tree.forEach((r) =>
r.cabinets.forEach((c) => {
if (c.id === nodeId) {
c.shelf_boundaries = JSON.stringify(newBounds);
c.shelves.push({ ...s, books: [] });
}
}),
);
} else {
const b = await req('POST', `/api/shelves/${nodeId}/books`);
S.tree.forEach(r=>r.cabinets.forEach(c=>c.shelves.forEach(s=>{ if(s.id===nodeId){
s.book_boundaries=JSON.stringify(newBounds); s.books.push(b);
}})));
S.tree.forEach((r) =>
r.cabinets.forEach((c) =>
c.shelves.forEach((s) => {
if (s.id === nodeId) {
s.book_boundaries = JSON.stringify(newBounds);
s.books.push(b);
}
}),
),
);
}
render();
} catch(err) { toast('Error: ' + err.message); }
} catch (err) {
toast('Error: ' + err.message);
}
}
function bndHover(e) {
if (!_bnd || S._cropMode) return;
const frac = fracFromEvt(e);
const {boundaries, segments} = _bnd;
const { boundaries, segments } = _bnd;
const full = [0, ...boundaries, 1];
let segIdx = -1;
for (let i = 0; i < full.length-1; i++) { if(frac>=full[i]&&frac<full[i+1]){segIdx=i;break;} }
for (let i = 0; i < full.length - 1; i++) {
if (frac >= full[i] && frac < full[i + 1]) {
segIdx = i;
break;
}
}
clearSegHover();
if (segIdx>=0 && segments[segIdx]) {
if (segIdx >= 0 && segments[segIdx]) {
document.querySelector(`.node[data-id="${segments[segIdx].id}"] .nrow`)?.classList.add('seg-hover');
}
}
function clearSegHover() {
document.querySelectorAll('.seg-hover').forEach(el=>el.classList.remove('seg-hover'));
document.querySelectorAll('.seg-hover').forEach((el) => el.classList.remove('seg-hover'));
}

View File

@@ -13,6 +13,8 @@
* Provides: startCropMode(), cancelCrop(), confirmCrop()
*/
/* exported startCropMode */
// ── Crop state ───────────────────────────────────────────────────────────────
let _cropState = null; // {x1,y1,x2,y2} fractions; null = not in crop mode
let _cropDragPart = null; // 'tl','tr','bl','br','t','b','l','r','move' | null
@@ -23,8 +25,8 @@ function startCropMode(type, id) {
const canvas = document.getElementById('bnd-canvas');
const wrap = document.getElementById('bnd-wrap');
if (!canvas || !wrap) return;
S._cropMode = {type, id};
_cropState = {x1: 0.05, y1: 0.05, x2: 0.95, y2: 0.95};
S._cropMode = { type, id };
_cropState = { x1: 0.05, y1: 0.05, x2: 0.95, y2: 0.95 };
canvas.addEventListener('pointerdown', cropPointerDown);
canvas.addEventListener('pointermove', cropPointerMove);
@@ -34,7 +36,8 @@ function startCropMode(type, id) {
const bar = document.createElement('div');
bar.id = 'crop-bar';
bar.style.cssText = 'margin-top:10px;display:flex;gap:8px';
bar.innerHTML = '<button class="btn btn-p" id="crop-ok">Confirm crop</button><button class="btn btn-s" id="crop-cancel">Cancel</button>';
bar.innerHTML =
'<button class="btn btn-p" id="crop-ok">Confirm crop</button><button class="btn btn-s" id="crop-cancel">Cancel</button>';
wrap.after(bar);
document.getElementById('crop-ok').addEventListener('click', confirmCrop);
document.getElementById('crop-cancel').addEventListener('click', cancelCrop);
@@ -47,63 +50,81 @@ function drawCropOverlay() {
const canvas = document.getElementById('bnd-canvas');
if (!canvas || !_cropState) return;
const ctx = canvas.getContext('2d');
const W = canvas.width, H = canvas.height;
const {x1, y1, x2, y2} = _cropState;
const px1=x1*W, py1=y1*H, px2=x2*W, py2=y2*H;
const W = canvas.width,
H = canvas.height;
const { x1, y1, x2, y2 } = _cropState;
const px1 = x1 * W,
py1 = y1 * H,
px2 = x2 * W,
py2 = y2 * H;
ctx.clearRect(0, 0, W, H);
// Dark shadow outside crop rect
ctx.fillStyle = 'rgba(0,0,0,0.55)';
ctx.fillRect(0, 0, W, H);
ctx.clearRect(px1, py1, px2-px1, py2-py1);
ctx.clearRect(px1, py1, px2 - px1, py2 - py1);
// Bright border
ctx.strokeStyle = '#38bdf8'; ctx.lineWidth = 2; ctx.setLineDash([]);
ctx.strokeRect(px1, py1, px2-px1, py2-py1);
ctx.strokeStyle = '#38bdf8';
ctx.lineWidth = 2;
ctx.setLineDash([]);
ctx.strokeRect(px1, py1, px2 - px1, py2 - py1);
// Corner handles
const hs = 9;
ctx.fillStyle = '#38bdf8';
[[px1,py1],[px2,py1],[px1,py2],[px2,py2]].forEach(([x,y]) => ctx.fillRect(x-hs/2, y-hs/2, hs, hs));
[
[px1, py1],
[px2, py1],
[px1, py2],
[px2, py2],
].forEach(([x, y]) => ctx.fillRect(x - hs / 2, y - hs / 2, hs, hs));
}
// ── Hit testing ──────────────────────────────────────────────────────────────
function _cropFracFromEvt(e) {
const canvas = document.getElementById('bnd-canvas');
const r = canvas.getBoundingClientRect();
return {fx: (e.clientX-r.left)/r.width, fy: (e.clientY-r.top)/r.height};
return { fx: (e.clientX - r.left) / r.width, fy: (e.clientY - r.top) / r.height };
}
function _getCropPart(fx, fy) {
if (!_cropState) return null;
const {x1, y1, x2, y2} = _cropState;
const { x1, y1, x2, y2 } = _cropState;
const th = 0.05;
const inX=fx>=x1&&fx<=x2, inY=fy>=y1&&fy<=y2;
const nX1=Math.abs(fx-x1)<th, nX2=Math.abs(fx-x2)<th;
const nY1=Math.abs(fy-y1)<th, nY2=Math.abs(fy-y2)<th;
if (nX1&&nY1) return 'tl'; if (nX2&&nY1) return 'tr';
if (nX1&&nY2) return 'bl'; if (nX2&&nY2) return 'br';
if (nY1&&inX) return 't'; if (nY2&&inX) return 'b';
if (nX1&&inY) return 'l'; if (nX2&&inY) return 'r';
if (inX&&inY) return 'move';
const inX = fx >= x1 && fx <= x2,
inY = fy >= y1 && fy <= y2;
const nX1 = Math.abs(fx - x1) < th,
nX2 = Math.abs(fx - x2) < th;
const nY1 = Math.abs(fy - y1) < th,
nY2 = Math.abs(fy - y2) < th;
if (nX1 && nY1) return 'tl';
if (nX2 && nY1) return 'tr';
if (nX1 && nY2) return 'bl';
if (nX2 && nY2) return 'br';
if (nY1 && inX) return 't';
if (nY2 && inX) return 'b';
if (nX1 && inY) return 'l';
if (nX2 && inY) return 'r';
if (inX && inY) return 'move';
return null;
}
function _cropPartCursor(part) {
if (!part) return 'crosshair';
if (part==='move') return 'move';
if (part==='tl'||part==='br') return 'nwse-resize';
if (part==='tr'||part==='bl') return 'nesw-resize';
if (part==='t'||part==='b') return 'ns-resize';
if (part === 'move') return 'move';
if (part === 'tl' || part === 'br') return 'nwse-resize';
if (part === 'tr' || part === 'bl') return 'nesw-resize';
if (part === 't' || part === 'b') return 'ns-resize';
return 'ew-resize';
}
// ── Pointer events ───────────────────────────────────────────────────────────
function cropPointerDown(e) {
if (!_cropState) return;
const {fx, fy} = _cropFracFromEvt(e);
const { fx, fy } = _cropFracFromEvt(e);
const part = _getCropPart(fx, fy);
if (part) {
_cropDragPart = part;
_cropDragStart = {fx, fy, ..._cropState};
_cropDragStart = { fx, fy, ..._cropState };
document.getElementById('bnd-canvas').setPointerCapture(e.pointerId);
}
}
@@ -111,19 +132,23 @@ function cropPointerDown(e) {
function cropPointerMove(e) {
if (!_cropState) return;
const canvas = document.getElementById('bnd-canvas');
const {fx, fy} = _cropFracFromEvt(e);
const { fx, fy } = _cropFracFromEvt(e);
if (_cropDragPart && _cropDragStart) {
const dx=fx-_cropDragStart.fx, dy=fy-_cropDragStart.fy;
const s = {..._cropState};
if (_cropDragPart==='move') {
const w=_cropDragStart.x2-_cropDragStart.x1, h=_cropDragStart.y2-_cropDragStart.y1;
s.x1=Math.max(0,Math.min(1-w,_cropDragStart.x1+dx)); s.y1=Math.max(0,Math.min(1-h,_cropDragStart.y1+dy));
s.x2=s.x1+w; s.y2=s.y1+h;
const dx = fx - _cropDragStart.fx,
dy = fy - _cropDragStart.fy;
const s = { ..._cropState };
if (_cropDragPart === 'move') {
const w = _cropDragStart.x2 - _cropDragStart.x1,
h = _cropDragStart.y2 - _cropDragStart.y1;
s.x1 = Math.max(0, Math.min(1 - w, _cropDragStart.x1 + dx));
s.y1 = Math.max(0, Math.min(1 - h, _cropDragStart.y1 + dy));
s.x2 = s.x1 + w;
s.y2 = s.y1 + h;
} else {
if (_cropDragPart.includes('l')) s.x1=Math.max(0,Math.min(_cropDragStart.x2-0.05,_cropDragStart.x1+dx));
if (_cropDragPart.includes('r')) s.x2=Math.min(1,Math.max(_cropDragStart.x1+0.05,_cropDragStart.x2+dx));
if (_cropDragPart.includes('t')) s.y1=Math.max(0,Math.min(_cropDragStart.y2-0.05,_cropDragStart.y1+dy));
if (_cropDragPart.includes('b')) s.y2=Math.min(1,Math.max(_cropDragStart.y1+0.05,_cropDragStart.y2+dy));
if (_cropDragPart.includes('l')) s.x1 = Math.max(0, Math.min(_cropDragStart.x2 - 0.05, _cropDragStart.x1 + dx));
if (_cropDragPart.includes('r')) s.x2 = Math.min(1, Math.max(_cropDragStart.x1 + 0.05, _cropDragStart.x2 + dx));
if (_cropDragPart.includes('t')) s.y1 = Math.max(0, Math.min(_cropDragStart.y2 - 0.05, _cropDragStart.y1 + dy));
if (_cropDragPart.includes('b')) s.y2 = Math.min(1, Math.max(_cropDragStart.y1 + 0.05, _cropDragStart.y2 + dy));
}
_cropState = s;
drawCropOverlay();
@@ -133,27 +158,46 @@ function cropPointerMove(e) {
}
}
function cropPointerUp() { _cropDragPart = null; _cropDragStart = null; }
function cropPointerUp() {
_cropDragPart = null;
_cropDragStart = null;
}
// ── Confirm / cancel ─────────────────────────────────────────────────────────
async function confirmCrop() {
if (!_cropState || !S._cropMode) return;
const img = document.getElementById('bnd-img');
if (!img) return;
const {x1, y1, x2, y2} = _cropState;
const W=img.naturalWidth, H=img.naturalHeight;
const px = {x:Math.round(x1*W), y:Math.round(y1*H), w:Math.round((x2-x1)*W), h:Math.round((y2-y1)*H)};
if (px.w<10||px.h<10) { toast('Selection too small'); return; }
const {type, id} = S._cropMode;
const url = type==='cabinet' ? `/api/cabinets/${id}/crop` : `/api/shelves/${id}/crop`;
const { x1, y1, x2, y2 } = _cropState;
const W = img.naturalWidth,
H = img.naturalHeight;
const px = {
x: Math.round(x1 * W),
y: Math.round(y1 * H),
w: Math.round((x2 - x1) * W),
h: Math.round((y2 - y1) * H),
};
if (px.w < 10 || px.h < 10) {
toast('Selection too small');
return;
}
const { type, id } = S._cropMode;
const url = type === 'cabinet' ? `/api/cabinets/${id}/crop` : `/api/shelves/${id}/crop`;
try {
await req('POST', url, px);
toast('Cropped'); cancelCrop(); render();
} catch(err) { toast('Crop failed: '+err.message); }
toast('Cropped');
cancelCrop();
render();
} catch (err) {
toast('Crop failed: ' + err.message);
}
}
function cancelCrop() {
S._cropMode = null; _cropState = null; _cropDragPart = null; _cropDragStart = null;
S._cropMode = null;
_cropState = null;
_cropDragPart = null;
_cropDragStart = null;
document.getElementById('crop-bar')?.remove();
const canvas = document.getElementById('bnd-canvas');
if (canvas) {

View File

@@ -11,20 +11,70 @@
* vShelfDetail(), vBookDetail()
*/
/* exported vDetailBody, aiBlocksShown */
// ── Room detail ──────────────────────────────────────────────────────────────
function vRoomDetail(r) {
const stats = getBookStats(r, 'room');
const totalBooks = stats.total;
return `<div>
${vAiProgressBar(stats)}
<p style="font-size:.72rem;color:#64748b">${r.cabinets.length} cabinet${r.cabinets.length!==1?'s':''} · ${totalBooks} book${totalBooks!==1?'s':''}</p>
<p style="font-size:.72rem;color:#64748b">${r.cabinets.length} cabinet${r.cabinets.length !== 1 ? 's' : ''} · ${totalBooks} book${totalBooks !== 1 ? 's' : ''}</p>
</div>`;
}
// ── Root detail (no selection) ────────────────────────────────────────────────
function vAiLogEntry(entry) {
const ts = new Date(entry.ts * 1000).toLocaleTimeString();
const statusColor = entry.status === 'ok' ? '#15803d' : entry.status === 'error' ? '#dc2626' : '#b45309';
const statusLabel = entry.status === 'running' ? '⏳' : entry.status === 'ok' ? '✓' : '✗';
const dur = entry.duration_ms > 0 ? ` ${entry.duration_ms}ms` : '';
const model = entry.model
? `<span style="font-size:.68rem;color:#94a3b8;margin-left:6px">${esc(entry.model)}</span>`
: '';
const isBook = entry.entity_type === 'books';
const entityLabel = isBook
? `<button data-a="select" data-type="book" data-id="${esc(entry.entity_id)}"
style="background:none;border:none;padding:0;cursor:pointer;color:#2563eb;font-size:.75rem;text-decoration:underline"
>${esc(entry.entity_id.slice(0, 8))}</button>`
: `<span>${esc(entry.entity_id.slice(0, 8))}</span>`;
const thumb = isBook
? `<img src="/api/books/${esc(entry.entity_id)}/spine" alt=""
style="height:30px;width:auto;vertical-align:middle;border-radius:2px;margin-left:2px"
onerror="this.style.display='none'">`
: '';
return `<details class="ai-log-entry">
<summary style="display:flex;align-items:center;gap:6px;cursor:pointer;list-style:none;padding:6px 0">
<span style="color:${statusColor};font-weight:600;font-size:.78rem;width:1.2rem;text-align:center">${statusLabel}</span>
<span style="font-size:.75rem;color:#475569;flex:1;display:flex;align-items:center;gap:4px;flex-wrap:wrap">
${esc(entry.plugin_id)} · ${entityLabel}${thumb}
</span>
<span style="font-size:.68rem;color:#94a3b8;white-space:nowrap">${ts}${dur}</span>
</summary>
<div style="padding:6px 0 6px 1.8rem;font-size:.75rem;color:#475569">
${model}
${entry.request ? `<div style="margin-top:4px;color:#64748b"><strong>Request:</strong> ${esc(entry.request)}</div>` : ''}
${entry.response ? `<div style="margin-top:4px;color:#64748b"><strong>Response:</strong> ${esc(entry.response)}</div>` : ''}
</div>
</details>`;
}
function vRootDetail() {
const log = (_aiLog || []).slice().reverse(); // newest first
return `<div style="padding:0">
<div style="font-size:.72rem;font-weight:600;color:#64748b;margin-bottom:8px;text-transform:uppercase;letter-spacing:.04em">AI Request Log</div>
${
log.length === 0
? `<div style="font-size:.78rem;color:#94a3b8">No AI requests yet. Use Identify or run a plugin on a book.</div>`
: log.map(vAiLogEntry).join('<hr style="border:none;border-top:1px solid #f1f5f9;margin:0">')
}
</div>`;
}
// ── Detail body (right panel) ────────────────────────────────────────────────
function vDetailBody() {
if (!S.selected) return '<div class="det-empty">← Select a room, cabinet or shelf from the tree</div>';
const {type, id} = S.selected;
if (!S.selected) return `<div class="det-root">${vRootDetail()}</div>`;
const { type, id } = S.selected;
const node = findNode(id);
if (!node) return '<div class="det-empty">Not found</div>';
if (type === 'room') return vRoomDetail(node);
@@ -42,29 +92,34 @@ function vCabinetDetail(cab) {
const bndPlugins = pluginsByTarget('boundary_detector', 'shelves');
const pluginResults = parseBndPluginResults(cab.ai_shelf_boundaries);
const pluginIds = Object.keys(pluginResults);
const sel = (_bnd?.nodeId === cab.id) ? _bnd.selectedPlugin
: (cab.shelves.length > 0 ? null : pluginIds[0] ?? null);
const sel = _bnd?.nodeId === cab.id ? _bnd.selectedPlugin : cab.shelves.length > 0 ? null : (pluginIds[0] ?? null);
const selOpts = [
`<option value="">None</option>`,
...pluginIds.map(pid => `<option value="${pid}"${sel===pid?' selected':''}>${pid}</option>`),
...(pluginIds.length > 1 ? [`<option value="all"${sel==='all'?' selected':''}>All</option>`] : []),
...pluginIds.map((pid) => `<option value="${pid}"${sel === pid ? ' selected' : ''}>${pid}</option>`),
...(pluginIds.length > 1 ? [`<option value="all"${sel === 'all' ? ' selected' : ''}>All</option>`] : []),
].join('');
return `<div>
${vAiProgressBar(stats)}
${hasPhoto
${
hasPhoto
? `<div class="img-wrap" id="bnd-wrap" data-type="cabinet" data-id="${cab.id}">
<img id="bnd-img" src="/images/${cab.photo_filename}?t=${Date.now()}" alt="">
<canvas id="bnd-canvas"></canvas>
</div>`
: `<div class="empty"><div class="ei">📷</div><div>Upload a cabinet photo (📷 in header) to get started</div></div>`}
: `<div class="empty"><div class="ei">📷</div><div>Upload a cabinet photo (📷 in header) to get started</div></div>`
}
${hasPhoto ? `<p style="font-size:.72rem;color:#94a3b8;margin-top:8px">Drag lines · Ctrl+Alt+Click to add · Snap to AI guides</p>` : ''}
${hasPhoto ? `<div style="display:flex;align-items:center;gap:6px;flex-wrap:wrap;margin-top:6px">
${bounds.length ? `<span style="font-size:.72rem;color:#64748b">${cab.shelves.length} shelf${cab.shelves.length!==1?'s':''} · ${bounds.length} boundar${bounds.length!==1?'ies':'y'}</span>` : ''}
${
hasPhoto
? `<div style="display:flex;align-items:center;gap:6px;flex-wrap:wrap;margin-top:6px">
${bounds.length ? `<span style="font-size:.72rem;color:#64748b">${cab.shelves.length} shelf${cab.shelves.length !== 1 ? 's' : ''} · ${bounds.length} boundar${bounds.length !== 1 ? 'ies' : 'y'}</span>` : ''}
<div style="display:flex;gap:4px;align-items:center;flex-wrap:wrap;margin-left:auto">
${bndPlugins.map(p => vPluginBtn(p, cab.id, 'cabinets')).join('')}
${bndPlugins.map((p) => vPluginBtn(p, cab.id, 'cabinets')).join('')}
<select data-a="select-bnd-plugin" style="font-size:.72rem;padding:2px 5px;border:1px solid #e2e8f0;border-radius:4px;background:white;color:#475569">${selOpts}</select>
</div>
</div>` : ''}
</div>`
: ''
}
</div>`;
}
@@ -75,12 +130,11 @@ function vShelfDetail(shelf) {
const bndPlugins = pluginsByTarget('boundary_detector', 'books');
const pluginResults = parseBndPluginResults(shelf.ai_book_boundaries);
const pluginIds = Object.keys(pluginResults);
const sel = (_bnd?.nodeId === shelf.id) ? _bnd.selectedPlugin
: (shelf.books.length > 0 ? null : pluginIds[0] ?? null);
const sel = _bnd?.nodeId === shelf.id ? _bnd.selectedPlugin : shelf.books.length > 0 ? null : (pluginIds[0] ?? null);
const selOpts = [
`<option value="">None</option>`,
...pluginIds.map(pid => `<option value="${pid}"${sel===pid?' selected':''}>${pid}</option>`),
...(pluginIds.length > 1 ? [`<option value="all"${sel==='all'?' selected':''}>All</option>`] : []),
...pluginIds.map((pid) => `<option value="${pid}"${sel === pid ? ' selected' : ''}>${pid}</option>`),
...(pluginIds.length > 1 ? [`<option value="all"${sel === 'all' ? ' selected' : ''}>All</option>`] : []),
].join('');
return `<div>
${vAiProgressBar(stats)}
@@ -91,72 +145,115 @@ function vShelfDetail(shelf) {
</div>
<p style="font-size:.72rem;color:#94a3b8;margin-top:8px">Drag lines · Ctrl+Alt+Click to add · Snap to AI guides</p>
<div style="display:flex;align-items:center;gap:6px;flex-wrap:wrap;margin-top:6px">
${bounds.length ? `<span style="font-size:.72rem;color:#64748b">${shelf.books.length} book${shelf.books.length!==1?'s':''} · ${bounds.length} boundary${bounds.length!==1?'ies':''}</span>` : ''}
${bounds.length ? `<span style="font-size:.72rem;color:#64748b">${shelf.books.length} book${shelf.books.length !== 1 ? 's' : ''} · ${bounds.length} boundary${bounds.length !== 1 ? 'ies' : ''}</span>` : ''}
<div style="display:flex;gap:4px;align-items:center;flex-wrap:wrap;margin-left:auto">
${bndPlugins.map(p => vPluginBtn(p, shelf.id, 'shelves')).join('')}
${bndPlugins.map((p) => vPluginBtn(p, shelf.id, 'shelves')).join('')}
<select data-a="select-bnd-plugin" style="font-size:.72rem;padding:2px 5px;border:1px solid #e2e8f0;border-radius:4px;background:white;color:#475569">${selOpts}</select>
</div>
</div>
</div>`;
}
// ── AI blocks helpers ─────────────────────────────────────────────────────────
function parseAiBlocks(json) {
if (!json) return [];
try {
return JSON.parse(json) || [];
} catch {
return [];
}
}
function aiBlocksShown(b) {
if (b.id in _aiBlocksVisible) return _aiBlocksVisible[b.id];
return b.identification_status !== 'user_approved';
}
function vAiBlock(block, bookId) {
const score = typeof block.score === 'number' ? (block.score * 100).toFixed(0) + '%' : '';
const sources = (block.sources || []).join(', ');
const fields = [
['title', block.title],
['author', block.author],
['year', block.year],
['isbn', block.isbn],
['publisher', block.publisher],
].filter(([, v]) => v && v.trim());
const rows = fields
.map(
([k, v]) =>
`<div style="font-size:.78rem;color:#475569"><span style="color:#94a3b8;min-width:4.5rem;display:inline-block">${k}</span> ${esc(v)}</div>`,
)
.join('');
const blockData = esc(JSON.stringify(block));
return `<div class="ai-block" data-a="apply-ai-block" data-id="${bookId}" data-block="${blockData}"
style="cursor:pointer;border:1px solid #e2e8f0;border-radius:6px;padding:8px 10px;margin-bottom:6px;background:#f8fafc">
<div style="display:flex;align-items:center;gap:6px;margin-bottom:4px;flex-wrap:wrap">
${score ? `<span style="background:#dbeafe;color:#1e40af;border-radius:4px;padding:1px 6px;font-size:.72rem;font-weight:600">${score}</span>` : ''}
${sources ? `<span style="font-size:.7rem;color:#64748b">${esc(sources)}</span>` : ''}
</div>
${rows}
</div>`;
}
// ── Book detail ──────────────────────────────────────────────────────────────
function vBookDetail(b) {
const [sc, sl] = _STATUS_BADGE[b.identification_status] ?? _STATUS_BADGE.unidentified;
const recognizers = pluginsByCategory('text_recognizer');
const identifiers = pluginsByCategory('book_identifier');
const searchers = pluginsByCategory('archive_searcher');
const hasRawText = !!(b.raw_text || '').trim();
const isLoading_ = isLoading('identify', b.id);
const blocks = parseAiBlocks(b.ai_blocks);
const shown = aiBlocksShown(b);
const spineUrl = `/api/books/${b.id}/spine?t=${Date.now()}`;
const titleUrl = b.image_filename ? `/images/${b.image_filename}` : '';
return `<div class="book-panel">
<div>
<div class="book-img-label">Spine</div>
<div class="book-img-box"><img src="/api/books/${b.id}/spine?t=${Date.now()}" alt=""
onerror="this.style.display='none'"></div>
${b.image_filename
<div class="book-img-box">
<img src="${spineUrl}" alt="" style="cursor:pointer"
data-a="open-img-popup" data-src="${spineUrl}"
onerror="this.style.display='none'">
</div>
${
titleUrl
? `<div class="book-img-label">Title page</div>
<div class="book-img-box"><img src="/images/${b.image_filename}" alt=""></div>`
: ''}
<div class="book-img-box">
<img src="${titleUrl}" alt="" style="cursor:pointer"
data-a="open-img-popup" data-src="${titleUrl}">
</div>`
: ''
}
</div>
<div>
<div class="card">
<div style="display:flex;align-items:center;gap:8px;margin-bottom:8px">
<div style="display:flex;align-items:center;gap:8px;margin-bottom:8px;flex-wrap:wrap">
<span class="sbadge ${sc}" style="font-size:.7rem;padding:2px 7px">${sl}</span>
<span style="font-size:.72rem;color:#64748b">${b.identification_status ?? 'unidentified'}</span>
${b.analyzed_at ? `<span style="font-size:.68rem;color:#94a3b8;margin-left:auto">Identified ${b.analyzed_at.slice(0,10)}</span>` : ''}
${b.analyzed_at ? `<span style="font-size:.68rem;color:#94a3b8">Identified ${b.analyzed_at.slice(0, 10)}</span>` : ''}
<button class="btn btn-s" style="padding:2px 10px;font-size:.78rem;min-height:0;margin-left:auto"
data-a="identify-book" data-id="${b.id}"${isLoading_ ? ' disabled' : ''}>
${isLoading_ ? '⏳ Identifying…' : '🔍 Identify'}
</button>
</div>
<div class="fgroup">
<label class="flabel" style="display:flex;align-items:center;gap:6px;flex-wrap:wrap">
Recognition
${recognizers.map(p => vPluginBtn(p, b.id, 'books')).join('')}
${identifiers.map(p => vPluginBtn(p, b.id, 'books', !hasRawText)).join('')}
</label>
<textarea class="finput" id="d-raw-text" style="height:72px;font-family:monospace;font-size:.8rem" readonly>${esc(b.raw_text ?? '')}</textarea>
${
blocks.length
? `<div style="margin-bottom:8px">
<div style="display:flex;align-items:center;gap:6px;margin-bottom:6px">
<span style="font-size:.72rem;font-weight:600;color:#475569">AI Results (${blocks.length})</span>
<button class="btn btn-s" style="padding:1px 7px;font-size:.72rem;min-height:0;margin-left:auto"
data-a="toggle-ai-blocks" data-id="${b.id}">${shown ? 'Hide' : 'Show'}</button>
</div>
${searchers.length ? `<div class="fgroup">
<label class="flabel" style="display:flex;align-items:center;gap:6px;flex-wrap:wrap">
Archives
${searchers.map(p => vPluginBtn(p, b.id, 'books')).join('')}
</label>
</div>` : ''}
<div class="fgroup">
${candidateSugRows(b, 'title', 'd-title')}
<label class="flabel">Title</label>
${shown ? blocks.map((bl) => vAiBlock(bl, b.id)).join('') : ''}
</div>`
: ''
}
<div class="fgroup"><label class="flabel">Title</label>
<input class="finput" id="d-title" value="${esc(b.title ?? '')}"></div>
<div class="fgroup">
${candidateSugRows(b, 'author', 'd-author')}
<label class="flabel">Author</label>
<div class="fgroup"><label class="flabel">Author</label>
<input class="finput" id="d-author" value="${esc(b.author ?? '')}"></div>
<div class="fgroup">
${candidateSugRows(b, 'year', 'd-year')}
<label class="flabel">Year</label>
<div class="fgroup"><label class="flabel">Year</label>
<input class="finput" id="d-year" value="${esc(b.year ?? '')}" inputmode="numeric"></div>
<div class="fgroup">
${candidateSugRows(b, 'isbn', 'd-isbn')}
<label class="flabel">ISBN</label>
<div class="fgroup"><label class="flabel">ISBN</label>
<input class="finput" id="d-isbn" value="${esc(b.isbn ?? '')}" inputmode="numeric"></div>
<div class="fgroup">
${candidateSugRows(b, 'publisher', 'd-pub')}
<label class="flabel">Publisher</label>
<div class="fgroup"><label class="flabel">Publisher</label>
<input class="finput" id="d-pub" value="${esc(b.publisher ?? '')}"></div>
<div class="fgroup"><label class="flabel">Notes</label>
<textarea class="finput" id="d-notes">${esc(b.notes ?? '')}</textarea></div>

View File

@@ -12,54 +12,84 @@
* Provides: attachEditables(), initSortables()
*/
/* exported attachEditables, initSortables */
// ── SortableJS instances (destroyed and recreated on each render) ─────────────
let _sortables = [];
// ── Inline name editing ──────────────────────────────────────────────────────
function attachEditables() {
document.querySelectorAll('[contenteditable=true]').forEach(el => {
document.querySelectorAll('[contenteditable=true]').forEach((el) => {
el.dataset.orig = el.textContent.trim();
el.addEventListener('keydown', e => {
if (e.key==='Enter') { e.preventDefault(); el.blur(); }
if (e.key==='Escape') { el.textContent=el.dataset.orig; el.blur(); }
el.addEventListener('keydown', (e) => {
if (e.key === 'Enter') {
e.preventDefault();
el.blur();
}
if (e.key === 'Escape') {
el.textContent = el.dataset.orig;
el.blur();
}
e.stopPropagation();
});
el.addEventListener('blur', async () => {
const val = el.textContent.trim();
if (!val||val===el.dataset.orig) { if(!val) el.textContent=el.dataset.orig; return; }
const newName = val.replace(/^[🏠📚]\s*/u,'').trim();
const {type, id} = el.dataset;
const url = {room:`/api/rooms/${id}`,cabinet:`/api/cabinets/${id}`,shelf:`/api/shelves/${id}`}[type];
if (!val || val === el.dataset.orig) {
if (!val) el.textContent = el.dataset.orig;
return;
}
const newName = val.replace(/^[🏠📚]\s*/u, '').trim();
const { type, id } = el.dataset;
const url = { room: `/api/rooms/${id}`, cabinet: `/api/cabinets/${id}`, shelf: `/api/shelves/${id}` }[type];
if (!url) return;
try {
await req('PUT', url, {name: newName});
await req('PUT', url, { name: newName });
el.dataset.orig = el.textContent.trim();
walkTree(n=>{ if(n.id===id) n.name=newName; });
walkTree((n) => {
if (n.id === id) n.name = newName;
});
// Update sidebar label if editing from header (sidebar has non-editable nname spans)
const sideLabel = document.querySelector(`.node[data-id="${id}"] .nname`);
if (sideLabel && sideLabel !== el) {
const prefix = type==='room' ? '🏠 ' : type==='cabinet' ? '📚 ' : '';
const prefix = type === 'room' ? '🏠 ' : type === 'cabinet' ? '📚 ' : '';
sideLabel.textContent = prefix + newName;
}
} catch(err) { el.textContent=el.dataset.orig; toast('Rename failed: '+err.message); }
} catch (err) {
el.textContent = el.dataset.orig;
toast('Rename failed: ' + err.message);
}
});
el.addEventListener('click', e=>e.stopPropagation());
el.addEventListener('click', (e) => e.stopPropagation());
});
}
// ── SortableJS drag-and-drop ─────────────────────────────────────────────────
function initSortables() {
_sortables.forEach(s=>{ try{s.destroy();}catch(_){} });
_sortables.forEach((s) => {
try {
s.destroy();
} catch (_) {
// ignore destroy errors on stale instances
}
});
_sortables = [];
document.querySelectorAll('.sortable-list').forEach(el => {
document.querySelectorAll('.sortable-list').forEach((el) => {
const type = el.dataset.type;
_sortables.push(Sortable.create(el, {
handle:'.drag-h', animation:120, ghostClass:'drag-ghost',
_sortables.push(
Sortable.create(el, {
handle: '.drag-h',
animation: 120,
ghostClass: 'drag-ghost',
onEnd: async () => {
const ids = [...el.querySelectorAll(':scope > .node')].map(n=>n.dataset.id);
try { await req('PATCH',`/api/${type}/reorder`,{ids}); }
catch(err) { toast('Reorder failed'); await loadTree(); }
const ids = [...el.querySelectorAll(':scope > .node')].map((n) => n.dataset.id);
try {
await req('PATCH', `/api/${type}/reorder`, { ids });
} catch (_err) {
toast('Reorder failed');
await loadTree();
}
},
}));
}),
);
});
}

View File

@@ -12,7 +12,7 @@
* Depends on: S, _bnd, _batchState, _photoQueue (state.js);
* req (api.js); toast, isDesktop (helpers.js);
* walkTree, removeNode, findNode, parseBounds (tree-render.js /
* canvas-boundary.js); render, renderDetail, startBatchPolling
* canvas-boundary.js); render, renderDetail, connectBatchWs
* (init.js); startCropMode (canvas-crop.js);
* triggerPhoto, collectQueueBooks, renderPhotoQueue (photo.js);
* drawBnd (canvas-boundary.js)
@@ -22,53 +22,61 @@
// ── Accordion helpers ────────────────────────────────────────────────────────
function getSiblingIds(id, type) {
if (!S.tree) return [];
if (type === 'room') return S.tree.filter(r => r.id !== id).map(r => r.id);
if (type === 'room') return S.tree.filter((r) => r.id !== id).map((r) => r.id);
for (const r of S.tree) {
if (type === 'cabinet' && r.cabinets.some(c => c.id === id))
return r.cabinets.filter(c => c.id !== id).map(c => c.id);
if (type === 'cabinet' && r.cabinets.some((c) => c.id === id))
return r.cabinets.filter((c) => c.id !== id).map((c) => c.id);
for (const c of r.cabinets) {
if (type === 'shelf' && c.shelves.some(s => s.id === id))
return c.shelves.filter(s => s.id !== id).map(s => s.id);
if (type === 'shelf' && c.shelves.some((s) => s.id === id))
return c.shelves.filter((s) => s.id !== id).map((s) => s.id);
}
}
return [];
}
function accordionExpand(id, type) {
if (!isDesktop()) getSiblingIds(id, type).forEach(sid => S.expanded.delete(sid));
if (!isDesktop()) getSiblingIds(id, type).forEach((sid) => S.expanded.delete(sid));
S.expanded.add(id);
}
// ── Event delegation ─────────────────────────────────────────────────────────
document.getElementById('app').addEventListener('click', async e => {
document.getElementById('app').addEventListener('click', async (e) => {
const el = e.target.closest('[data-a]');
if (!el) return;
const d = el.dataset;
try { await handle(d.a, d, e); }
catch(err) { toast('Error: '+err.message); }
try {
await handle(d.a, d, e);
} catch (err) {
toast('Error: ' + err.message);
}
});
document.getElementById('app').addEventListener('change', async e => {
document.getElementById('app').addEventListener('change', async (e) => {
const el = e.target.closest('[data-a]');
if (!el) return;
const d = el.dataset;
try { await handle(d.a, d, e); }
catch(err) { toast('Error: '+err.message); }
try {
await handle(d.a, d, e);
} catch (err) {
toast('Error: ' + err.message);
}
});
// Photo queue overlay is outside #app so needs its own listener
document.getElementById('photo-queue-overlay').addEventListener('click', async e => {
document.getElementById('photo-queue-overlay').addEventListener('click', async (e) => {
const el = e.target.closest('[data-a]');
if (!el) return;
const d = el.dataset;
try { await handle(d.a, d, e); }
catch(err) { toast('Error: ' + err.message); }
try {
await handle(d.a, d, e);
} catch (err) {
toast('Error: ' + err.message);
}
});
// ── Action dispatcher ────────────────────────────────────────────────────────
async function handle(action, d, e) {
switch (action) {
case 'select': {
// Ignore if the click hit a button or editable inside the row
if (e?.target?.closest('button,[contenteditable]')) return;
@@ -80,14 +88,16 @@ async function handle(action, d, e) {
}
break;
}
S.selected = {type: d.type, id: d.id};
S.selected = { type: d.type, id: d.id };
S._loading = {};
render(); break;
render();
break;
}
case 'deselect': {
S.selected = null;
render(); break;
render();
break;
}
case 'toggle': {
@@ -95,95 +105,135 @@ async function handle(action, d, e) {
// Mobile: expand-only (no collapse to avoid accidental mistaps)
accordionExpand(d.id, d.type);
} else {
if (S.expanded.has(d.id)) { S.expanded.delete(d.id); }
else { S.expanded.add(d.id); }
if (S.expanded.has(d.id)) {
S.expanded.delete(d.id);
} else {
S.expanded.add(d.id);
}
render(); break;
}
render();
break;
}
// Rooms
case 'add-room': {
const r = await req('POST','/api/rooms');
if (!S.tree) S.tree=[];
S.tree.push({...r, cabinets:[]});
S.expanded.add(r.id); render(); break;
const r = await req('POST', '/api/rooms');
if (!S.tree) S.tree = [];
S.tree.push({ ...r, cabinets: [] });
S.expanded.add(r.id);
render();
break;
}
case 'del-room': {
if (!confirm('Delete room and all contents?')) break;
await req('DELETE',`/api/rooms/${d.id}`);
removeNode('room',d.id);
if (S.selected?.id===d.id) S.selected=null;
render(); break;
await req('DELETE', `/api/rooms/${d.id}`);
removeNode('room', d.id);
if (S.selected?.id === d.id) S.selected = null;
render();
break;
}
// Cabinets
case 'add-cabinet': {
const c = await req('POST',`/api/rooms/${d.id}/cabinets`);
S.tree.forEach(r=>{ if(r.id===d.id) r.cabinets.push({...c,shelves:[]}); });
S.expanded.add(d.id); render(); break; // expand parent room
const c = await req('POST', `/api/rooms/${d.id}/cabinets`);
S.tree.forEach((r) => {
if (r.id === d.id) r.cabinets.push({ ...c, shelves: [] });
});
S.expanded.add(d.id);
render();
break; // expand parent room
}
case 'del-cabinet': {
if (!confirm('Delete cabinet and all contents?')) break;
await req('DELETE',`/api/cabinets/${d.id}`);
removeNode('cabinet',d.id);
if (S.selected?.id===d.id) S.selected=null;
render(); break;
await req('DELETE', `/api/cabinets/${d.id}`);
removeNode('cabinet', d.id);
if (S.selected?.id === d.id) S.selected = null;
render();
break;
}
// Shelves
case 'add-shelf': {
const cab = findNode(d.id);
const prevCount = cab ? cab.shelves.length : 0;
const s = await req('POST',`/api/cabinets/${d.id}/shelves`);
S.tree.forEach(r=>r.cabinets.forEach(c=>{ if(c.id===d.id) c.shelves.push({...s,books:[]}); }));
const s = await req('POST', `/api/cabinets/${d.id}/shelves`);
S.tree.forEach((r) =>
r.cabinets.forEach((c) => {
if (c.id === d.id) c.shelves.push({ ...s, books: [] });
}),
);
if (prevCount > 0) {
// Split last segment in half to make room for new shelf
const bounds = parseBounds(cab.shelf_boundaries);
const lastStart = bounds.length ? bounds[bounds.length-1] : 0.0;
const newBound = Math.round((lastStart + 1.0) / 2 * 10000) / 10000;
const lastStart = bounds.length ? bounds[bounds.length - 1] : 0.0;
const newBound = Math.round(((lastStart + 1.0) / 2) * 10000) / 10000;
const newBounds = [...bounds, newBound];
await req('PATCH', `/api/cabinets/${d.id}/boundaries`, {boundaries: newBounds});
S.tree.forEach(r=>r.cabinets.forEach(c=>{ if(c.id===d.id) c.shelf_boundaries=JSON.stringify(newBounds); }));
await req('PATCH', `/api/cabinets/${d.id}/boundaries`, { boundaries: newBounds });
S.tree.forEach((r) =>
r.cabinets.forEach((c) => {
if (c.id === d.id) c.shelf_boundaries = JSON.stringify(newBounds);
}),
);
}
S.expanded.add(d.id); render(); break; // expand parent cabinet
S.expanded.add(d.id);
render();
break; // expand parent cabinet
}
case 'del-shelf': {
if (!confirm('Delete shelf and all books?')) break;
await req('DELETE',`/api/shelves/${d.id}`);
removeNode('shelf',d.id);
if (S.selected?.id===d.id) S.selected=null;
render(); break;
await req('DELETE', `/api/shelves/${d.id}`);
removeNode('shelf', d.id);
if (S.selected?.id === d.id) S.selected = null;
render();
break;
}
// Books
case 'add-book': {
const shelf = findNode(d.id);
const prevCount = shelf ? shelf.books.length : 0;
const b = await req('POST',`/api/shelves/${d.id}/books`);
S.tree.forEach(r=>r.cabinets.forEach(c=>c.shelves.forEach(s=>{ if(s.id===d.id) s.books.push(b); })));
const b = await req('POST', `/api/shelves/${d.id}/books`);
S.tree.forEach((r) =>
r.cabinets.forEach((c) =>
c.shelves.forEach((s) => {
if (s.id === d.id) s.books.push(b);
}),
),
);
if (prevCount > 0) {
// Split last segment in half to make room for new book
const bounds = parseBounds(shelf.book_boundaries);
const lastStart = bounds.length ? bounds[bounds.length-1] : 0.0;
const newBound = Math.round((lastStart + 1.0) / 2 * 10000) / 10000;
const lastStart = bounds.length ? bounds[bounds.length - 1] : 0.0;
const newBound = Math.round(((lastStart + 1.0) / 2) * 10000) / 10000;
const newBounds = [...bounds, newBound];
await req('PATCH', `/api/shelves/${d.id}/boundaries`, {boundaries: newBounds});
S.tree.forEach(r=>r.cabinets.forEach(c=>c.shelves.forEach(s=>{ if(s.id===d.id) s.book_boundaries=JSON.stringify(newBounds); })));
await req('PATCH', `/api/shelves/${d.id}/boundaries`, { boundaries: newBounds });
S.tree.forEach((r) =>
r.cabinets.forEach((c) =>
c.shelves.forEach((s) => {
if (s.id === d.id) s.book_boundaries = JSON.stringify(newBounds);
}),
),
);
}
S.expanded.add(d.id); render(); break; // expand parent shelf
S.expanded.add(d.id);
render();
break; // expand parent shelf
}
case 'del-book': {
if (!confirm('Delete this book?')) break;
await req('DELETE',`/api/books/${d.id}`);
removeNode('book',d.id);
if (S.selected?.id===d.id) S.selected=null;
render(); break;
await req('DELETE', `/api/books/${d.id}`);
removeNode('book', d.id);
if (S.selected?.id === d.id) S.selected = null;
render();
break;
}
case 'del-book-confirm': {
if (!confirm('Delete this book?')) break;
await req('DELETE',`/api/books/${d.id}`);
removeNode('book',d.id);
S.selected=null; render(); break;
await req('DELETE', `/api/books/${d.id}`);
removeNode('book', d.id);
S.selected = null;
render();
break;
}
case 'save-book': {
const data = {
@@ -194,69 +244,190 @@ async function handle(action, d, e) {
publisher: document.getElementById('d-pub')?.value || '',
notes: document.getElementById('d-notes')?.value || '',
};
const res = await req('PUT',`/api/books/${d.id}`,data);
walkTree(n => {
const res = await req('PUT', `/api/books/${d.id}`, data);
walkTree((n) => {
if (n.id === d.id) {
Object.assign(n, data);
n.ai_title = data.title; n.ai_author = data.author; n.ai_year = data.year;
n.ai_isbn = data.isbn; n.ai_publisher = data.publisher;
n.ai_title = data.title;
n.ai_author = data.author;
n.ai_year = data.year;
n.ai_isbn = data.isbn;
n.ai_publisher = data.publisher;
n.identification_status = res.identification_status ?? n.identification_status;
}
});
toast('Saved'); render(); break;
toast('Saved');
render();
break;
}
case 'run-plugin': {
const key = `${d.plugin}:${d.id}`;
S._loading[key] = true; renderDetail();
// Capture any unsaved field edits before the first renderDetail() overwrites them.
if (d.etype === 'books') {
walkTree((n) => {
if (n.id === d.id) {
n.title = document.getElementById('d-title')?.value ?? n.title;
n.author = document.getElementById('d-author')?.value ?? n.author;
n.year = document.getElementById('d-year')?.value ?? n.year;
n.isbn = document.getElementById('d-isbn')?.value ?? n.isbn;
n.publisher = document.getElementById('d-pub')?.value ?? n.publisher;
n.notes = document.getElementById('d-notes')?.value ?? n.notes;
}
});
}
S._loading[key] = true;
renderDetail();
try {
const res = await req('POST', `/api/${d.etype}/${d.id}/plugin/${d.plugin}`);
walkTree(n => { if (n.id === d.id) Object.assign(n, res); });
} catch(err) { toast(`${d.plugin} failed: ${err.message}`); }
delete S._loading[key]; renderDetail();
walkTree((n) => {
if (n.id !== d.id) return;
if (d.etype === 'books') {
// Server response must not overwrite user edits captured above.
const saved = {
title: n.title,
author: n.author,
year: n.year,
isbn: n.isbn,
publisher: n.publisher,
notes: n.notes,
};
Object.assign(n, res);
Object.assign(n, saved);
} else {
Object.assign(n, res);
}
});
} catch (err) {
toast(`${d.plugin} failed: ${err.message}`);
}
delete S._loading[key];
renderDetail();
break;
}
case 'select-bnd-plugin': {
if (_bnd) { _bnd.selectedPlugin = e.target.value || null; drawBnd(); }
if (_bnd) {
_bnd.selectedPlugin = e.target.value || null;
drawBnd();
}
break;
}
case 'accept-field': {
const inp = document.getElementById(d.input);
if (inp) inp.value = d.value;
walkTree(n => { if (n.id === d.id) n[d.field] = d.value; });
renderDetail(); break;
walkTree((n) => {
if (n.id === d.id) n[d.field] = d.value;
});
renderDetail();
break;
}
case 'dismiss-field': {
const res = await req('POST', `/api/books/${d.id}/dismiss-field`, {field: d.field, value: d.value || ''});
walkTree(n => {
const res = await req('POST', `/api/books/${d.id}/dismiss-field`, { field: d.field, value: d.value || '' });
walkTree((n) => {
if (n.id === d.id) {
n.candidates = JSON.stringify(res.candidates || []);
if (!d.value) n[`ai_${d.field}`] = n[d.field] || '';
n.identification_status = res.identification_status ?? n.identification_status;
}
});
renderDetail(); break;
renderDetail();
break;
}
case 'identify-book': {
const key = `identify:${d.id}`;
S._loading[key] = true;
renderDetail();
try {
const res = await req('POST', `/api/books/${d.id}/identify`);
walkTree((n) => {
if (n.id !== d.id) return;
const saved = {
title: n.title,
author: n.author,
year: n.year,
isbn: n.isbn,
publisher: n.publisher,
notes: n.notes,
};
Object.assign(n, res);
Object.assign(n, saved);
});
} catch (err) {
toast(`Identify failed: ${err.message}`);
}
delete S._loading[key];
renderDetail();
break;
}
case 'toggle-ai-blocks': {
walkTree((n) => {
if (n.id === d.id) _aiBlocksVisible[d.id] = !aiBlocksShown(n);
});
renderDetail();
break;
}
case 'apply-ai-block': {
let block;
try {
block = JSON.parse(d.block);
} catch {
break;
}
const fieldMap = { title: 'd-title', author: 'd-author', year: 'd-year', isbn: 'd-isbn', publisher: 'd-pub' };
for (const [field, inputId] of Object.entries(fieldMap)) {
const v = (block[field] || '').trim();
if (!v) continue;
const inp = document.getElementById(inputId);
if (inp) inp.value = v;
walkTree((n) => {
if (n.id === d.id) n[field] = v;
});
}
renderDetail();
break;
}
case 'batch-start': {
const res = await req('POST', '/api/batch');
if (res.already_running) { toast('Batch already running'); break; }
if (!res.started) { toast('No unidentified books'); break; }
_batchState = {running: true, total: res.total, done: 0, errors: 0, current: ''};
startBatchPolling(); renderDetail(); break;
if (res.already_running) {
toast(res.added > 0 ? `Added ${res.added} book(s) to batch` : 'Batch already running');
if (!_batchWs) connectBatchWs();
break;
}
if (!res.started) {
toast('No unidentified books');
break;
}
connectBatchWs();
renderDetail();
break;
}
case 'open-img-popup': {
const popup = document.getElementById('img-popup');
if (!popup) break;
document.getElementById('img-popup-img').src = d.src;
popup.classList.add('open');
break;
}
// Photo
case 'photo': triggerPhoto(d.type, d.id); break;
case 'photo':
triggerPhoto(d.type, d.id);
break;
// Crop
case 'crop-start': startCropMode(d.type, d.id); break;
case 'crop-start':
startCropMode(d.type, d.id);
break;
// Photo queue
case 'photo-queue-start': {
const node = findNode(d.id);
if (!node) break;
const books = collectQueueBooks(node, d.type);
if (!books.length) { toast('No unidentified books'); break; }
_photoQueue = {books, index: 0, processing: false};
if (!books.length) {
toast('No unidentified books');
break;
}
_photoQueue = { books, index: 0, processing: false };
renderPhotoQueue();
break;
}
@@ -278,6 +449,5 @@ async function handle(action, d, e) {
renderPhotoQueue();
break;
}
}
}

View File

@@ -6,16 +6,25 @@
* Provides: esc(), toast(), isDesktop()
*/
/* exported esc, toast, isDesktop */
// ── Helpers ─────────────────────────────────────────────────────────────────
function esc(s) {
return String(s ?? '').replace(/&/g,'&amp;').replace(/</g,'&lt;').replace(/>/g,'&gt;').replace(/"/g,'&quot;');
return String(s ?? '')
.replace(/&/g, '&amp;')
.replace(/</g, '&lt;')
.replace(/>/g, '&gt;')
.replace(/"/g, '&quot;');
}
function toast(msg, dur = 2800) {
const el = document.getElementById('toast');
el.textContent = msg; el.classList.add('on');
el.textContent = msg;
el.classList.add('on');
clearTimeout(toast._t);
toast._t = setTimeout(() => el.classList.remove('on'), dur);
}
function isDesktop() { return window.innerWidth >= 768; }
function isDesktop() {
return window.innerWidth >= 768;
}

View File

@@ -10,16 +10,18 @@
* renderDetail() does a cheaper in-place update of the right panel only,
* used during plugin runs and field edits to avoid re-rendering the sidebar.
*
* Depends on: S, _plugins, _batchState, _batchPollTimer (state.js);
* Depends on: S, _plugins, _batchState, _batchWs (state.js);
* req, toast (api.js / helpers.js); isDesktop (helpers.js);
* vApp, vDetailBody, mainTitle, mainHeaderBtns, vBatchBtn
* (tree-render.js / detail-render.js);
* attachEditables, initSortables (editing.js);
* setupDetailCanvas (canvas-boundary.js)
* Provides: render(), renderDetail(), loadConfig(), startBatchPolling(),
* Provides: render(), renderDetail(), loadConfig(), connectBatchWs(),
* loadTree()
*/
/* exported render, renderDetail, connectBatchWs, connectAiLogWs, loadTree */
// ── Full re-render ────────────────────────────────────────────────────────────
function render() {
if (document.activeElement?.contentEditable === 'true') return;
@@ -37,11 +39,9 @@ function renderDetail() {
const body = document.getElementById('main-body');
if (body) body.innerHTML = vDetailBody();
const t = document.getElementById('main-title');
if (t) t.innerHTML = mainTitle(); // innerHTML: mainTitle() returns an HTML span
if (t) t.innerHTML = mainTitle(); // innerHTML: mainTitle() returns an HTML string
const hb = document.getElementById('main-hdr-btns');
if (hb) hb.innerHTML = mainHeaderBtns();
const bb = document.getElementById('main-hdr-batch');
if (bb) bb.innerHTML = vBatchBtn();
attachEditables(); // pick up the new editable span in the header
requestAnimationFrame(setupDetailCanvas);
}
@@ -49,34 +49,111 @@ function renderDetail() {
// ── Data loading ──────────────────────────────────────────────────────────────
async function loadConfig() {
try {
const cfg = await req('GET','/api/config');
const cfg = await req('GET', '/api/config');
window._grabPx = cfg.boundary_grab_px ?? 14;
window._confidenceThreshold = cfg.confidence_threshold ?? 0.8;
window._aiLogMax = cfg.ai_log_max_entries ?? 100;
_plugins = cfg.plugins || [];
} catch { window._grabPx = 14; window._confidenceThreshold = 0.8; }
} catch {
window._grabPx = 14;
window._confidenceThreshold = 0.8;
window._aiLogMax = 100;
}
}
function startBatchPolling() {
if (_batchPollTimer) clearInterval(_batchPollTimer);
_batchPollTimer = setInterval(async () => {
try {
const st = await req('GET', '/api/batch/status');
function connectBatchWs() {
if (_batchWs) {
_batchWs.close();
_batchWs = null;
}
const proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
const ws = new WebSocket(`${proto}//${location.host}/ws/batch`);
_batchWs = ws;
ws.onmessage = async (ev) => {
const st = JSON.parse(ev.data);
_batchState = st;
const bb = document.getElementById('main-hdr-batch');
if (bb) bb.innerHTML = vBatchBtn();
if (!st.running) {
clearInterval(_batchPollTimer); _batchPollTimer = null;
ws.close();
_batchWs = null;
toast(`Batch: ${st.done} done, ${st.errors} errors`);
await loadTree();
}
} catch { /* ignore poll errors */ }
}, 2000);
};
ws.onerror = () => {
_batchWs = null;
};
ws.onclose = () => {
_batchWs = null;
};
}
function connectAiLogWs() {
const proto = location.protocol === 'https:' ? 'wss:' : 'ws:';
const ws = new WebSocket(`${proto}//${location.host}/ws/ai-log`);
_aiLogWs = ws;
ws.onmessage = (ev) => {
const msg = JSON.parse(ev.data);
if (msg.type === 'snapshot') {
_aiLog = msg.entries || [];
} else if (msg.type === 'update') {
const entry = msg.entry;
const idx = _aiLog.findIndex((e) => e.id === entry.id);
if (idx >= 0) {
_aiLog[idx] = entry;
} else {
_aiLog.push(entry);
const max = window._aiLogMax ?? 100;
if (_aiLog.length > max) _aiLog.splice(0, _aiLog.length - max);
}
} else if (msg.type === 'entity_update') {
const etype = msg.entity_type.slice(0, -1); // "books" → "book"
walkTree((n) => {
if (n.id === msg.entity_id) Object.assign(n, msg.data);
});
if (S.selected && S.selected.type === etype && S.selected.id === msg.entity_id) {
renderDetail();
} else {
render(); // update sidebar badges
}
return; // skip AI indicator update — not a log entry
}
// Update header AI indicator
const hdr = document.getElementById('hdr-ai-indicator');
if (hdr) {
const running = _aiLog.filter((e) => e.status === 'running').length;
hdr.innerHTML = running > 0 ? vAiIndicator(running) : '';
}
// Update root detail panel if shown
if (!S.selected) renderDetail();
};
ws.onerror = () => {};
ws.onclose = () => {
// Reconnect after a short delay
setTimeout(connectAiLogWs, 3000);
};
}
async function loadTree() {
S.tree = await req('GET','/api/tree');
S.tree = await req('GET', '/api/tree');
render();
}
// ── Init ──────────────────────────────────────────────────────────────────────
Promise.all([loadConfig(), loadTree()]);
// Image popup: close when clicking the overlay background or the × button.
(function () {
const popup = document.getElementById('img-popup');
const closeBtn = document.getElementById('img-popup-close');
if (popup) {
popup.addEventListener('click', (e) => {
if (e.target === popup) popup.classList.remove('open');
});
}
if (closeBtn) {
closeBtn.addEventListener('click', () => popup && popup.classList.remove('open'));
}
})();
Promise.all([loadConfig(), loadTree()]).then(() => connectAiLogWs());

View File

@@ -21,6 +21,8 @@
* Provides: collectQueueBooks(), renderPhotoQueue(), triggerPhoto()
*/
/* exported collectQueueBooks, renderPhotoQueue, triggerPhoto */
// ── Photo Queue ──────────────────────────────────────────────────────────────
function collectQueueBooks(node, type) {
const books = [];
@@ -29,9 +31,9 @@ function collectQueueBooks(node, type) {
if (n.identification_status !== 'user_approved') books.push(n);
return;
}
if (t === 'room') n.cabinets.forEach(c => collect(c, 'cabinet'));
if (t === 'cabinet') n.shelves.forEach(s => collect(s, 'shelf'));
if (t === 'shelf') n.books.forEach(b => collect(b, 'book'));
if (t === 'room') n.cabinets.forEach((c) => collect(c, 'cabinet'));
if (t === 'cabinet') n.shelves.forEach((s) => collect(s, 'shelf'));
if (t === 'shelf') n.books.forEach((b) => collect(b, 'book'));
}
collect(node, type);
return books;
@@ -40,8 +42,12 @@ function collectQueueBooks(node, type) {
function renderPhotoQueue() {
const el = document.getElementById('photo-queue-overlay');
if (!el) return;
if (!_photoQueue) { el.style.display = 'none'; el.innerHTML = ''; return; }
const {books, index, processing} = _photoQueue;
if (!_photoQueue) {
el.style.display = 'none';
el.innerHTML = '';
return;
}
const { books, index, processing } = _photoQueue;
el.style.display = 'flex';
if (index >= books.length) {
el.innerHTML = `<div class="pq-hdr">
@@ -79,8 +85,8 @@ function renderPhotoQueue() {
const gphoto = document.getElementById('gphoto');
function triggerPhoto(type, id) {
S._photoTarget = {type, id};
if (/Android|iPhone|iPad/i.test(navigator.userAgent)) gphoto.setAttribute('capture','environment');
S._photoTarget = { type, id };
if (/Android|iPhone|iPad/i.test(navigator.userAgent)) gphoto.setAttribute('capture', 'environment');
else gphoto.removeAttribute('capture');
gphoto.value = '';
gphoto.click();
@@ -89,7 +95,7 @@ function triggerPhoto(type, id) {
gphoto.addEventListener('change', async () => {
const file = gphoto.files[0];
if (!file || !S._photoTarget) return;
const {type, id} = S._photoTarget;
const { type, id } = S._photoTarget;
S._photoTarget = null;
const fd = new FormData();
fd.append('image', file, file.name); // HD — no client-side compression
@@ -101,8 +107,10 @@ gphoto.addEventListener('change', async () => {
};
try {
const res = await req('POST', urls[type], fd, true);
const key = type==='book' ? 'image_filename' : 'photo_filename';
walkTree(n=>{ if(n.id===id) n[key]=res[key]; });
const key = type === 'book' ? 'image_filename' : 'photo_filename';
walkTree((n) => {
if (n.id === id) n[key] = res[key];
});
// Photo queue mode: process and advance without full re-render
if (_photoQueue && type === 'book') {
_photoQueue.processing = true;
@@ -111,8 +119,12 @@ gphoto.addEventListener('change', async () => {
if (book && book.identification_status !== 'user_approved') {
try {
const br = await req('POST', `/api/books/${id}/process`);
walkTree(n => { if (n.id === id) Object.assign(n, br); });
} catch { /* continue queue on process error */ }
walkTree((n) => {
if (n.id === id) Object.assign(n, br);
});
} catch {
/* continue queue on process error */
}
}
_photoQueue.processing = false;
_photoQueue.index++;
@@ -127,12 +139,24 @@ gphoto.addEventListener('change', async () => {
if (book && book.identification_status !== 'user_approved') {
try {
const br = await req('POST', `/api/books/${id}/process`);
walkTree(n => { if(n.id===id) Object.assign(n, br); });
walkTree((n) => {
if (n.id === id) Object.assign(n, br);
});
toast(`Photo saved · Identified (${br.identification_status})`);
render();
} catch { toast('Photo saved'); }
} else { toast('Photo saved'); }
} else { toast('Photo saved'); }
} else { toast('Photo saved'); }
} catch(err) { toast('Upload failed: '+err.message); }
} catch {
toast('Photo saved');
}
} else {
toast('Photo saved');
}
} else {
toast('Photo saved');
}
} else {
toast('Photo saved');
}
} catch (err) {
toast('Upload failed: ' + err.message);
}
});

View File

@@ -7,15 +7,17 @@
* S — main UI state (tree data, selection, loading flags)
* _plugins — plugin manifest populated from GET /api/config
* _batchState — current batch-processing progress
* _batchPollTimer — setInterval handle for batch polling
* _batchWs — active WebSocket for batch push notifications (null when idle)
* _bnd — live boundary-canvas state (written by canvas-boundary.js,
* read by detail-render.js)
* _photoQueue — photo queue session state (written by photo.js,
* read by events.js)
*/
/* exported S */
// ── Main UI state ───────────────────────────────────────────────────────────
let S = {
const S = {
tree: null,
expanded: new Set(),
selected: null, // {type:'cabinet'|'shelf'|'book', id}
@@ -25,17 +27,33 @@ let S = {
};
// ── Plugin registry ─────────────────────────────────────────────────────────
// eslint-disable-next-line prefer-const
let _plugins = []; // populated from GET /api/config
// ── Batch processing state ──────────────────────────────────────────────────
let _batchState = {running: false, total: 0, done: 0, errors: 0, current: ''};
let _batchPollTimer = null;
const _batchState = { running: false, total: 0, done: 0, errors: 0, current: '' };
// eslint-disable-next-line prefer-const
let _batchWs = null;
// ── Boundary canvas live state ───────────────────────────────────────────────
// Owned by canvas-boundary.js; declared here so detail-render.js can read it
// without a circular load dependency.
// eslint-disable-next-line prefer-const
let _bnd = null; // {wrap,img,canvas,axis,boundaries[],pluginResults{},selectedPlugin,segments[],nodeId,nodeType}
// ── Photo queue session state ────────────────────────────────────────────────
// Owned by photo.js; declared here so events.js can read/write it.
// eslint-disable-next-line prefer-const
let _photoQueue = null; // {books:[...], index:0, processing:false}
// ── AI blocks visibility ─────────────────────────────────────────────────────
// Per-book override map. If bookId is absent the default rule applies:
// show when not user_approved, hide when user_approved.
const _aiBlocksVisible = {}; // {bookId: true|false}
// ── AI request log ───────────────────────────────────────────────────────────
// Populated from /ws/ai-log on page load.
// eslint-disable-next-line prefer-const
let _aiLog = []; // AiLogEntry[] — ring buffer, oldest first
// eslint-disable-next-line prefer-const
let _aiLogWs = null; // active WebSocket for AI log push (never closed)

View File

@@ -13,17 +13,27 @@
* vBook(), getBookStats(), vAiProgressBar()
*/
/* exported pluginsByCategory, pluginsByTarget, isLoading, vPluginBtn, vBatchBtn, vAiIndicator,
candidateSugRows, vApp, mainTitle, mainHeaderBtns, _STATUS_BADGE,
getBookStats, vAiProgressBar, walkTree, removeNode, findNode */
// ── Plugin helpers ───────────────────────────────────────────────────────────
function pluginsByCategory(cat) { return _plugins.filter(p => p.category === cat); }
function pluginsByTarget(cat, target) { return _plugins.filter(p => p.category === cat && p.target === target); }
function isLoading(pluginId, entityId) { return !!S._loading[`${pluginId}:${entityId}`]; }
function pluginsByCategory(cat) {
return _plugins.filter((p) => p.category === cat);
}
function pluginsByTarget(cat, target) {
return _plugins.filter((p) => p.category === cat && p.target === target);
}
function isLoading(pluginId, entityId) {
return !!S._loading[`${pluginId}:${entityId}`];
}
function vPluginBtn(plugin, entityId, entityType, extraDisabled = false) {
const loading = isLoading(plugin.id, entityId);
const label = loading ? '⏳' : esc(plugin.name);
return `<button class="btn btn-s" style="padding:2px 7px;font-size:.78rem;min-height:0"
data-a="run-plugin" data-plugin="${plugin.id}" data-id="${entityId}"
data-etype="${entityType}"${(loading||extraDisabled)?' disabled':''}
data-etype="${entityType}"${loading || extraDisabled ? ' disabled' : ''}
title="${esc(plugin.name)}">${label}</button>`;
}
@@ -34,21 +44,36 @@ function vBatchBtn() {
return `<button class="hbtn" data-a="batch-start" title="Analyze all unidentified books">🔄</button>`;
}
// ── AI active indicator ───────────────────────────────────────────────────────
function vAiIndicator(count) {
return `<span class="ai-indicator" title="${count} AI request${count === 1 ? '' : 's'} running"><span class="ai-dot"></span>${count}</span>`;
}
// ── Candidate suggestion rows ────────────────────────────────────────────────
const SOURCE_LABELS = {
vlm: 'VLM', ai: 'AI', openlibrary: 'OpenLib',
rsl: 'РГБ', rusneb: 'НЭБ', alib: 'Alib', nlr: 'НЛР', shpl: 'ШПИЛ',
vlm: 'VLM',
ai: 'AI',
openlibrary: 'OpenLib',
rsl: 'РГБ',
rusneb: 'НЭБ',
alib: 'Alib',
nlr: 'НЛР',
shpl: 'ШПИЛ',
};
function getSourceLabel(source) {
if (SOURCE_LABELS[source]) return SOURCE_LABELS[source];
const p = _plugins.find(pl => pl.id === source);
const p = _plugins.find((pl) => pl.id === source);
return p ? p.name : source;
}
function parseCandidates(json) {
if (!json) return [];
try { return JSON.parse(json) || []; } catch { return []; }
try {
return JSON.parse(json) || [];
} catch {
return [];
}
}
function candidateSugRows(b, field, inputId) {
@@ -61,7 +86,7 @@ function candidateSugRows(b, field, inputId) {
const v = (c[field] || '').trim();
if (!v) continue;
const key = v.toLowerCase();
if (!byVal.has(key)) byVal.set(key, {display: v, sources: []});
if (!byVal.has(key)) byVal.set(key, { display: v, sources: [] });
const entry = byVal.get(key);
if (!entry.sources.includes(c.source)) entry.sources.push(c.source);
}
@@ -69,17 +94,17 @@ function candidateSugRows(b, field, inputId) {
const aiVal = (b[`ai_${field}`] || '').trim();
if (aiVal) {
const key = aiVal.toLowerCase();
if (!byVal.has(key)) byVal.set(key, {display: aiVal, sources: []});
if (!byVal.has(key)) byVal.set(key, { display: aiVal, sources: [] });
const entry = byVal.get(key);
if (!entry.sources.includes('ai')) entry.sources.unshift('ai');
}
return [...byVal.entries()]
.filter(([k]) => k !== userVal.toLowerCase())
.map(([, {display, sources}]) => {
const badges = sources.map(s =>
`<span class="src-badge src-${esc(s)}">${esc(getSourceLabel(s))}</span>`
).join(' ');
.map(([, { display, sources }]) => {
const badges = sources
.map((s) => `<span class="src-badge src-${esc(s)}">${esc(getSourceLabel(s))}</span>`)
.join(' ');
const val = esc(display);
return `<div class="ai-sug">
${badges} <em>${val}</em>
@@ -90,14 +115,21 @@ function candidateSugRows(b, field, inputId) {
data-a="dismiss-field" data-id="${b.id}" data-field="${field}"
data-value="${val}" title="Dismiss">✗</button>
</div>`;
}).join('');
})
.join('');
}
// ── App shell ────────────────────────────────────────────────────────────────
function vApp() {
return `<div class="layout">
const running = (_aiLog || []).filter((e) => e.status === 'running').length;
return `<div class="page-wrap">
<div class="hdr">
<h1 data-a="deselect" style="cursor:pointer;flex:1" title="Back to overview">📚 Bookshelf</h1>
<div id="hdr-ai-indicator">${running > 0 ? vAiIndicator(running) : ''}</div>
<div id="main-hdr-batch">${vBatchBtn()}</div>
</div>
<div class="layout">
<div class="sidebar">
<div class="hdr"><h1 data-a="deselect" style="cursor:pointer" title="Back to overview">📚 Bookshelf</h1></div>
<div class="sidebar-body">
${vTreeBody()}
<button class="add-root" data-a="add-room">+ Add Room</button>
@@ -106,18 +138,18 @@ function vApp() {
<div class="main-panel">
<div class="main-hdr" id="main-hdr">
<h2 id="main-title">${mainTitle()}</h2>
<div id="main-hdr-batch">${vBatchBtn()}</div>
<div id="main-hdr-btns">${mainHeaderBtns()}</div>
</div>
<div class="main-body" id="main-body">${vDetailBody()}</div>
</div>
</div>
</div>`;
}
function mainTitle() {
if (!S.selected) return '<span style="opacity:.7">Select a room, cabinet or shelf</span>';
if (!S.selected) return '📚 Bookshelf';
const n = findNode(S.selected.id);
const {type, id} = S.selected;
const { type, id } = S.selected;
if (type === 'book') {
return `<span>${esc(n?.title || 'Untitled book')}</span>`;
}
@@ -127,7 +159,7 @@ function mainTitle() {
function mainHeaderBtns() {
if (!S.selected) return '';
const {type, id} = S.selected;
const { type, id } = S.selected;
if (type === 'room') {
return `<div style="display:flex;gap:2px">
<button class="hbtn" data-a="add-cabinet" data-id="${id}" title="Add cabinet"></button>
@@ -171,18 +203,22 @@ function vRoom(r) {
const exp = S.expanded.has(r.id);
const sel = S.selected?.id === r.id;
return `<div class="node" data-id="${r.id}" data-type="room">
<div class="nrow nrow-room${sel?' sel':''}" data-a="select" data-type="room" data-id="${r.id}">
<div class="nrow nrow-room${sel ? ' sel' : ''}" data-a="select" data-type="room" data-id="${r.id}">
<span class="drag-h">⠿</span>
<button class="tbtn ${exp?'':'col'}" data-a="toggle" data-type="room" data-id="${r.id}">▾</button>
<button class="tbtn ${exp ? '' : 'col'}" data-a="toggle" data-type="room" data-id="${r.id}">▾</button>
<span class="nname" data-type="room" data-id="${r.id}">🏠 ${esc(r.name)}</span>
<div class="nacts">
<button class="ibtn" data-a="add-cabinet" data-id="${r.id}" title="Add cabinet"></button>
<button class="ibtn" data-a="del-room" data-id="${r.id}" title="Delete">🗑</button>
</div>
</div>
${exp ? `<div class="nchildren"><div class="sortable-list" data-type="cabinets" data-parent="${r.id}">
${
exp
? `<div class="nchildren"><div class="sortable-list" data-type="cabinets" data-parent="${r.id}">
${r.cabinets.map(vCabinet).join('')}
</div></div>` : ''}
</div></div>`
: ''
}
</div>`;
}
@@ -190,9 +226,9 @@ function vCabinet(c) {
const exp = S.expanded.has(c.id);
const sel = S.selected?.id === c.id;
return `<div class="node" data-id="${c.id}" data-type="cabinet">
<div class="nrow nrow-cabinet${sel?' sel':''}" data-a="select" data-type="cabinet" data-id="${c.id}">
<div class="nrow nrow-cabinet${sel ? ' sel' : ''}" data-a="select" data-type="cabinet" data-id="${c.id}">
<span class="drag-h">⠿</span>
<button class="tbtn ${exp?'':'col'}" data-a="toggle" data-type="cabinet" data-id="${c.id}">▾</button>
<button class="tbtn ${exp ? '' : 'col'}" data-a="toggle" data-type="cabinet" data-id="${c.id}">▾</button>
${c.photo_filename ? `<img src="/images/${c.photo_filename}" style="width:26px;height:32px;object-fit:cover;border-radius:2px;flex-shrink:0" alt="">` : ''}
<span class="nname" data-type="cabinet" data-id="${c.id}">📚 ${esc(c.name)}</span>
<div class="nacts">
@@ -202,9 +238,13 @@ function vCabinet(c) {
${!isDesktop() ? `<button class="ibtn" data-a="del-cabinet" data-id="${c.id}" title="Delete">🗑</button>` : ''}
</div>
</div>
${exp ? `<div class="nchildren"><div class="sortable-list" data-type="shelves" data-parent="${c.id}">
${
exp
? `<div class="nchildren"><div class="sortable-list" data-type="shelves" data-parent="${c.id}">
${c.shelves.map(vShelf).join('')}
</div></div>` : ''}
</div></div>`
: ''
}
</div>`;
}
@@ -212,9 +252,9 @@ function vShelf(s) {
const exp = S.expanded.has(s.id);
const sel = S.selected?.id === s.id;
return `<div class="node" data-id="${s.id}" data-type="shelf">
<div class="nrow nrow-shelf${sel?' sel':''}" data-a="select" data-type="shelf" data-id="${s.id}">
<div class="nrow nrow-shelf${sel ? ' sel' : ''}" data-a="select" data-type="shelf" data-id="${s.id}">
<span class="drag-h">⠿</span>
<button class="tbtn ${exp?'':'col'}" data-a="toggle" data-type="shelf" data-id="${s.id}">▾</button>
<button class="tbtn ${exp ? '' : 'col'}" data-a="toggle" data-type="shelf" data-id="${s.id}">▾</button>
<span class="nname" data-type="shelf" data-id="${s.id}">${esc(s.name)}</span>
<div class="nacts">
${!isDesktop() ? `<button class="ibtn" data-a="photo" data-type="shelf" data-id="${s.id}" title="Photo">📷</button>` : ''}
@@ -223,9 +263,13 @@ function vShelf(s) {
${!isDesktop() ? `<button class="ibtn" data-a="del-shelf" data-id="${s.id}" title="Delete">🗑</button>` : ''}
</div>
</div>
${exp ? `<div class="nchildren"><div class="sortable-list" data-type="books" data-parent="${s.id}">
${
exp
? `<div class="nchildren"><div class="sortable-list" data-type="books" data-parent="${s.id}">
${s.books.map(vBook).join('')}
</div></div>` : ''}
</div></div>`
: ''
}
</div>`;
}
@@ -240,7 +284,7 @@ function vBook(b) {
const sub = [b.author, b.year].filter(Boolean).join(' · ');
const sel = S.selected?.id === b.id;
return `<div class="node" data-id="${b.id}" data-type="book">
<div class="nrow nrow-book${sel?' sel':''}" data-a="select" data-type="book" data-id="${b.id}">
<div class="nrow nrow-book${sel ? ' sel' : ''}" data-a="select" data-type="book" data-id="${b.id}">
<span class="drag-h">⠿</span>
<span class="sbadge ${sc}" title="${b.identification_status ?? 'unidentified'}">${sl}</span>
${b.image_filename ? `<img src="/images/${b.image_filename}" class="bthumb" alt="">` : `<div class="bthumb-ph">📖</div>`}
@@ -248,10 +292,14 @@ function vBook(b) {
<div class="bttl">${esc(b.title || '—')}</div>
${sub ? `<div class="bsub">${esc(sub)}</div>` : ''}
</div>
${!isDesktop() ? `<div class="nacts">
${
!isDesktop()
? `<div class="nacts">
<button class="ibtn" data-a="photo" data-type="book" data-id="${b.id}" title="Upload photo">📷</button>
<button class="ibtn" data-a="del-book" data-id="${b.id}" title="Delete">🗑</button>
</div>` : ''}
</div>`
: ''
}
</div>
</div>`;
}
@@ -260,26 +308,29 @@ function vBook(b) {
function getBookStats(node, type) {
const books = [];
function collect(n, t) {
if (t==='book') { books.push(n); return; }
if (t==='room') (n.cabinets||[]).forEach(c => collect(c,'cabinet'));
if (t==='cabinet') (n.shelves||[]).forEach(s => collect(s,'shelf'));
if (t==='shelf') (n.books||[]).forEach(b => collect(b,'book'));
if (t === 'book') {
books.push(n);
return;
}
if (t === 'room') (n.cabinets || []).forEach((c) => collect(c, 'cabinet'));
if (t === 'cabinet') (n.shelves || []).forEach((s) => collect(s, 'shelf'));
if (t === 'shelf') (n.books || []).forEach((b) => collect(b, 'book'));
}
collect(node, type);
return {
total: books.length,
approved: books.filter(b=>b.identification_status==='user_approved').length,
ai: books.filter(b=>b.identification_status==='ai_identified').length,
unidentified: books.filter(b=>b.identification_status==='unidentified').length,
approved: books.filter((b) => b.identification_status === 'user_approved').length,
ai: books.filter((b) => b.identification_status === 'ai_identified').length,
unidentified: books.filter((b) => b.identification_status === 'unidentified').length,
};
}
function vAiProgressBar(stats) {
const {total, approved, ai, unidentified} = stats;
const { total, approved, ai, unidentified } = stats;
if (!total || approved === total) return '';
const pA = (approved/total*100).toFixed(1);
const pI = (ai/total*100).toFixed(1);
const pU = (unidentified/total*100).toFixed(1);
const pA = ((approved / total) * 100).toFixed(1);
const pI = ((ai / total) * 100).toFixed(1);
const pU = ((unidentified / total) * 100).toFixed(1);
return `<div style="margin-bottom:10px;background:white;border-radius:8px;padding:10px;box-shadow:0 1px 3px rgba(0,0,0,.07)">
<div style="display:flex;gap:8px;font-size:.7rem;margin-bottom:5px">
<span style="color:#15803d">✓ ${approved} approved</span><span style="color:#94a3b8">·</span>
@@ -297,10 +348,13 @@ function vAiProgressBar(stats) {
// ── Tree helpers ─────────────────────────────────────────────────────────────
function walkTree(fn) {
if (!S.tree) return;
for (const r of S.tree) { fn(r,'room');
for (const c of r.cabinets) { fn(c,'cabinet');
for (const s of c.shelves) { fn(s,'shelf');
for (const b of s.books) fn(b,'book');
for (const r of S.tree) {
fn(r, 'room');
for (const c of r.cabinets) {
fn(c, 'cabinet');
for (const s of c.shelves) {
fn(s, 'shelf');
for (const b of s.books) fn(b, 'book');
}
}
}
@@ -308,14 +362,20 @@ function walkTree(fn) {
function removeNode(type, id) {
if (!S.tree) return;
if (type==='room') S.tree = S.tree.filter(r=>r.id!==id);
if (type==='cabinet') S.tree.forEach(r=>r.cabinets=r.cabinets.filter(c=>c.id!==id));
if (type==='shelf') S.tree.forEach(r=>r.cabinets.forEach(c=>c.shelves=c.shelves.filter(s=>s.id!==id)));
if (type==='book') S.tree.forEach(r=>r.cabinets.forEach(c=>c.shelves.forEach(s=>s.books=s.books.filter(b=>b.id!==id))));
if (type === 'room') S.tree = S.tree.filter((r) => r.id !== id);
if (type === 'cabinet') S.tree.forEach((r) => (r.cabinets = r.cabinets.filter((c) => c.id !== id)));
if (type === 'shelf')
S.tree.forEach((r) => r.cabinets.forEach((c) => (c.shelves = c.shelves.filter((s) => s.id !== id))));
if (type === 'book')
S.tree.forEach((r) =>
r.cabinets.forEach((c) => c.shelves.forEach((s) => (s.books = s.books.filter((b) => b.id !== id)))),
);
}
function findNode(id) {
let found = null;
walkTree(n => { if (n.id===id) found=n; });
walkTree((n) => {
if (n.id === id) found = n;
});
return found;
}

View File

@@ -26,6 +26,7 @@ from models import (
BoundaryDetectResult,
BookRow,
CandidateRecord,
IdentifyBlock,
PluginLookupResult,
TextRecognizeResult,
)
@@ -56,6 +57,7 @@ def _book(**kwargs: object) -> BookRow:
"analyzed_at": None,
"created_at": "2024-01-01T00:00:00",
"candidates": None,
"ai_blocks": None,
}
defaults.update(kwargs)
return BookRow(**defaults) # type: ignore[arg-type]
@@ -75,7 +77,7 @@ def seeded_db(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None:
c.execute("INSERT INTO cabinets VALUES (?,?,?,?,?,?,?,?)", ["c1", "r1", "Cabinet", None, None, None, 1, ts])
c.execute("INSERT INTO shelves VALUES (?,?,?,?,?,?,?,?)", ["s1", "c1", "Shelf", None, None, None, 1, ts])
c.execute(
"INSERT INTO books VALUES (?,?,0,NULL,'','','','','','','','','','','','','unidentified',0,NULL,?,NULL)",
"INSERT INTO books VALUES (?,?,0,NULL,'','','','','','','','','','','','','unidentified',0,NULL,?,NULL,NULL)",
["b1", "s1", ts],
)
c.commit()
@@ -93,6 +95,10 @@ class _BoundaryDetectorStub:
auto_queue = False
target = "books"
@property
def model(self) -> str:
return "stub-model"
@property
def max_image_px(self) -> int:
return 1600
@@ -109,6 +115,10 @@ class _BoundaryDetectorShelvesStub:
auto_queue = False
target = "shelves"
@property
def model(self) -> str:
return "stub-model"
@property
def max_image_px(self) -> int:
return 1600
@@ -124,6 +134,10 @@ class _TextRecognizerStub:
name = "Stub TR"
auto_queue = False
@property
def model(self) -> str:
return "stub-model"
@property
def max_image_px(self) -> int:
return 1600
@@ -139,19 +153,29 @@ class _BookIdentifierStub:
name = "Stub BI"
auto_queue = False
@property
def model(self) -> str:
return "stub-model"
@property
def max_image_px(self) -> int:
return 1600
@property
def confidence_threshold(self) -> float:
return 0.8
def identify(self, raw_text: str) -> AIIdentifyResult:
return {
"title": "Found Book",
"author": "Found Author",
"year": "2000",
"isbn": "",
"publisher": "",
"confidence": 0.9,
}
@property
def is_vlm(self) -> bool:
return False
def identify(
self,
raw_text: str,
archive_results: list[CandidateRecord],
images: list[tuple[str, str]],
) -> list[IdentifyBlock]:
return [IdentifyBlock(title="Found Book", author="Found Author", year="2000", score=0.9)]
class _ArchiveSearcherStub: