import asyncio
import json
import logging
import os
import random
import time
from typing import Any, Union

import httpx

from orm.shout import Shout
from settings import TXTAI_SERVICE_URL
from utils.logger import root_logger as logger

# Set up proper logging
logger.setLevel(logging.INFO)  # Change to INFO to see more details
# Disable noisy HTTP client logging
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)

# Configuration for search service
SEARCH_ENABLED = bool(os.environ.get("SEARCH_ENABLED", "true").lower() in ["true", "1", "yes"])
MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "25"))

# Search cache configuration
SEARCH_CACHE_ENABLED = bool(os.environ.get("SEARCH_CACHE_ENABLED", "true").lower() in ["true", "1", "yes"])
SEARCH_CACHE_TTL_SECONDS = int(os.environ.get("SEARCH_CACHE_TTL_SECONDS", "300"))  # Default: 5 minutes
SEARCH_PREFETCH_SIZE = int(os.environ.get("SEARCH_PREFETCH_SIZE", "200"))
SEARCH_USE_REDIS = bool(os.environ.get("SEARCH_USE_REDIS", "true").lower() in ["true", "1", "yes"])

search_offset = 0

# Import Redis client if Redis caching is enabled
if SEARCH_USE_REDIS:
    try:
        from services.redis import redis

        logger.info("Redis client imported for search caching")
    except ImportError:
        logger.warning("Redis client import failed, falling back to memory cache")
        SEARCH_USE_REDIS = False


class SearchCache:
    """Cache for search results to enable efficient pagination"""

    def __init__(self, ttl_seconds: int = SEARCH_CACHE_TTL_SECONDS, max_items: int = 100) -> None:
        self.cache: dict[str, list] = {}  # Maps search query to list of results
        self.last_accessed: dict[str, float] = {}  # Maps search query to last access timestamp
        self.ttl = ttl_seconds
        self.max_items = max_items
        self._redis_prefix = "search_cache:"

    async def store(self, query: str, results: list) -> bool:
        """Store search results for a query"""
        normalized_query = self._normalize_query(query)

        if SEARCH_USE_REDIS:
            try:
                serialized_results = json.dumps(results)
                await redis.serialize_and_set(
                    f"{self._redis_prefix}{normalized_query}",
                    serialized_results,
                    ex=self.ttl,
                )
                logger.info(f"Stored {len(results)} search results for query '{query}' in Redis")
                return True
            except Exception:
                logger.exception("Error storing search results in Redis")
                # Fall back to memory cache if Redis fails

        # First cleanup if needed for memory cache
        if len(self.cache) >= self.max_items:
            self._cleanup()

        # Store results and update timestamp
        self.cache[normalized_query] = results
        self.last_accessed[normalized_query] = time.time()
        logger.info(f"Cached {len(results)} search results for query '{query}' in memory")
        return True

    async def get(self, query: str, limit: int = 10, offset: int = 0) -> list[dict] | None:
        """Get paginated results for a query"""
        normalized_query = self._normalize_query(query)
        all_results = None

        # Try to get from Redis first
        if SEARCH_USE_REDIS:
            try:
                cached_data = await redis.get(f"{self._redis_prefix}{normalized_query}")
                if cached_data:
                    all_results = json.loads(cached_data)
                    logger.info(f"Retrieved search results for '{query}' from Redis")
            except Exception:
                logger.exception("Error retrieving search results from Redis")

        # Fall back to memory cache if not in Redis
        if all_results is None and normalized_query in self.cache:
            all_results = self.cache[normalized_query]
            self.last_accessed[normalized_query] = time.time()
            logger.info(f"Retrieved search results for '{query}' from memory cache")

        # If not found in any cache
        if all_results is None:
            logger.info(f"Cache miss for query '{query}'")
            return None

        # Return paginated subset
        end_idx = min(offset + limit, len(all_results))
        if offset >= len(all_results):
            logger.warning(f"Requested offset {offset} exceeds result count {len(all_results)}")
            return []

        logger.info(f"Cache hit for '{query}': serving {offset}:{end_idx} of {len(all_results)} results")
        return all_results[offset:end_idx]

    async def has_query(self, query: str) -> bool:
        """Check if query exists in cache"""
        normalized_query = self._normalize_query(query)

        # Check Redis first
        if SEARCH_USE_REDIS:
            try:
                exists = await redis.get(f"{self._redis_prefix}{normalized_query}")
                if exists:
                    return True
            except Exception:
                logger.exception("Error checking Redis for query existence")

        # Fall back to memory cache
        return normalized_query in self.cache

    async def get_total_count(self, query: str) -> int:
        """Get total count of results for a query"""
        normalized_query = self._normalize_query(query)

        # Check Redis first
        if SEARCH_USE_REDIS:
            try:
                cached_data = await redis.get(f"{self._redis_prefix}{normalized_query}")
                if cached_data:
                    all_results = json.loads(cached_data)
                    return len(all_results)
            except Exception:
                logger.exception("Error getting result count from Redis")

        # Fall back to memory cache
        if normalized_query in self.cache:
            return len(self.cache[normalized_query])

        return 0

    def _normalize_query(self, query: str) -> str:
        """Normalize query string for cache key"""
        if not query:
            return ""
        # Simple normalization - lowercase and strip whitespace
        return query.lower().strip()

    def _cleanup(self) -> None:
        """Remove oldest entries if memory cache is full"""
        now = time.time()
        # First remove expired entries
        expired_keys = [key for key, last_access in self.last_accessed.items() if now - last_access > self.ttl]

        for key in expired_keys:
            if key in self.cache:
                del self.cache[key]
            if key in self.last_accessed:
                del self.last_accessed[key]

        logger.info("Cleaned up %d expired search cache entries", len(expired_keys))

        # If still above max size, remove oldest entries
        if len(self.cache) >= self.max_items:
            # Sort by last access time
            sorted_items = sorted(self.last_accessed.items(), key=lambda x: x[1])
            # Remove oldest 20%
            remove_count = max(1, int(len(sorted_items) * 0.2))
            for key, _ in sorted_items[:remove_count]:
                if key in self.cache:
                    del self.cache[key]
                if key in self.last_accessed:
                    del self.last_accessed[key]
            logger.info("Removed %d oldest search cache entries", remove_count)
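

# Illustrative sketch (never called anywhere in this module): how SearchCache enables
# pagination - store the full prefetched result list once, then serve pages from it.
# The query string, TTL, and page sizes below are made-up values for demonstration only.
async def _example_search_cache_usage() -> None:
    cache = SearchCache(ttl_seconds=60, max_items=10)
    sample_results = [{"id": i, "title": f"doc {i}"} for i in range(50)]
    await cache.store("climate", sample_results)  # cache all 50 prefetched results
    first_page = await cache.get("climate", limit=10, offset=0)  # results 0..9
    second_page = await cache.get("climate", limit=10, offset=10)  # results 10..19
    total = await cache.get_total_count("climate")  # 50
    logger.debug("example pages: %d + %d of %d", len(first_page or []), len(second_page or []), total)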


class SearchService:
    def __init__(self) -> None:
        logger.info("Initializing search service with URL: %s", TXTAI_SERVICE_URL)
        self.available = SEARCH_ENABLED
        # Use different timeout settings for indexing and search requests
        self.client = httpx.AsyncClient(timeout=30.0, base_url=TXTAI_SERVICE_URL)
        self.index_client = httpx.AsyncClient(timeout=120.0, base_url=TXTAI_SERVICE_URL)
        # Initialize search cache
        self.cache = SearchCache() if SEARCH_CACHE_ENABLED else None

        if not self.available:
            logger.info("Search disabled (SEARCH_ENABLED = False)")

        if SEARCH_CACHE_ENABLED:
            cache_location = "Redis" if SEARCH_USE_REDIS else "Memory"
            logger.info(f"Search caching enabled using {cache_location} cache with TTL={SEARCH_CACHE_TTL_SECONDS}s")

    async def info(self) -> dict[str, Any]:
        """Check search service info"""
        if not SEARCH_ENABLED:
            return {"status": "disabled", "message": "Search is disabled"}

        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(f"{TXTAI_SERVICE_URL}/info")
                response.raise_for_status()
                result = response.json()
                logger.info(f"Search service info: {result}")
                return result
        except (httpx.ConnectError, httpx.ConnectTimeout) as e:
            # Log connection errors at debug level only
            logger.debug("Search service connection failed: %s", str(e))
            return {"status": "error", "message": str(e)}
        except Exception as e:
            # Log other errors at debug level as well
            logger.debug("Failed to get search info: %s", str(e))
            return {"status": "error", "message": str(e)}

    def is_ready(self) -> bool:
        """Check if service is available"""
        return self.available

    async def verify_docs(self, doc_ids: list[int]) -> dict[str, Any]:
        """Verify which documents exist in the search index across all content types"""
        if not self.available:
            return {"status": "error", "message": "Search service not available"}

        try:
            # Check documents across all content types
            results = {}
            for content_type in ["shouts", "authors", "topics"]:
                endpoint = f"{TXTAI_SERVICE_URL}/exists/{content_type}"
                async with httpx.AsyncClient() as client:
                    response = await client.post(endpoint, json={"ids": doc_ids})
                    response.raise_for_status()
                    results[content_type] = response.json()

            return {
                "status": "success",
                "verified": results,
                "total_docs": len(doc_ids),
            }
        except Exception as e:
            logger.exception("Document verification error")
            return {"status": "error", "message": str(e)}

    def index(self, shout: Shout) -> None:
        """Index a single document"""
        if not self.available:
            return

        logger.info(f"Indexing post {shout.id}")
        # Start in background to not block
        task = asyncio.create_task(self.perform_index(shout))
        # Store task reference to prevent garbage collection
        self._background_tasks: set[asyncio.Task[None]] = getattr(self, "_background_tasks", set())
        self._background_tasks.add(task)
        task.add_done_callback(self._background_tasks.discard)

    async def perform_index(self, shout: Shout) -> None:
        """Index a single document across multiple endpoints"""
        if not self.available:
            return

        try:
            logger.info(f"Indexing document {shout.id} to individual endpoints")
            indexing_tasks = []

            # 1. Index title if available
            if hasattr(shout, "title") and shout.title and isinstance(shout.title, str):
                title_doc = {"id": str(shout.id), "title": shout.title.strip()}
                indexing_tasks.append(self.index_client.post("/index-title", json=title_doc))

            # 2. Index body content (subtitle, lead, body)
            body_text_parts = []
            for field_name in ["subtitle", "lead", "body"]:
                field_value = getattr(shout, field_name, None)
                if field_value and isinstance(field_value, str) and field_value.strip():
                    body_text_parts.append(field_value.strip())

            # Process media content if available
            media = getattr(shout, "media", None)
            if media:
                if isinstance(media, str):
                    try:
                        media_json = json.loads(media)
                        if isinstance(media_json, dict):
                            if "title" in media_json:
                                body_text_parts.append(media_json["title"])
                            if "body" in media_json:
                                body_text_parts.append(media_json["body"])
                    except json.JSONDecodeError:
                        body_text_parts.append(media)
                elif isinstance(media, dict):
                    if "title" in media:
                        body_text_parts.append(media["title"])
                    if "body" in media:
                        body_text_parts.append(media["body"])

            if body_text_parts:
                body_text = " ".join(body_text_parts)
                # Truncate if too long
                max_text_length = 4000
                if len(body_text) > max_text_length:
                    body_text = body_text[:max_text_length]

                body_doc = {"id": str(shout.id), "body": body_text}
                indexing_tasks.append(self.index_client.post("/index-body", json=body_doc))

            # 3. Index authors
            authors = getattr(shout, "authors", [])
            for author in authors:
                author_id = str(getattr(author, "id", 0))
                if not author_id or author_id == "0":
                    continue

                name = getattr(author, "name", "")

                # Combine bio and about fields
                bio_parts = []
                bio = getattr(author, "bio", "")
                if bio and isinstance(bio, str):
                    bio_parts.append(bio.strip())

                about = getattr(author, "about", "")
                if about and isinstance(about, str):
                    bio_parts.append(about.strip())

                combined_bio = " ".join(bio_parts)

                if name:
                    author_doc = {"id": author_id, "name": name, "bio": combined_bio}
                    indexing_tasks.append(self.index_client.post("/index-author", json=author_doc))

            # Run all indexing tasks in parallel
            if indexing_tasks:
                responses = await asyncio.gather(*indexing_tasks, return_exceptions=True)

                # Check for errors in responses
                for i, response in enumerate(responses):
                    if isinstance(response, Exception):
                        logger.error("Error in indexing task %d: %s", i, response)
                    elif hasattr(response, "status_code") and response.status_code >= 400:
                        error_text = ""
                        if hasattr(response, "text") and callable(response.text):
                            try:
                                error_text = await response.text()
                            except (Exception, httpx.HTTPError):
                                error_text = str(response)
                        logger.error("Error response in indexing task %d: %d, %s", i, response.status_code, error_text)

                logger.info("Document %s indexed across %d endpoints", shout.id, len(indexing_tasks))
            else:
                logger.warning("No content to index for shout %s", shout.id)

        except Exception:
            logger.exception("Indexing error for shout %s", shout.id)

    async def bulk_index(self, shouts: list[Shout]) -> None:
        """Index multiple documents across three separate endpoints"""
        if not self.available or not shouts:
            logger.warning(
                "Bulk indexing skipped: available=%s, shouts_count=%d", self.available, len(shouts) if shouts else 0
            )
            return

        start_time = time.time()
        logger.info("Starting multi-endpoint bulk indexing of %d documents", len(shouts))

        # Prepare documents for different endpoints
        title_docs: list[dict[str, Any]] = []
        body_docs = []
        author_docs = {}  # Use dict to prevent duplicate authors

        total_skipped = 0

        for shout in shouts:
            try:
                # 1. Process title documents
                if hasattr(shout, "title") and shout.title and isinstance(shout.title, str):
                    title_docs.append({"id": str(shout.id), "title": shout.title.strip()})

                # 2. Process body documents (subtitle, lead, body)
                body_text_parts = []
                for field_name in ["subtitle", "lead", "body"]:
                    field_value = getattr(shout, field_name, None)
                    if field_value and isinstance(field_value, str) and field_value.strip():
                        body_text_parts.append(field_value.strip())

                # Process media content if available
                media = getattr(shout, "media", None)
                if media:
                    if isinstance(media, str):
                        try:
                            media_json = json.loads(media)
                            if isinstance(media_json, dict):
                                if "title" in media_json:
                                    body_text_parts.append(media_json["title"])
                                if "body" in media_json:
                                    body_text_parts.append(media_json["body"])
                        except json.JSONDecodeError:
                            body_text_parts.append(media)
                    elif isinstance(media, dict):
                        if "title" in media:
                            body_text_parts.append(media["title"])
                        if "body" in media:
                            body_text_parts.append(media["body"])

                # Only add body document if we have body text
                if body_text_parts:
                    body_text = " ".join(body_text_parts)
                    # Truncate if too long
                    max_text_length = 4000
                    if len(body_text) > max_text_length:
                        body_text = body_text[:max_text_length]

                    body_docs.append({"id": str(shout.id), "body": body_text})

                # 3. Process authors if available
                authors = getattr(shout, "authors", [])
                for author in authors:
                    author_id = str(getattr(author, "id", 0))
                    if not author_id or author_id == "0":
                        continue

                    # Skip if we've already processed this author
                    if author_id in author_docs:
                        continue

                    name = getattr(author, "name", "")

                    # Combine bio and about fields
                    bio_parts = []
                    bio = getattr(author, "bio", "")
                    if bio and isinstance(bio, str):
                        bio_parts.append(bio.strip())

                    about = getattr(author, "about", "")
                    if about and isinstance(about, str):
                        bio_parts.append(about.strip())

                    combined_bio = " ".join(bio_parts)

                    # Only add if we have author data
                    if name:
                        author_docs[author_id] = {
                            "id": author_id,
                            "name": name,
                            "bio": combined_bio,
                        }

            except Exception:
                logger.exception("Error processing shout %s for indexing", getattr(shout, "id", "unknown"))
                total_skipped += 1

        # Convert author dict to list
        author_docs_list = list(author_docs.values())

        # Log indexing started message
        logger.info("indexing started...")

        # Process each endpoint in parallel
        indexing_tasks = [
            self._index_endpoint(title_docs, "/bulk-index-titles", "title"),
            self._index_endpoint(body_docs, "/bulk-index-bodies", "body"),
            self._index_endpoint(author_docs_list, "/bulk-index-authors", "author"),
        ]

        await asyncio.gather(*indexing_tasks)

        elapsed = time.time() - start_time
        logger.info(
            "Multi-endpoint indexing completed in %.2fs: %d titles, %d bodies, %d authors, %d shouts skipped",
            elapsed,
            len(title_docs),
            len(body_docs),
            len(author_docs_list),
            total_skipped,
        )

    async def _index_endpoint(self, documents: list[dict], endpoint: str, doc_type: str) -> None:
        """Process and index documents to a specific endpoint"""
        if not documents:
            logger.info("No %s documents to index", doc_type)
            return

        logger.info("Indexing %d %s documents", len(documents), doc_type)

        # Categorize documents by size
        small_docs, medium_docs, large_docs = self._categorize_by_size(documents, doc_type)

        # Process each category with appropriate batch sizes
        batch_sizes = {
            "small": min(MAX_BATCH_SIZE, 15),
            "medium": min(MAX_BATCH_SIZE, 10),
            "large": min(MAX_BATCH_SIZE, 3),
        }

        for category, docs in [
            ("small", small_docs),
            ("medium", medium_docs),
            ("large", large_docs),
        ]:
            if docs:
                batch_size = batch_sizes[category]
                await self._process_batches(docs, batch_size, endpoint, f"{doc_type}-{category}")

    def _categorize_by_size(self, documents: list[dict], doc_type: str) -> tuple[list[dict], list[dict], list[dict]]:
        """Categorize documents by size for optimized batch processing"""
        small_docs = []
        medium_docs = []
        large_docs = []

        for doc in documents:
            # Extract relevant text based on document type
            if doc_type == "title":
                text = doc.get("title", "")
            elif doc_type == "body":
                text = doc.get("body", "")
            else:  # author
                # For authors, consider both name and bio length
                text = doc.get("name", "") + " " + doc.get("bio", "")

            text_len = len(text)

            if text_len > 5000:
                large_docs.append(doc)
            elif text_len > 2000:
                medium_docs.append(doc)
            else:
                small_docs.append(doc)

        logger.info(
            "%s documents categorized: %d small, %d medium, %d large",
            doc_type.capitalize(),
            len(small_docs),
            len(medium_docs),
            len(large_docs),
        )
        return small_docs, medium_docs, large_docs

    async def _process_batches(self, documents: list[dict], batch_size: int, endpoint: str, batch_prefix: str) -> None:
        """Process document batches with retry logic"""
        for i in range(0, len(documents), batch_size):
            batch = documents[i : i + batch_size]
            batch_id = f"{batch_prefix}-{i // batch_size + 1}"

            retry_count = 0
            max_retries = 3
            success = False

            while not success and retry_count < max_retries:
                try:
                    response = await self.index_client.post(endpoint, json=batch, timeout=90.0)

                    if response.status_code == 422:
                        error_detail = response.json()
                        logger.error(
                            "Validation error from search service for batch %s: %s",
                            batch_id,
                            self._truncate_error_detail(error_detail),
                        )
                        break

                    response.raise_for_status()
                    success = True

                except Exception:
                    retry_count += 1
                    if retry_count >= max_retries:
                        if len(batch) > 1:
                            mid = len(batch) // 2
                            await self._process_batches(
                                batch[:mid],
                                batch_size // 2,
                                endpoint,
                                f"{batch_prefix}-{i // batch_size}-A",
                            )
                            await self._process_batches(
                                batch[mid:],
                                batch_size // 2,
                                endpoint,
                                f"{batch_prefix}-{i // batch_size}-B",
                            )
                        else:
                            logger.exception(
                                "Failed to index single document in batch %s after %d attempts", batch_id, max_retries
                            )
                        break

                    wait_time = (2**retry_count) + (random.SystemRandom().random() * 0.5)
                    await asyncio.sleep(wait_time)

    def _truncate_error_detail(self, error_detail: Union[dict, str, int]) -> Union[dict, str, int]:
        """Truncate error details for logging"""
        truncated_detail = error_detail.copy() if isinstance(error_detail, dict) else error_detail

        if (
            isinstance(truncated_detail, dict)
            and "detail" in truncated_detail
            and isinstance(truncated_detail["detail"], list)
        ):
            for _i, item in enumerate(truncated_detail["detail"]):
                if (
                    isinstance(item, dict)
                    and "input" in item
                    and isinstance(item["input"], dict)
                    and any(k in item["input"] for k in ["documents", "text"])
                ):
                    if "documents" in item["input"] and isinstance(item["input"]["documents"], list):
                        for j, doc in enumerate(item["input"]["documents"]):
                            if "text" in doc and isinstance(doc["text"], str) and len(doc["text"]) > 100:
                                item["input"]["documents"][j]["text"] = (
                                    f"{doc['text'][:100]}... [truncated, total {len(doc['text'])} chars]"
                                )

                    if (
                        "text" in item["input"]
                        and isinstance(item["input"]["text"], str)
                        and len(item["input"]["text"]) > 100
                    ):
                        item["input"]["text"] = (
                            f"{item['input']['text'][:100]}... [truncated, total {len(item['input']['text'])} chars]"
                        )

        return truncated_detail

    async def search(self, text: str, limit: int, offset: int) -> list[dict]:
        """Search documents"""
        if not self.available:
            return []

        if not text or not text.strip():
            return []

        # Determine the overall prefetch size for the search request
        search_limit = SEARCH_PREFETCH_SIZE if SEARCH_CACHE_ENABLED else limit

        logger.info("Searching for: '%s' (limit=%d, offset=%d, search_limit=%d)", text, limit, offset, search_limit)

        response = await self.client.post(
            "/search",
            json={"text": text, "limit": search_limit},
        )

        try:
            results = response.json()
            if not results or not isinstance(results, list):
                return []

            # Format each result
            formatted_results = []
            for item in results:
                if isinstance(item, dict):
                    formatted_result = self._format_search_result(item)
                    formatted_results.append(formatted_result)

            # Store the results in the cache
            if SEARCH_CACHE_ENABLED and self.cache:
                await self.cache.store(text, formatted_results)

            # If caching is enabled and extra results were prefetched, serve the requested page from the cache
            if SEARCH_CACHE_ENABLED and self.cache and await self.cache.has_query(text):
                cached_result = await self.cache.get(text, limit, offset)
                return cached_result or []

        except Exception:
            logger.exception("Search error for '%s'", text)
            return []
        else:
            return formatted_results

    async def search_authors(self, text: str, limit: int = 10, offset: int = 0) -> list[dict]:
        """Search only for authors using the specialized endpoint"""
        if not self.available or not text.strip():
            return []

        # Cache key for author searches
        cache_key = f"author:{text}"
        if SEARCH_CACHE_ENABLED and self.cache and await self.cache.has_query(cache_key):
            cached_results = await self.cache.get(cache_key, limit, offset)
            if cached_results:
                return cached_results

        try:
            # Determine the overall prefetch size for the search request
            search_limit = SEARCH_PREFETCH_SIZE if SEARCH_CACHE_ENABLED else limit

            logger.info(
                "Searching authors for: '%s' (limit=%d, offset=%d, search_limit=%d)", text, limit, offset, search_limit
            )
            response = await self.client.post("/search-author", json={"text": text, "limit": search_limit})

            results = response.json()
            if not results or not isinstance(results, list):
                return []

            # Format author search results
            author_results = []
            for item in results:
                if isinstance(item, dict):
                    formatted_author = self._format_author_result(item)
                    author_results.append(formatted_author)

            # Store the results in the cache
            if SEARCH_CACHE_ENABLED and self.cache:
                await self.cache.store(cache_key, author_results)

            # Return the requested slice of results
            return author_results[offset : offset + limit]

        except Exception:
            logger.exception("Error searching authors for '%s'", text)
            return []

    async def check_index_status(self) -> dict:
        """Get detailed statistics about the search index health"""
        if not self.available:
            return {"status": "unavailable", "message": "Search service not available"}

        try:
            response = await self.client.post("/check-index")
            result = response.json()

            if isinstance(result, dict):
                # Check for NULL embeddings
                null_count = result.get("consistency", {}).get("null_embeddings_count", 0)
                if null_count > 0:
                    logger.warning("Found %d documents with NULL embeddings", null_count)
        except Exception as e:
            logger.exception("Failed to check index status")
            return {"status": "error", "message": str(e)}
        else:
            return result

    def _format_search_result(self, item: dict) -> dict:
        """Format search result item"""
        formatted_result = {}

        # Required fields
        if "id" in item:
            formatted_result["id"] = item["id"]
        if "title" in item:
            formatted_result["title"] = item["title"]
        if "body" in item:
            formatted_result["body"] = item["body"]

        # Optional fields
        for field in ["subtitle", "lead", "author_id", "author_name", "created_at", "stat"]:
            if field in item:
                formatted_result[field] = item[field]

        return formatted_result

    def _format_author_result(self, item: dict) -> dict:
        """Format author search result item"""
        formatted_result = {}

        # Required author fields
        if "id" in item:
            formatted_result["id"] = item["id"]
        if "name" in item:
            formatted_result["name"] = item["name"]
        if "username" in item:
            formatted_result["username"] = item["username"]

        # Optional author fields
        for field in ["slug", "bio", "pic", "created_at", "stat"]:
            if field in item:
                formatted_result[field] = item[field]

        return formatted_result

    def close(self) -> None:
        """Close the search service"""


# Create the search service singleton
search_service = SearchService()


# API-compatible function to perform a search
async def search_text(text: str, limit: int = 200, offset: int = 0) -> list[dict]:
    payload = []
    if search_service.available:
        payload = await search_service.search(text, limit, offset)
    return payload


async def search_author_text(text: str, limit: int = 10, offset: int = 0) -> list[dict]:
    """Search authors API helper function"""
    if search_service.available:
        return await search_service.search_authors(text, limit, offset)
    return []


async def get_search_count(text: str) -> int:
    """Get count of title search results"""
    if not search_service.available:
        return 0

    if SEARCH_CACHE_ENABLED and search_service.cache is not None and await search_service.cache.has_query(text):
        return await search_service.cache.get_total_count(text)

    # Return approximate count for active search
    return 42  # Placeholder implementation


async def get_author_search_count(text: str) -> int:
    """Get count of author search results"""
    if not search_service.available:
        return 0

    if SEARCH_CACHE_ENABLED:
        cache_key = f"author:{text}"
        if search_service.cache is not None and await search_service.cache.has_query(cache_key):
            return await search_service.cache.get_total_count(cache_key)

    return 0  # Placeholder implementation


async def initialize_search_index(shouts_data: list) -> None:
    """Initialize search index with existing data during application startup"""
    if not SEARCH_ENABLED:
        logger.info("Search is disabled, skipping index initialization")
        return

    if not search_service.available:
        logger.warning("Search service not available, skipping index initialization")
        return

    # Only consider shouts with body content for body verification
    def has_body_content(shout: dict) -> bool:
        for field in ["subtitle", "lead", "body"]:
            if hasattr(shout, field) and getattr(shout, field) and getattr(shout, field).strip():
                return True

        # Check media JSON for content
        if hasattr(shout, "media") and shout.media:
            media = shout.media
            if isinstance(media, str):
                try:
                    media_json = json.loads(media)
                    if isinstance(media_json, dict) and (media_json.get("title") or media_json.get("body")):
                        return True
                except Exception:
                    return True
            elif isinstance(media, dict) and (media.get("title") or media.get("body")):
                return True
        return False

    total_count = len(shouts_data)
    processed_count = 0

    # Collect categories while we're at it for informational purposes
    categories: set = set()

    try:
        for shout in shouts_data:
            # Skip items that lack meaningful text content
            if not has_body_content(shout):
                continue

            # Track categories
            matching_shouts = [s for s in shouts_data if getattr(s, "id", None) == getattr(shout, "id", None)]
            if matching_shouts and hasattr(matching_shouts[0], "category"):
                categories.add(getattr(matching_shouts[0], "category", "unknown"))
    except (AttributeError, TypeError):
        pass

    logger.info("Search index initialization completed: %d/%d items", processed_count, total_count)


async def check_search_service() -> None:
    info = await search_service.info()
    if info.get("status") in ["error", "unavailable", "disabled"]:
        logger.debug("Search service is not available")
    else:
        logger.info("Search service is available and ready")


# Initialize search index in the background
async def initialize_search_index_background() -> None:
    """
    Run search indexing in the background with low priority.
    """
    try:
        logger.info("Starting background search indexing...")

        # Data loading and indexing would go here
        # For now this is a placeholder

        logger.info("Background search indexing finished")
    except Exception:
        logger.exception("Background search indexing failed")
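

# Illustrative sketch (not wired into any framework): one way the helpers above could be
# combined at application startup. The data loader hinted at in the comment is hypothetical,
# not something defined in this module; only an empty list is used here.
async def _example_startup_flow() -> None:
    await check_search_service()  # log whether the txtai service responds
    if search_service.available:
        shouts_data: list = []  # e.g. replace with the result of a real data loader
        await initialize_search_index(shouts_data)
        results = await search_text("example query", limit=10, offset=0)
        logger.debug("example search returned %d results", len(results))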