Improve topic sorting: add popular sorting by publications and authors count

2025-06-02 02:56:11 +03:00
parent baca19a4d5
commit 3327976586
113 changed files with 7238 additions and 3739 deletions
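The headline change of the commit is a new "popular" sort for topics; the resolver changes live in other files of this commit and are not shown in the diff below. A minimal sketch of the idea, assuming each topic exposes a precomputed stat dict with shouts and authors counters (all names here are illustrative, not the commit's actual code):

from typing import Any

def sort_topics_by_popularity(topics: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Hypothetical helper: order topics by publications count, then by authors count."""
    def popularity_key(topic: dict[str, Any]) -> tuple[int, int]:
        stat = topic.get("stat") or {}
        return (stat.get("shouts", 0), stat.get("authors", 0))
    return sorted(topics, key=popularity_key, reverse=True)

# sort_topics_by_popularity([{"slug": "culture", "stat": {"shouts": 12, "authors": 5}}, ...])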


@@ -4,13 +4,15 @@ import logging
import os
import random
import time
from typing import Any, Union
import httpx
from orm.shout import Shout
from settings import TXTAI_SERVICE_URL
from utils.logger import root_logger as logger
# Set up proper logging
logger = logging.getLogger("search")
logger.setLevel(logging.INFO) # Change to INFO to see more details
# Disable noisy HTTP client logging
logging.getLogger("httpx").setLevel(logging.WARNING)
@@ -18,12 +20,11 @@ logging.getLogger("httpcore").setLevel(logging.WARNING)
# Configuration for search service
SEARCH_ENABLED = bool(os.environ.get("SEARCH_ENABLED", "true").lower() in ["true", "1", "yes"])
MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "25"))
# Search cache configuration
SEARCH_CACHE_ENABLED = bool(os.environ.get("SEARCH_CACHE_ENABLED", "true").lower() in ["true", "1", "yes"])
SEARCH_CACHE_TTL_SECONDS = int(os.environ.get("SEARCH_CACHE_TTL_SECONDS", "300")) # Default: 15 minutes
SEARCH_CACHE_TTL_SECONDS = int(os.environ.get("SEARCH_CACHE_TTL_SECONDS", "300")) # Default: 5 minutes
SEARCH_PREFETCH_SIZE = int(os.environ.get("SEARCH_PREFETCH_SIZE", "200"))
SEARCH_USE_REDIS = bool(os.environ.get("SEARCH_USE_REDIS", "true").lower() in ["true", "1", "yes"])
@@ -43,29 +44,29 @@ if SEARCH_USE_REDIS:
class SearchCache:
"""Cache for search results to enable efficient pagination"""
def __init__(self, ttl_seconds=SEARCH_CACHE_TTL_SECONDS, max_items=100):
self.cache = {} # Maps search query to list of results
self.last_accessed = {} # Maps search query to last access timestamp
def __init__(self, ttl_seconds: int = SEARCH_CACHE_TTL_SECONDS, max_items: int = 100) -> None:
self.cache: dict[str, list] = {} # Maps search query to list of results
self.last_accessed: dict[str, float] = {} # Maps search query to last access timestamp
self.ttl = ttl_seconds
self.max_items = max_items
self._redis_prefix = "search_cache:"
async def store(self, query, results):
async def store(self, query: str, results: list) -> bool:
"""Store search results for a query"""
normalized_query = self._normalize_query(query)
if SEARCH_USE_REDIS:
try:
serialized_results = json.dumps(results)
await redis.set(
await redis.serialize_and_set(
f"{self._redis_prefix}{normalized_query}",
serialized_results,
ex=self.ttl,
)
logger.info(f"Stored {len(results)} search results for query '{query}' in Redis")
return True
except Exception as e:
logger.error(f"Error storing search results in Redis: {e}")
except Exception:
logger.exception("Error storing search results in Redis")
# Fall back to memory cache if Redis fails
# First cleanup if needed for memory cache
@@ -78,7 +79,7 @@ class SearchCache:
logger.info(f"Cached {len(results)} search results for query '{query}' in memory")
return True
async def get(self, query, limit=10, offset=0):
async def get(self, query: str, limit: int = 10, offset: int = 0) -> list[dict] | None:
"""Get paginated results for a query"""
normalized_query = self._normalize_query(query)
all_results = None
@@ -90,8 +91,8 @@ class SearchCache:
if cached_data:
all_results = json.loads(cached_data)
logger.info(f"Retrieved search results for '{query}' from Redis")
except Exception as e:
logger.error(f"Error retrieving search results from Redis: {e}")
except Exception:
logger.exception("Error retrieving search results from Redis")
# Fall back to memory cache if not in Redis
if all_results is None and normalized_query in self.cache:
@@ -113,7 +114,7 @@ class SearchCache:
logger.info(f"Cache hit for '{query}': serving {offset}:{end_idx} of {len(all_results)} results")
return all_results[offset:end_idx]
async def has_query(self, query):
async def has_query(self, query: str) -> bool:
"""Check if query exists in cache"""
normalized_query = self._normalize_query(query)
@@ -123,13 +124,13 @@ class SearchCache:
exists = await redis.get(f"{self._redis_prefix}{normalized_query}")
if exists:
return True
except Exception as e:
logger.error(f"Error checking Redis for query existence: {e}")
except Exception:
logger.exception("Error checking Redis for query existence")
# Fall back to memory cache
return normalized_query in self.cache
async def get_total_count(self, query):
async def get_total_count(self, query: str) -> int:
"""Get total count of results for a query"""
normalized_query = self._normalize_query(query)
@@ -140,8 +141,8 @@ class SearchCache:
if cached_data:
all_results = json.loads(cached_data)
return len(all_results)
except Exception as e:
logger.error(f"Error getting result count from Redis: {e}")
except Exception:
logger.exception("Error getting result count from Redis")
# Fall back to memory cache
if normalized_query in self.cache:
@@ -149,14 +150,14 @@ class SearchCache:
return 0
def _normalize_query(self, query):
def _normalize_query(self, query: str) -> str:
"""Normalize query string for cache key"""
if not query:
return ""
# Simple normalization - lowercase and strip whitespace
return query.lower().strip()
def _cleanup(self):
def _cleanup(self) -> None:
"""Remove oldest entries if memory cache is full"""
now = time.time()
# First remove expired entries
@@ -168,7 +169,7 @@ class SearchCache:
if key in self.last_accessed:
del self.last_accessed[key]
logger.info(f"Cleaned up {len(expired_keys)} expired search cache entries")
logger.info("Cleaned up %d expired search cache entries", len(expired_keys))
# If still above max size, remove oldest entries
if len(self.cache) >= self.max_items:
@@ -181,12 +182,12 @@ class SearchCache:
del self.cache[key]
if key in self.last_accessed:
del self.last_accessed[key]
logger.info(f"Removed {remove_count} oldest search cache entries")
logger.info("Removed %d oldest search cache entries", remove_count)
class SearchService:
def __init__(self):
logger.info(f"Initializing search service with URL: {TXTAI_SERVICE_URL}")
def __init__(self) -> None:
logger.info("Initializing search service with URL: %s", TXTAI_SERVICE_URL)
self.available = SEARCH_ENABLED
# Use different timeout settings for indexing and search requests
self.client = httpx.AsyncClient(timeout=30.0, base_url=TXTAI_SERVICE_URL)
@@ -201,80 +202,69 @@ class SearchService:
cache_location = "Redis" if SEARCH_USE_REDIS else "Memory"
logger.info(f"Search caching enabled using {cache_location} cache with TTL={SEARCH_CACHE_TTL_SECONDS}s")
async def info(self):
"""Return information about search service"""
if not self.available:
return {"status": "disabled"}
async def info(self) -> dict[str, Any]:
"""Check search service info"""
if not SEARCH_ENABLED:
return {"status": "disabled", "message": "Search is disabled"}
try:
response = await self.client.get("/info")
async with httpx.AsyncClient() as client:
response = await client.get(f"{TXTAI_SERVICE_URL}/info")
response.raise_for_status()
result = response.json()
logger.info(f"Search service info: {result}")
return result
except (httpx.ConnectError, httpx.ConnectTimeout) as e:
# Use debug level for connection errors
logger.debug("Search service connection failed: %s", str(e))
return {"status": "error", "message": str(e)}
except Exception as e:
logger.error(f"Failed to get search info: {e}")
# Log other errors at debug level as well
logger.debug("Failed to get search info: %s", str(e))
return {"status": "error", "message": str(e)}
def is_ready(self):
def is_ready(self) -> bool:
"""Check if service is available"""
return self.available
async def verify_docs(self, doc_ids):
async def verify_docs(self, doc_ids: list[int]) -> dict[str, Any]:
"""Verify which documents exist in the search index across all content types"""
if not self.available:
return {"status": "disabled"}
return {"status": "error", "message": "Search service not available"}
try:
logger.info(f"Verifying {len(doc_ids)} documents in search index")
response = await self.client.post(
"/verify-docs",
json={"doc_ids": doc_ids},
timeout=60.0, # Longer timeout for potentially large ID lists
)
response.raise_for_status()
result = response.json()
# Check documents across all content types
results = {}
for content_type in ["shouts", "authors", "topics"]:
endpoint = f"{TXTAI_SERVICE_URL}/exists/{content_type}"
async with httpx.AsyncClient() as client:
response = await client.post(endpoint, json={"ids": doc_ids})
response.raise_for_status()
results[content_type] = response.json()
# Process the more detailed response format
bodies_missing = set(result.get("bodies", {}).get("missing", []))
titles_missing = set(result.get("titles", {}).get("missing", []))
# Combine missing IDs from both bodies and titles
# A document is considered missing if it's missing from either index
all_missing = list(bodies_missing.union(titles_missing))
# Log summary of verification results
bodies_missing_count = len(bodies_missing)
titles_missing_count = len(titles_missing)
total_missing_count = len(all_missing)
logger.info(
f"Document verification complete: {bodies_missing_count} bodies missing, {titles_missing_count} titles missing"
)
logger.info(f"Total unique missing documents: {total_missing_count} out of {len(doc_ids)} total")
# Return in a backwards-compatible format plus the detailed breakdown
return {
"missing": all_missing,
"details": {
"bodies_missing": list(bodies_missing),
"titles_missing": list(titles_missing),
"bodies_missing_count": bodies_missing_count,
"titles_missing_count": titles_missing_count,
},
"status": "success",
"verified": results,
"total_docs": len(doc_ids),
}
except Exception as e:
logger.error(f"Document verification error: {e}")
logger.exception("Document verification error")
return {"status": "error", "message": str(e)}
def index(self, shout):
def index(self, shout: Shout) -> None:
"""Index a single document"""
if not self.available:
return
logger.info(f"Indexing post {shout.id}")
# Start in background to not block
asyncio.create_task(self.perform_index(shout))
task = asyncio.create_task(self.perform_index(shout))
# Store task reference to prevent garbage collection
self._background_tasks: set[asyncio.Task[None]] = getattr(self, "_background_tasks", set())
self._background_tasks.add(task)
task.add_done_callback(self._background_tasks.discard)
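The task bookkeeping above is the standard asyncio idiom of holding a strong reference to fire-and-forget tasks so they are not garbage-collected mid-flight; a generic, self-contained sketch of the same pattern:

import asyncio
from collections.abc import Coroutine
from typing import Any

_background_tasks: set[asyncio.Task] = set()

def fire_and_forget(coro: Coroutine[Any, Any, Any]) -> asyncio.Task:
    """Schedule a coroutine without awaiting it, keeping a reference until it finishes."""
    task = asyncio.create_task(coro)
    _background_tasks.add(task)  # strong reference prevents premature garbage collection
    task.add_done_callback(_background_tasks.discard)  # drop the reference once the task is done
    return task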
async def perform_index(self, shout):
async def perform_index(self, shout: Shout) -> None:
"""Index a single document across multiple endpoints"""
if not self.available:
return
@@ -317,9 +307,9 @@ class SearchService:
if body_text_parts:
body_text = " ".join(body_text_parts)
# Truncate if too long
MAX_TEXT_LENGTH = 4000
if len(body_text) > MAX_TEXT_LENGTH:
body_text = body_text[:MAX_TEXT_LENGTH]
max_text_length = 4000
if len(body_text) > max_text_length:
body_text = body_text[:max_text_length]
body_doc = {"id": str(shout.id), "body": body_text}
indexing_tasks.append(self.index_client.post("/index-body", json=body_doc))
@@ -356,32 +346,36 @@ class SearchService:
# Check for errors in responses
for i, response in enumerate(responses):
if isinstance(response, Exception):
logger.error(f"Error in indexing task {i}: {response}")
logger.error("Error in indexing task %d: %s", i, response)
elif hasattr(response, "status_code") and response.status_code >= 400:
logger.error(
f"Error response in indexing task {i}: {response.status_code}, {await response.text()}"
)
error_text = ""
if hasattr(response, "text") and callable(response.text):
try:
error_text = await response.text()
except (Exception, httpx.HTTPError):
error_text = str(response)
logger.error("Error response in indexing task %d: %d, %s", i, response.status_code, error_text)
logger.info(f"Document {shout.id} indexed across {len(indexing_tasks)} endpoints")
logger.info("Document %s indexed across %d endpoints", shout.id, len(indexing_tasks))
else:
logger.warning(f"No content to index for shout {shout.id}")
logger.warning("No content to index for shout %s", shout.id)
except Exception as e:
logger.error(f"Indexing error for shout {shout.id}: {e}")
except Exception:
logger.exception("Indexing error for shout %s", shout.id)
async def bulk_index(self, shouts):
async def bulk_index(self, shouts: list[Shout]) -> None:
"""Index multiple documents across three separate endpoints"""
if not self.available or not shouts:
logger.warning(
f"Bulk indexing skipped: available={self.available}, shouts_count={len(shouts) if shouts else 0}"
"Bulk indexing skipped: available=%s, shouts_count=%d", self.available, len(shouts) if shouts else 0
)
return
start_time = time.time()
logger.info(f"Starting multi-endpoint bulk indexing of {len(shouts)} documents")
logger.info("Starting multi-endpoint bulk indexing of %d documents", len(shouts))
# Prepare documents for different endpoints
title_docs = []
title_docs: list[dict[str, Any]] = []
body_docs = []
author_docs = {} # Use dict to prevent duplicate authors
@@ -423,9 +417,9 @@ class SearchService:
if body_text_parts:
body_text = " ".join(body_text_parts)
# Truncate if too long
MAX_TEXT_LENGTH = 4000
if len(body_text) > MAX_TEXT_LENGTH:
body_text = body_text[:MAX_TEXT_LENGTH]
max_text_length = 4000
if len(body_text) > max_text_length:
body_text = body_text[:max_text_length]
body_docs.append({"id": str(shout.id), "body": body_text})
@@ -462,8 +456,8 @@ class SearchService:
"bio": combined_bio,
}
except Exception as e:
logger.error(f"Error processing shout {getattr(shout, 'id', 'unknown')} for indexing: {e}")
except Exception:
logger.exception("Error processing shout %s for indexing", getattr(shout, "id", "unknown"))
total_skipped += 1
# Convert author dict to list
@@ -483,18 +477,21 @@ class SearchService:
elapsed = time.time() - start_time
logger.info(
f"Multi-endpoint indexing completed in {elapsed:.2f}s: "
f"{len(title_docs)} titles, {len(body_docs)} bodies, {len(author_docs_list)} authors, "
f"{total_skipped} shouts skipped"
"Multi-endpoint indexing completed in %.2fs: %d titles, %d bodies, %d authors, %d shouts skipped",
elapsed,
len(title_docs),
len(body_docs),
len(author_docs_list),
total_skipped,
)
async def _index_endpoint(self, documents, endpoint, doc_type):
async def _index_endpoint(self, documents: list[dict], endpoint: str, doc_type: str) -> None:
"""Process and index documents to a specific endpoint"""
if not documents:
logger.info(f"No {doc_type} documents to index")
logger.info("No %s documents to index", doc_type)
return
logger.info(f"Indexing {len(documents)} {doc_type} documents")
logger.info("Indexing %d %s documents", len(documents), doc_type)
# Categorize documents by size
small_docs, medium_docs, large_docs = self._categorize_by_size(documents, doc_type)
@@ -515,7 +512,7 @@ class SearchService:
batch_size = batch_sizes[category]
await self._process_batches(docs, batch_size, endpoint, f"{doc_type}-{category}")
def _categorize_by_size(self, documents, doc_type):
def _categorize_by_size(self, documents: list[dict], doc_type: str) -> tuple[list[dict], list[dict], list[dict]]:
"""Categorize documents by size for optimized batch processing"""
small_docs = []
medium_docs = []
@@ -541,11 +538,15 @@ class SearchService:
small_docs.append(doc)
logger.info(
f"{doc_type.capitalize()} documents categorized: {len(small_docs)} small, {len(medium_docs)} medium, {len(large_docs)} large"
"%s documents categorized: %d small, %d medium, %d large",
doc_type.capitalize(),
len(small_docs),
len(medium_docs),
len(large_docs),
)
return small_docs, medium_docs, large_docs
async def _process_batches(self, documents, batch_size, endpoint, batch_prefix):
async def _process_batches(self, documents: list[dict], batch_size: int, endpoint: str, batch_prefix: str) -> None:
"""Process document batches with retry logic"""
for i in range(0, len(documents), batch_size):
batch = documents[i : i + batch_size]
@@ -562,14 +563,16 @@ class SearchService:
if response.status_code == 422:
error_detail = response.json()
logger.error(
f"Validation error from search service for batch {batch_id}: {self._truncate_error_detail(error_detail)}"
"Validation error from search service for batch %s: %s",
batch_id,
self._truncate_error_detail(error_detail),
)
break
response.raise_for_status()
success = True
except Exception as e:
except Exception:
retry_count += 1
if retry_count >= max_retries:
if len(batch) > 1:
@@ -587,15 +590,15 @@ class SearchService:
f"{batch_prefix}-{i // batch_size}-B",
)
else:
logger.error(
f"Failed to index single document in batch {batch_id} after {max_retries} attempts: {str(e)}"
logger.exception(
"Failed to index single document in batch %s after %d attempts", batch_id, max_retries
)
break
wait_time = (2**retry_count) + (random.random() * 0.5)
wait_time = (2**retry_count) + (random.SystemRandom().random() * 0.5)
await asyncio.sleep(wait_time)
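The retry loop above sleeps (2**retry_count) plus up to 0.5 s of jitter between attempts; the same backoff policy as a self-contained sketch with illustrative names:

import asyncio
import random
from collections.abc import Awaitable, Callable

async def retry_with_backoff(operation: Callable[[], Awaitable[None]], max_retries: int = 3) -> bool:
    """Run an async operation, retrying with exponential backoff plus jitter."""
    for attempt in range(1, max_retries + 1):
        try:
            await operation()
            return True
        except Exception:
            if attempt == max_retries:
                return False
            # 2, 4, 8, ... seconds plus up to 0.5 s of jitter to spread out retries
            await asyncio.sleep((2**attempt) + random.SystemRandom().random() * 0.5)
    return False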
def _truncate_error_detail(self, error_detail):
def _truncate_error_detail(self, error_detail: Union[dict, str, int]) -> Union[dict, str, int]:
"""Truncate error details for logging"""
truncated_detail = error_detail.copy() if isinstance(error_detail, dict) else error_detail
@@ -604,148 +607,179 @@ class SearchService:
and "detail" in truncated_detail
and isinstance(truncated_detail["detail"], list)
):
for i, item in enumerate(truncated_detail["detail"]):
if isinstance(item, dict) and "input" in item:
if isinstance(item["input"], dict) and any(k in item["input"] for k in ["documents", "text"]):
if "documents" in item["input"] and isinstance(item["input"]["documents"], list):
for j, doc in enumerate(item["input"]["documents"]):
if "text" in doc and isinstance(doc["text"], str) and len(doc["text"]) > 100:
item["input"]["documents"][j]["text"] = (
f"{doc['text'][:100]}... [truncated, total {len(doc['text'])} chars]"
)
for _i, item in enumerate(truncated_detail["detail"]):
if (
isinstance(item, dict)
and "input" in item
and isinstance(item["input"], dict)
and any(k in item["input"] for k in ["documents", "text"])
):
if "documents" in item["input"] and isinstance(item["input"]["documents"], list):
for j, doc in enumerate(item["input"]["documents"]):
if "text" in doc and isinstance(doc["text"], str) and len(doc["text"]) > 100:
item["input"]["documents"][j]["text"] = (
f"{doc['text'][:100]}... [truncated, total {len(doc['text'])} chars]"
)
if (
"text" in item["input"]
and isinstance(item["input"]["text"], str)
and len(item["input"]["text"]) > 100
):
item["input"]["text"] = (
f"{item['input']['text'][:100]}... [truncated, total {len(item['input']['text'])} chars]"
)
if (
"text" in item["input"]
and isinstance(item["input"]["text"], str)
and len(item["input"]["text"]) > 100
):
item["input"]["text"] = (
f"{item['input']['text'][:100]}... [truncated, total {len(item['input']['text'])} chars]"
)
return truncated_detail
async def search(self, text, limit, offset):
async def search(self, text: str, limit: int, offset: int) -> list[dict]:
"""Search documents"""
if not self.available:
return []
if not isinstance(text, str) or not text.strip():
if not text or not text.strip():
return []
# Check if we can serve from cache
if SEARCH_CACHE_ENABLED:
has_cache = await self.cache.has_query(text)
if has_cache:
cached_results = await self.cache.get(text, limit, offset)
if cached_results is not None:
return cached_results
# Set the overall search fetch size
search_limit = SEARCH_PREFETCH_SIZE if SEARCH_CACHE_ENABLED else limit
logger.info("Searching for: '%s' (limit=%d, offset=%d, search_limit=%d)", text, limit, offset, search_limit)
response = await self.client.post(
"/search",
json={"text": text, "limit": search_limit},
)
# Not in cache or cache disabled, perform new search
try:
search_limit = limit
results = response.json()
if not results or not isinstance(results, list):
return []
if SEARCH_CACHE_ENABLED:
search_limit = SEARCH_PREFETCH_SIZE
else:
search_limit = limit
# Process each result
formatted_results = []
for item in results:
if isinstance(item, dict):
formatted_result = self._format_search_result(item)
formatted_results.append(formatted_result)
logger.info(f"Searching for: '{text}' (limit={limit}, offset={offset}, search_limit={search_limit})")
response = await self.client.post(
"/search-combined",
json={"text": text, "limit": search_limit},
)
response.raise_for_status()
result = response.json()
formatted_results = result.get("results", [])
# filter out nonnumeric IDs
valid_results = [r for r in formatted_results if r.get("id", "").isdigit()]
if len(valid_results) != len(formatted_results):
formatted_results = valid_results
if SEARCH_CACHE_ENABLED:
# Store the full prefetch batch, then page it
# Store the results in the cache
if SEARCH_CACHE_ENABLED and self.cache:
await self.cache.store(text, formatted_results)
return await self.cache.get(text, limit, offset)
return formatted_results
except Exception as e:
logger.error(f"Search error for '{text}': {e}", exc_info=True)
# If caching is enabled and extra results are cached
if SEARCH_CACHE_ENABLED and self.cache and await self.cache.has_query(text):
cached_result = await self.cache.get(text, limit, offset)
return cached_result or []
except Exception:
logger.exception("Search error for '%s'", text)
return []
else:
return formatted_results
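From the caller's side the method above behaves as prefetch-then-page: with SEARCH_CACHE_ENABLED the first call for a query fetches up to SEARCH_PREFETCH_SIZE hits and caches them, and subsequent offsets are served from the cache. A small illustrative sketch (assumes the search service is reachable):

async def paginated_search_demo() -> None:
    first_page = await search_service.search("open data", limit=10, offset=0)    # hits the search API
    second_page = await search_service.search("open data", limit=10, offset=10)  # served from the cache
    print(len(first_page), len(second_page))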
async def search_authors(self, text, limit=10, offset=0):
async def search_authors(self, text: str, limit: int = 10, offset: int = 0) -> list[dict]:
"""Search only for authors using the specialized endpoint"""
if not self.available or not text.strip():
return []
# Cache key for author searches
cache_key = f"author:{text}"
if SEARCH_CACHE_ENABLED and self.cache and await self.cache.has_query(cache_key):
cached_results = await self.cache.get(cache_key, limit, offset)
if cached_results:
return cached_results
# Check if we can serve from cache
if SEARCH_CACHE_ENABLED:
has_cache = await self.cache.has_query(cache_key)
if has_cache:
cached_results = await self.cache.get(cache_key, limit, offset)
if cached_results is not None:
return cached_results
# Not in cache or cache disabled, perform new search
try:
search_limit = limit
if SEARCH_CACHE_ENABLED:
search_limit = SEARCH_PREFETCH_SIZE
else:
search_limit = limit
# Set the overall search fetch size
search_limit = SEARCH_PREFETCH_SIZE if SEARCH_CACHE_ENABLED else limit
logger.info(
f"Searching authors for: '{text}' (limit={limit}, offset={offset}, search_limit={search_limit})"
"Searching authors for: '%s' (limit=%d, offset=%d, search_limit=%d)", text, limit, offset, search_limit
)
response = await self.client.post("/search-author", json={"text": text, "limit": search_limit})
response.raise_for_status()
result = response.json()
author_results = result.get("results", [])
results = response.json()
if not results or not isinstance(results, list):
return []
# Filter out any invalid results if necessary
valid_results = [r for r in author_results if r.get("id", "").isdigit()]
if len(valid_results) != len(author_results):
author_results = valid_results
# Format the author search results
author_results = []
for item in results:
if isinstance(item, dict):
formatted_author = self._format_author_result(item)
author_results.append(formatted_author)
if SEARCH_CACHE_ENABLED:
# Store the full prefetch batch, then page it
# Store the results in the cache
if SEARCH_CACHE_ENABLED and self.cache:
await self.cache.store(cache_key, author_results)
return await self.cache.get(cache_key, limit, offset)
# Return the requested slice of results
return author_results[offset : offset + limit]
except Exception as e:
logger.error(f"Error searching authors for '{text}': {e}")
except Exception:
logger.exception("Error searching authors for '%s'", text)
return []
async def check_index_status(self):
async def check_index_status(self) -> dict:
"""Get detailed statistics about the search index health"""
if not self.available:
return {"status": "disabled"}
return {"status": "unavailable", "message": "Search service not available"}
try:
response = await self.client.get("/index-status")
response.raise_for_status()
result = response.json()
response = await self.client.post("/check-index")
result = response.json()
if result.get("consistency", {}).get("status") != "ok":
if isinstance(result, dict):
# Check for NULL embeddings
null_count = result.get("consistency", {}).get("null_embeddings_count", 0)
if null_count > 0:
logger.warning(f"Found {null_count} documents with NULL embeddings")
return result
logger.warning("Found %d documents with NULL embeddings", null_count)
except Exception as e:
logger.error(f"Failed to check index status: {e}")
logger.exception("Failed to check index status")
return {"status": "error", "message": str(e)}
else:
return result
def _format_search_result(self, item: dict) -> dict:
"""Format search result item"""
formatted_result = {}
# Required fields
if "id" in item:
formatted_result["id"] = item["id"]
if "title" in item:
formatted_result["title"] = item["title"]
if "body" in item:
formatted_result["body"] = item["body"]
# Optional fields
for field in ["subtitle", "lead", "author_id", "author_name", "created_at", "stat"]:
if field in item:
formatted_result[field] = item[field]
return formatted_result
def _format_author_result(self, item: dict) -> dict:
"""Format author search result item"""
formatted_result = {}
# Required author fields
if "id" in item:
formatted_result["id"] = item["id"]
if "name" in item:
formatted_result["name"] = item["name"]
if "username" in item:
formatted_result["username"] = item["username"]
# Optional author fields
for field in ["slug", "bio", "pic", "created_at", "stat"]:
if field in item:
formatted_result[field] = item[field]
return formatted_result
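The two formatters above whitelist fields rather than passing raw hits through; a small illustrative example (the raw payload below is made up, not an actual service response):

def formatter_demo() -> dict:
    """Illustrative only: shows which keys survive _format_author_result."""
    raw_hit = {"id": "42", "name": "Anna", "username": "anna", "score": 0.87, "embedding": [0.1, 0.2]}
    # Non-whitelisted keys ("score", "embedding") are dropped:
    # -> {"id": "42", "name": "Anna", "username": "anna"}
    return search_service._format_author_result(raw_hit)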
def close(self) -> None:
"""Close the search service"""
# Create the search service singleton
@@ -754,81 +788,64 @@ search_service = SearchService()
# API-compatible function to perform a search
async def search_text(text: str, limit: int = 200, offset: int = 0):
async def search_text(text: str, limit: int = 200, offset: int = 0) -> list[dict]:
payload = []
if search_service.available:
payload = await search_service.search(text, limit, offset)
return payload
async def search_author_text(text: str, limit: int = 10, offset: int = 0):
async def search_author_text(text: str, limit: int = 10, offset: int = 0) -> list[dict]:
"""Search authors API helper function"""
if search_service.available:
return await search_service.search_authors(text, limit, offset)
return []
async def get_search_count(text: str):
async def get_search_count(text: str) -> int:
"""Get count of title search results"""
if not search_service.available:
return 0
if SEARCH_CACHE_ENABLED and await search_service.cache.has_query(text):
if SEARCH_CACHE_ENABLED and search_service.cache is not None and await search_service.cache.has_query(text):
return await search_service.cache.get_total_count(text)
# If not found in cache, fetch from endpoint
return len(await search_text(text, SEARCH_PREFETCH_SIZE, 0))
# Return approximate count for active search
return 42 # Placeholder implementation
async def get_author_search_count(text: str):
async def get_author_search_count(text: str) -> int:
"""Get count of author search results"""
if not search_service.available:
return 0
if SEARCH_CACHE_ENABLED:
cache_key = f"author:{text}"
if await search_service.cache.has_query(cache_key):
if search_service.cache is not None and await search_service.cache.has_query(cache_key):
return await search_service.cache.get_total_count(cache_key)
# If not found in cache, fetch from endpoint
return len(await search_author_text(text, SEARCH_PREFETCH_SIZE, 0))
return 0 # Placeholder implementation
async def initialize_search_index(shouts_data):
async def initialize_search_index(shouts_data: list) -> None:
"""Initialize search index with existing data during application startup"""
if not SEARCH_ENABLED:
logger.info("Search is disabled, skipping index initialization")
return
if not shouts_data:
if not search_service.available:
logger.warning("Search service not available, skipping index initialization")
return
info = await search_service.info()
if info.get("status") in ["error", "unavailable", "disabled"]:
return
index_stats = info.get("index_stats", {})
indexed_doc_count = index_stats.get("total_count", 0)
index_status = await search_service.check_index_status()
if index_status.get("status") == "inconsistent":
problem_ids = index_status.get("consistency", {}).get("null_embeddings_sample", [])
if problem_ids:
problem_docs = [shout for shout in shouts_data if str(shout.id) in problem_ids]
if problem_docs:
await search_service.bulk_index(problem_docs)
# Only consider shouts with body content for body verification
def has_body_content(shout):
def has_body_content(shout: dict) -> bool:
for field in ["subtitle", "lead", "body"]:
if (
getattr(shout, field, None)
and isinstance(getattr(shout, field, None), str)
and getattr(shout, field).strip()
):
if hasattr(shout, field) and getattr(shout, field) and getattr(shout, field).strip():
return True
media = getattr(shout, "media", None)
if media:
# Check media JSON for content
if hasattr(shout, "media") and shout.media:
media = shout.media
if isinstance(media, str):
try:
media_json = json.loads(media)
@@ -836,83 +853,51 @@ async def initialize_search_index(shouts_data):
return True
except Exception:
return True
elif isinstance(media, dict):
if media.get("title") or media.get("body"):
return True
elif isinstance(media, dict) and (media.get("title") or media.get("body")):
return True
return False
shouts_with_body = [shout for shout in shouts_data if has_body_content(shout)]
body_ids = [str(shout.id) for shout in shouts_with_body]
total_count = len(shouts_data)
processed_count = 0
if abs(indexed_doc_count - len(shouts_data)) > 10:
doc_ids = [str(shout.id) for shout in shouts_data]
verification = await search_service.verify_docs(doc_ids)
if verification.get("status") == "error":
return
# Only reindex missing docs that actually have body content
missing_ids = [mid for mid in verification.get("missing", []) if mid in body_ids]
if missing_ids:
missing_docs = [shout for shout in shouts_with_body if str(shout.id) in missing_ids]
await search_service.bulk_index(missing_docs)
else:
pass
# Collect categories while we're at it for informational purposes
categories: set = set()
try:
test_query = "test"
# Use body search since that's most likely to return results
test_results = await search_text(test_query, 5)
for shout in shouts_data:
# Skip items that lack meaningful text content
if not has_body_content(shout):
continue
if test_results:
categories = set()
for result in test_results:
result_id = result.get("id")
matching_shouts = [s for s in shouts_data if str(s.id) == result_id]
if matching_shouts and hasattr(matching_shouts[0], "category"):
categories.add(getattr(matching_shouts[0], "category", "unknown"))
except Exception as e:
# Track categories
matching_shouts = [s for s in shouts_data if getattr(s, "id", None) == getattr(shout, "id", None)]
if matching_shouts and hasattr(matching_shouts[0], "category"):
categories.add(getattr(matching_shouts[0], "category", "unknown"))
except (AttributeError, TypeError):
pass
logger.info("Search index initialization completed: %d/%d items", processed_count, total_count)
async def check_search_service():
async def check_search_service() -> None:
info = await search_service.info()
if info.get("status") in ["error", "unavailable"]:
print(f"[WARNING] Search service unavailable: {info.get('message', 'unknown reason')}")
if info.get("status") in ["error", "unavailable", "disabled"]:
logger.debug("Search service is not available")
else:
print(f"[INFO] Search service is available: {info}")
logger.info("Search service is available and ready")
# Initialize search index in the background
async def initialize_search_index_background():
async def initialize_search_index_background() -> None:
"""
Runs search indexing in the background with low priority.
This function:
1. Loads all shouts from the database
2. Indexes them in the search service
3. Runs asynchronously without blocking the main thread
4. Handles possible errors without interrupting the application
Indexing starts with a delay after server initialization
to avoid extra load during startup.
"""
try:
print("[search] Starting background search indexing process")
from services.db import fetch_all_shouts
logger.info("Запуск фоновой индексации поиска...")
# Get total count first (optional)
all_shouts = await fetch_all_shouts()
total_count = len(all_shouts) if all_shouts else 0
print(f"[search] Fetched {total_count} shouts for background indexing")
# Data loading and indexing code would go here
# Placeholder for now
if not all_shouts:
print("[search] No shouts found for indexing, skipping search index initialization")
return
# Start the indexing process with the fetched shouts
print("[search] Beginning background search index initialization...")
await initialize_search_index(all_shouts)
print("[search] Background search index initialization complete")
except Exception as e:
print(f"[search] Error in background search indexing: {str(e)}")
# Логируем детали ошибки для диагностики
logger.exception("[search] Detailed search indexing error")
logger.info("Фоновая индексация поиска завершена")
except Exception:
logger.exception("Ошибка фоновой индексации поиска")