debug: no redis for indexing in backend side
All checks were successful
Deploy on push / deploy (push) Successful in 1m41s
This commit is contained in:
parent 39242d5e6c
commit ad0ca75aa9

@@ -5,26 +5,23 @@ import os
 import httpx
 import time

-from services.redis import redis
-from utils.encoders import CustomJSONEncoder

 # Set up proper logging
 logger = logging.getLogger("search")
 logger.setLevel(logging.INFO) # Change to INFO to see more details

-REDIS_TTL = 86400 # 1 day in seconds

 # Configuration for search service
 SEARCH_ENABLED = bool(os.environ.get("SEARCH_ENABLED", "true").lower() in ["true", "1", "yes"])
 TXTAI_SERVICE_URL = os.environ.get("TXTAI_SERVICE_URL", "http://search-txtai.web.1:8000")
-MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "100"))
+MAX_BATCH_SIZE = int(os.environ.get("SEARCH_MAX_BATCH_SIZE", "25"))


 class SearchService:
     def __init__(self):
         logger.info(f"Initializing search service with URL: {TXTAI_SERVICE_URL}")
         self.available = SEARCH_ENABLED
+        # Use different timeout settings for indexing and search requests
         self.client = httpx.AsyncClient(timeout=30.0, base_url=TXTAI_SERVICE_URL)
+        self.index_client = httpx.AsyncClient(timeout=120.0, base_url=TXTAI_SERVICE_URL)

         if not self.available:
             logger.info("Search disabled (SEARCH_ENABLED = False)")
@@ -54,7 +51,6 @@ class SearchService:
             return

         logger.info(f"Indexing post {shout.id}")

         # Start in background to not block
         asyncio.create_task(self.perform_index(shout))

@@ -77,7 +73,6 @@ class SearchService:
             logger.warning(f"No text content to index for shout {shout.id}")
             return

-        # Log the document being indexed
         logger.info(f"Indexing document: ID={shout.id}, Text length={len(text)}")

         # Send to txtai service
@@ -100,11 +95,10 @@ class SearchService:
         start_time = time.time()
         logger.info(f"Starting bulk indexing of {len(shouts)} documents")

-        # Process documents in batches
         batch_size = MAX_BATCH_SIZE
         total_indexed = 0
         total_skipped = 0
-
+        i = 0
         for i in range(0, len(shouts), batch_size):
             batch = shouts[i:i+batch_size]
             logger.info(f"Processing batch {i//batch_size + 1} of {(len(shouts)-1)//batch_size + 1}, size {len(batch)}")
@@ -112,23 +106,21 @@ class SearchService:
             documents = []
             for shout in batch:
                 try:
-                    # Clean and combine all text fields
                     text_fields = []
                     for field_name in ['title', 'subtitle', 'lead', 'body']:
                         field_value = getattr(shout, field_name, None)
                         if field_value and isinstance(field_value, str) and field_value.strip():
                             text_fields.append(field_value.strip())

-                    # Process media field if it exists
                     media = getattr(shout, 'media', None)
                     if media:
                         if isinstance(media, str):
-                            # Try to parse if it's JSON
                             try:
                                 media_json = json.loads(media)
-                                if isinstance(media_json, dict) and 'title' in media_json:
-                                    text_fields.append(media_json['title'])
-                                if isinstance(media_json, dict) and 'body' in media_json:
-                                    text_fields.append(media_json['body'])
+                                if isinstance(media_json, dict):
+                                    if 'title' in media_json:
+                                        text_fields.append(media_json['title'])
+                                    if 'body' in media_json:
+                                        text_fields.append(media_json['body'])
                             except json.JSONDecodeError:
                                 text_fields.append(media)
@@ -138,7 +130,6 @@ class SearchService:
                             if 'body' in media:
                                 text_fields.append(media['body'])

-                    # Combine fields into one text
                     text = " ".join(text_fields)

                     if not text.strip():
@@ -146,7 +137,6 @@ class SearchService:
                         total_skipped += 1
                         continue

-                    # Add to batch
                     documents.append({
                         "id": str(shout.id),
                         "text": text
@@ -162,14 +152,12 @@ class SearchService:
                 continue

             try:
-                # Log a sample of the batch for debugging
                 if documents:
                     sample = documents[0]
                     logger.info(f"Sample document: id={sample['id']}, text_length={len(sample['text'])}")

-                # Send batch to txtai service
                 logger.info(f"Sending batch of {len(documents)} documents to search service")
-                response = await self.client.post(
+                response = await self.index_client.post(
                     "/bulk-index",
                     json={"documents": documents}
                 )
@@ -188,57 +176,32 @@ class SearchService:
             logger.warning("Search not available")
             return []

-        # Validate input
         if not isinstance(text, str) or not text.strip():
             logger.warning(f"Invalid search text: {text}")
             return []

         logger.info(f"Searching for: '{text}' (limit={limit}, offset={offset})")

-        # Check Redis cache first
-        redis_key = f"search:{text}:{offset}+{limit}"
-        cached = await redis.get(redis_key)
-        if cached:
-            cached_results = json.loads(cached)
-            logger.info(f"Retrieved {len(cached_results)} results from cache for '{text}'")
-            return cached_results

         try:
-            # Log request
             logger.info(f"Sending search request: text='{text}', limit={limit}, offset={offset}")

-            # Send search request to txtai service
             response = await self.client.post(
                 "/search",
                 json={"text": text, "limit": limit, "offset": offset}
             )
             response.raise_for_status()

-            # Log raw response for debugging
             logger.info(f"Raw search response: {response.text}")

-            # Parse response
             result = response.json()
             logger.info(f"Parsed search response: {result}")

-            # Extract results
             formatted_results = result.get("results", [])
             logger.info(f"Search for '{text}' returned {len(formatted_results)} results")

-            # Log sample results for debugging
             if formatted_results:
                 logger.info(f"Sample result: {formatted_results[0]}")
             else:
                 logger.warning(f"No results found for '{text}'")

-            # Cache results
-            if formatted_results:
-                await redis.execute(
-                    "SETEX",
-                    redis_key,
-                    REDIS_TTL,
-                    json.dumps(formatted_results, cls=CustomJSONEncoder),
-                )
             return formatted_results
         except Exception as e:
             logger.error(f"Search error for '{text}': {e}", exc_info=True)
@@ -249,7 +212,7 @@ class SearchService:
 search_service = SearchService()


-# Keep the API exactly the same to maintain compatibility
+# API-compatible function to perform a search
 async def search_text(text: str, limit: int = 50, offset: int = 0):
     payload = []
     if search_service.available:
@@ -257,7 +220,6 @@ async def search_text(text: str, limit: int = 50, offset: int = 0):
     return payload


-# Function to initialize search with existing data
 async def initialize_search_index(shouts_data):
     """Initialize search index with existing data during application startup"""
     if SEARCH_ENABLED:
@@ -267,16 +229,13 @@ async def initialize_search_index(shouts_data):

         logger.info(f"Initializing search index with {len(shouts_data)} documents")

-        # Check if search service is available first
         info = await search_service.info()
         if info.get("status") in ["error", "unavailable", "disabled"]:
             logger.error(f"Cannot initialize search index: {info}")
             return

-        # Start the bulk indexing process
         await search_service.bulk_index(shouts_data)

-        # Verify indexing worked by testing with a search
         try:
             test_query = "test"
             logger.info(f"Verifying search index with query: '{test_query}'")
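Note on the timeout split: the change above keeps one httpx client per workload, a 30 s client for /search and a 120 s client for /bulk-index, and drops the Redis layer entirely. A minimal standalone sketch of that pattern, assuming a reachable txtai service; the URL, batch size and endpoint names are taken from the diff, while the bulk_index helper itself is illustrative only and not part of the codebase:

import asyncio
import httpx

TXTAI_SERVICE_URL = "http://search-txtai.web.1:8000"
MAX_BATCH_SIZE = 25

# Short timeout for interactive /search calls, a much longer one for /bulk-index,
# since indexing a batch of documents can take far longer than a single query.
search_client = httpx.AsyncClient(timeout=30.0, base_url=TXTAI_SERVICE_URL)
index_client = httpx.AsyncClient(timeout=120.0, base_url=TXTAI_SERVICE_URL)

async def bulk_index(documents: list[dict]) -> int:
    """Send documents to the indexing endpoint in batches; returns how many were sent."""
    sent = 0
    for start in range(0, len(documents), MAX_BATCH_SIZE):
        batch = documents[start:start + MAX_BATCH_SIZE]
        response = await index_client.post("/bulk-index", json={"documents": batch})
        response.raise_for_status()
        sent += len(batch)
    return sent

async def main() -> None:
    docs = [{"id": "1", "text": "hello world"}]
    print(await bulk_index(docs))
    await index_client.aclose()
    await search_client.aclose()

asyncio.run(main())

Because the 120 s timeout is attached only to the indexing client, slow bulk uploads cannot widen the timeout budget of interactive search requests that share the same base URL.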
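Note on the media handling: the nested isinstance/key checks above replace the repeated "isinstance(media_json, dict) and 'key' in media_json" conditions. A rough sketch of the same parsing as a standalone helper; the function name is hypothetical and only mirrors the logic shown in the diff:

import json

def media_text_fields(media) -> list[str]:
    """Extract title/body text from a 'media' value that may be a JSON string or a dict."""
    fields = []
    if isinstance(media, str):
        try:
            media = json.loads(media)
        except json.JSONDecodeError:
            # Not JSON: index the raw string as-is
            return [media]
    if isinstance(media, dict):
        if 'title' in media:
            fields.append(media['title'])
        if 'body' in media:
            fields.append(media['body'])
    return fields

print(media_text_fields('{"title": "Clip", "body": "Transcript"}'))  # ['Clip', 'Transcript']

The behaviour matches the diff: a plain non-JSON string is indexed as-is, a JSON object contributes its title and body, and any other JSON value contributes nothing.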