precache-topic-followers-debug
Some checks failed
Deploy on push / deploy (push) Failing after 4m0s

This commit is contained in:
2025-08-28 19:59:01 +03:00
parent c2e5816363
commit 8be128a69c
2 changed files with 50 additions and 134 deletions

67
cache/precache.py vendored
View File

@@ -1,7 +1,7 @@
import asyncio
import traceback
from sqlalchemy import and_, join, select
from sqlalchemy import and_, func, join, select
# Импорт Author, AuthorFollower отложен для избежания циклических импортов
from cache.cache import cache_author, cache_topic
@@ -69,29 +69,43 @@ async def precache_topics_authors(topic_id: int, session) -> None:
# Предварительное кеширование подписчиков тем
# Precache topic followers
async def precache_topics_followers(topic_id: int, session) -> None:
    """Precache the set of follower ids for one topic into Redis.

    Queries `TopicFollower` rows for `topic_id`, deduplicates the follower
    ids into a set, and stores them as a JSON list under the key
    ``topic:followers:{topic_id}``.

    Args:
        topic_id: Numeric id of the topic whose followers are cached.
        session: An open SQLAlchemy session used to run the query.

    On any error the key is overwritten with an empty JSON list so that
    readers never see a stale or missing key.
    """
    try:
        followers_query = select(TopicFollower.follower).where(TopicFollower.topic == topic_id)
        # Deduplicate and drop falsy ids (NULL/0) in one pass.
        topic_followers = {row[0] for row in session.execute(followers_query) if row[0]}

        followers_payload = fast_json_dumps(list(topic_followers))
        await redis.execute("SET", f"topic:followers:{topic_id}", followers_payload)

        # Debug logging to help diagnose topics that unexpectedly show
        # zero or a single follower.
        logger.debug(f"Precached {len(topic_followers)} followers for topic #{topic_id}")
        if len(topic_followers) == 0:
            logger.warning(f"⚠️ Topic #{topic_id} has 0 followers - this might indicate a data issue")
        elif len(topic_followers) == 1:
            logger.info(f" Topic #{topic_id} has exactly 1 follower - checking if this is correct")
    except Exception as e:
        logger.error(f"Error precaching followers for topic #{topic_id}: {e}")
        # On failure, store an empty list so the cache key still exists.
        # NOTE(review): this SET may itself raise if Redis is the failing
        # component — confirm callers tolerate a propagated exception here.
        await redis.execute("SET", f"topic:followers:{topic_id}", fast_json_dumps([]))
async def precache_data() -> None:
logger.info("precaching...")
logger.debug("Entering precache_data")
# Список паттернов ключей, которые нужно сохранить при FLUSHDB
preserve_patterns = [
"migrated_views_*", # Данные миграции просмотров
"session:*", # Сессии пользователей
"env_vars:*", # Переменные окружения
"oauth_*", # OAuth токены
]
# Сохраняем все важные ключи перед очисткой
all_keys_to_preserve = []
preserved_data = {}
try:
# Список паттернов ключей, которые нужно сохранить при FLUSHDB
preserve_patterns = [
"migrated_views_*", # Данные миграции просмотров
"session:*", # Сессии пользователей
"env_vars:*", # Переменные окружения
"oauth_*", # OAuth токены
]
# Сохраняем все важные ключи перед очисткой
all_keys_to_preserve = []
preserved_data = {}
for pattern in preserve_patterns:
keys = await redis.execute("KEYS", pattern)
if keys:
@@ -153,6 +167,25 @@ async def precache_data() -> None:
logger.info("Beginning topic precache phase")
with local_session() as session:
# Проверяем состояние таблицы topic_followers перед кешированием
total_followers = session.execute(select(func.count(TopicFollower.topic))).scalar()
unique_topics_with_followers = session.execute(
select(func.count(func.distinct(TopicFollower.topic)))
).scalar()
unique_followers = session.execute(select(func.count(func.distinct(TopicFollower.follower)))).scalar()
logger.info("📊 Database state before precaching:")
logger.info(f" Total topic_followers records: {total_followers}")
logger.info(f" Unique topics with followers: {unique_topics_with_followers}")
logger.info(f" Unique followers: {unique_followers}")
if total_followers == 0:
logger.warning(
"🚨 WARNING: topic_followers table is empty! This will cause all topics to show 0 followers."
)
elif unique_topics_with_followers == 0:
logger.warning("🚨 WARNING: No topics have followers! This will cause all topics to show 0 followers.")
# topics
q = select(Topic).where(Topic.community == 1)
topics = get_with_stat(q)