38 Commits

Author: Stepan Vladovskiy

0018749905  Merge branch 'dev' into staging  (2025-05-14 14:33:52 -03:00)
a6b3b21894  draft-topics-fix2  (2025-05-07 10:37:18 +03:00)
51de649686  draft-topics-fix  (2025-05-07 10:22:30 +03:00)
2b7d5a25b5  unpublish-fix7  (2025-05-03 11:52:14 +03:00)
32cb810f51  unpublish-fix7  (2025-05-03 11:52:10 +03:00)
d2a8c23076  unpublish-fix5  (2025-05-03 11:47:35 +03:00)
96afda77a6  unpublish-fix5  (2025-05-03 11:35:03 +03:00)
785548d055  cache-revalidation-fix  (2025-05-03 11:11:14 +03:00)
d6202561a9  unpublish-fix4  (2025-05-03 11:07:03 +03:00)
3fbd2e677a  unpublish-fix3  (2025-05-03 11:00:19 +03:00)
4f1eab513a  unpublish-fix2  (2025-05-03 10:57:55 +03:00)
44852a1553  delete-draft-fix2  (2025-05-03 10:56:34 +03:00)
58ec60262b  unpublish,delete-draft-fix  (2025-05-03 10:53:40 +03:00)
c344fcee2d  refactoring(search.py): logs for search-combine and search-authors are equal  (2025-05-02 18:28:06 -03:00)
5f3d90fc90  draft-publication-debug  (2025-04-28 16:24:08 +03:00)
f71fc7fde9  draft-publication-info  (2025-04-28 11:10:18 +03:00)
ed71405082  topic-commented-stat-fix4  (2025-04-28 10:30:58 +03:00)
79e1f15a2e  topic-commented-stat-fix3  (2025-04-28 10:30:04 +03:00)
b17acae0af  topic-commented-stat-fix2  (2025-04-28 10:24:48 +03:00)
d293819ad9  topic-commented-stat-fix  (2025-04-28 10:13:29 +03:00)
bcbfdd76e9  html wrap fix  (2025-04-27 12:53:49 +03:00)
b735bf8cab  draft-creator-adding  (2025-04-27 09:15:07 +03:00)
20fd40df0e  updated-by-auto-fix  (2025-04-26 23:46:07 +03:00)
bde3211a5f  updated-by-auto  (2025-04-26 17:07:02 +03:00)
4cd8883d72  validhtmlfix  (2025-04-26 17:02:55 +03:00)
0939e91700  empty-body-fix  (2025-04-26 16:19:33 +03:00)
dfbdfba2f0  draft-create-fix5  (2025-04-26 16:13:07 +03:00)
b66e347c91  draft-create-fix  (2025-04-26 16:03:41 +03:00)
6d9513f1b2  reaction-by-fix4  (2025-04-26 15:57:51 +03:00)
af7fbd2fc9  reaction-by-fix3  (2025-04-26 15:50:20 +03:00)
631ad47fe8  reaction-by-fix2  (2025-04-26 15:47:44 +03:00)
3b3cc1c1d8  reaction-by-ашч  (2025-04-26 15:42:15 +03:00)
e4943f524c  reaction-by-upgrade2  (2025-04-26 15:35:31 +03:00)
e7684c9c05  reaction-by-upgrade  (2025-04-26 14:10:05 +03:00)
bdae2abe25  drafts schema restore + publish/unpublish fixes  (2025-04-26 13:11:12 +03:00)
a310d59432  draft-resolvers  (2025-04-26 11:45:16 +03:00)
6b2ac09f74  unpublish-fix  (2025-04-26 10:16:55 +03:00)
5024e963e3  notify-draft-hotfix  (2025-04-24 12:12:48 +03:00)
16 changed files with 697 additions and 279 deletions

View File

@@ -1,3 +1,14 @@
+#### [0.4.20] - 2025-05-03
+- Fixed a bug in the `CacheRevalidationManager` class: added initialization of the `_redis` attribute
+- Improved Redis connection handling in the cache revalidation manager:
+  - Automatic reconnection when the connection is lost
+  - Connection check before every cache operation
+  - Extra logging to simplify diagnostics
+- Fixed the `unpublish_shout` resolver:
+  - Correctly builds the synthetic `publication` field with `published_at: null`
+  - Returns a complete data dictionary instead of a model object
+  - Improved loading of related data (authors, topics) so the response is built correctly
 #### [0.4.19] - 2025-04-14
 - dropped `Shout.description` and `Draft.description` to be UX-generated
 - use redis to init views counters after migrator
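Note: the synthetic `publication` field described in the 0.4.20 entry gives clients an explicit unpublished marker. Below is a minimal sketch of the response shape with illustrative values; the field names follow the `unpublish_shout` resolver later in this diff:

```python
# Illustrative shape of an unpublish_shout response after the 0.4.20 fix.
# The values are made up; the structure mirrors the resolver in this changeset.
unpublish_response = {
    "shout": {
        "id": 42,
        "slug": "some-shout",
        "topics": [{"id": 1, "slug": "culture", "title": "Culture"}],
        "authors": [{"id": 7, "name": "Author Name", "slug": "author-name"}],
        "publication": {
            "id": 42,
            "slug": "some-shout",
            "published_at": None,  # unpublished state is now explicit
        },
    }
}
```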

15
cache/revalidator.py vendored
View File

@@ -8,6 +8,7 @@ from cache.cache import (
     invalidate_cache_by_prefix,
 )
 from resolvers.stat import get_with_stat
+from services.redis import redis
 from utils.logger import root_logger as logger

 CACHE_REVALIDATION_INTERVAL = 300  # 5 minutes
@@ -21,9 +22,19 @@ class CacheRevalidationManager:
         self.lock = asyncio.Lock()
         self.running = True
         self.MAX_BATCH_SIZE = 10  # Maximum number of items to process one by one
+        self._redis = redis  # Initialize _redis to give access to the Redis client

     async def start(self):
         """Start the background cache revalidation worker."""
+        # Make sure we have a Redis connection
+        if not self._redis._client:
+            logger.warning("Redis connection not established. Waiting for connection...")
+            try:
+                await self._redis.connect()
+                logger.info("Redis connection established for revalidation manager")
+            except Exception as e:
+                logger.error(f"Failed to connect to Redis: {e}")
         self.task = asyncio.create_task(self.revalidate_cache())

     async def revalidate_cache(self):
@@ -39,6 +50,10 @@ class CacheRevalidationManager:

     async def process_revalidation(self):
         """Refresh the cache for all entities queued for revalidation."""
+        # Check the Redis connection
+        if not self._redis._client:
+            return  # Bail out of the method if we could not connect
+
         async with self.lock:
             # Revalidate the author cache
             if self.items_to_revalidate["authors"]:

View File

@@ -147,16 +147,32 @@ await invalidate_topics_cache(456)
 ```python
 class CacheRevalidationManager:
-    # ...
-    async def process_revalidation(self):
-        # ...
+    def __init__(self, interval=CACHE_REVALIDATION_INTERVAL):
+        # ...
+        self._redis = redis  # Direct reference to the Redis service
+
+    async def start(self):
+        # Check and establish the Redis connection
+        # ...
+
+    async def process_revalidation(self):
+        # Process the items queued for revalidation
+        # ...
+
     def mark_for_revalidation(self, entity_id, entity_type):
+        # Adds an entity to the revalidation queue
         # ...
 ```

 The revalidation manager runs as an asynchronous background process that periodically (every 5 minutes by default) checks whether any entities need revalidation.

-Implementation details:
+**Redis interaction:**
+- CacheRevalidationManager keeps a direct reference to the Redis service via the `_redis` attribute
+- On start it checks for a Redis connection and establishes a new one when needed
+- The connection is checked automatically before every revalidation operation
+- The system re-establishes the connection on its own when it is lost
+
+**Implementation details:**
 - Authors and topics are revalidated entry by entry
 - Shouts and reactions are processed in batches, with a threshold of 10 items
 - Once the threshold is reached, the system switches to invalidating whole collections instead of per-entry processing
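A minimal usage sketch of the pattern documented above. The `revalidation_manager` import name is an assumption; `mark_for_revalidation(entity_id, entity_type)` and the queue keys come from the code in this changeset:

```python
# Sketch: queue entities for revalidation; the background worker
# processes them on the next interval tick (every 5 minutes by default).
from cache.revalidator import revalidation_manager  # assumed singleton name

revalidation_manager.mark_for_revalidation(123, "authors")  # revalidate one author
revalidation_manager.mark_for_revalidation(456, "topics")   # revalidate one topic
# Shouts and reactions accumulate and are processed in batches of up to
# MAX_BATCH_SIZE (10); past that threshold the whole collection is invalidated.
```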

View File

@@ -6,6 +6,7 @@ from sqlalchemy.orm import relationship
 from orm.author import Author
 from orm.topic import Topic
 from services.db import Base
+from orm.shout import Shout

 class DraftTopic(Base):
@@ -26,12 +27,14 @@ class DraftAuthor(Base):
     caption = Column(String, nullable=True, default="")

 class Draft(Base):
     __tablename__ = "draft"

     # required
     created_at: int = Column(Integer, nullable=False, default=lambda: int(time.time()))
-    created_by: int = Column(ForeignKey("author.id"), nullable=False)
-    community: int = Column(ForeignKey("community.id"), nullable=False, default=1)
+    # Columns for the author relations
+    created_by: int = Column("created_by", ForeignKey("author.id"), nullable=False)
+    community: int = Column("community", ForeignKey("community.id"), nullable=False, default=1)

     # optional
     layout: str = Column(String, nullable=True, default="article")
@@ -49,7 +52,55 @@ class Draft(Base):
     # auto
     updated_at: int | None = Column(Integer, nullable=True, index=True)
     deleted_at: int | None = Column(Integer, nullable=True, index=True)
-    updated_by: int | None = Column(ForeignKey("author.id"), nullable=True)
-    deleted_by: int | None = Column(ForeignKey("author.id"), nullable=True)
-    authors = relationship(Author, secondary="draft_author")
-    topics = relationship(Topic, secondary="draft_topic")
+    updated_by: int | None = Column("updated_by", ForeignKey("author.id"), nullable=True)
+    deleted_by: int | None = Column("deleted_by", ForeignKey("author.id"), nullable=True)
+
+    # --- Relationships ---
+    # Only many-to-many relations via the association tables
+    authors = relationship(Author, secondary="draft_author", lazy="select")
+    topics = relationship(Topic, secondary="draft_topic", lazy="select")
+
+    # Community relation (if needed as an object rather than an ID)
+    # community = relationship("Community", foreign_keys=[community_id], lazy="joined")
+    # For now community stays as an ID
+
+    # Relation to the publication (one-to-one or one-to-none)
+    # Loaded via joinedload in the resolver
+    publication = relationship(
+        "Shout",
+        primaryjoin="Draft.id == Shout.draft",
+        foreign_keys="Shout.draft",
+        uselist=False,
+        lazy="noload",  # Not loaded by default, only via options
+        viewonly=True  # Mark this as a read-only relationship
+    )
+
+    def dict(self):
+        """
+        Serializes the Draft object into a dictionary.
+        Guarantees that the topics and authors fields are always lists.
+        """
+        return {
+            "id": self.id,
+            "created_at": self.created_at,
+            "created_by": self.created_by,
+            "community": self.community,
+            "layout": self.layout,
+            "slug": self.slug,
+            "title": self.title,
+            "subtitle": self.subtitle,
+            "lead": self.lead,
+            "body": self.body,
+            "media": self.media or [],
+            "cover": self.cover,
+            "cover_caption": self.cover_caption,
+            "lang": self.lang,
+            "seo": self.seo,
+            "updated_at": self.updated_at,
+            "deleted_at": self.deleted_at,
+            "updated_by": self.updated_by,
+            "deleted_by": self.deleted_by,
+            # Guarantee that topics and authors are always lists
+            "topics": [topic.dict() for topic in (self.topics or [])],
+            "authors": [author.dict() for author in (self.authors or [])]
+        }

View File

@@ -17,9 +17,11 @@ from resolvers.draft import (
     delete_draft,
     load_drafts,
     publish_draft,
-    unpublish_draft,
     update_draft,
 )
+from resolvers.editor import (
+    unpublish_shout,
+)
 from resolvers.feed import (
     load_shouts_coauthored,
     load_shouts_discussed,

View File

@@ -1,8 +1,6 @@
 import time
-from operator import or_

 import trafilatura
-from sqlalchemy.sql import and_
+from sqlalchemy.orm import joinedload

 from cache.cache import (
     cache_author,
@@ -12,7 +10,7 @@ from cache.cache import (
     invalidate_shouts_cache,
 )
 from orm.author import Author
-from orm.draft import Draft
+from orm.draft import Draft, DraftAuthor, DraftTopic
 from orm.shout import Shout, ShoutAuthor, ShoutTopic
 from orm.topic import Topic
 from services.auth import login_required
@@ -20,34 +18,70 @@ from services.db import local_session
 from services.notify import notify_shout
 from services.schema import mutation, query
 from services.search import search_service
+from utils.html_wrapper import wrap_html_fragment
 from utils.logger import root_logger as logger


 def create_shout_from_draft(session, draft, author_id):
+    """
+    Creates a new publication (Shout) object from a draft.
+
+    Args:
+        session: SQLAlchemy session (unused, kept for compatibility)
+        draft (Draft): The draft object
+        author_id (int): ID of the publication's author
+
+    Returns:
+        Shout: The new publication object (not yet persisted)
+
+    Example:
+        >>> from orm.draft import Draft
+        >>> draft = Draft(id=1, title='Title', body='Text', slug='slug', created_by=1)
+        >>> shout = create_shout_from_draft(None, draft, 1)
+        >>> shout.title
+        'Title'
+        >>> shout.body
+        'Text'
+        >>> shout.created_by
+        1
+    """
     # Create the new publication
     shout = Shout(
-        body=draft.body,
+        body=draft.body or "",
         slug=draft.slug,
         cover=draft.cover,
         cover_caption=draft.cover_caption,
         lead=draft.lead,
-        title=draft.title,
+        title=draft.title or "",
         subtitle=draft.subtitle,
-        layout=draft.layout,
-        media=draft.media,
-        lang=draft.lang,
+        layout=draft.layout or "article",
+        media=draft.media or [],
+        lang=draft.lang or "ru",
         seo=draft.seo,
         created_by=author_id,
         community=draft.community,
         draft=draft.id,
         deleted_at=None,
     )
+
+    # Initialize empty lists for the relations
+    shout.topics = []
+    shout.authors = []
+
     return shout


 @query.field("load_drafts")
 @login_required
 async def load_drafts(_, info):
+    """
+    Loads all drafts available to the current user.
+
+    Preloads the related objects (topics, authors, publication)
+    to avoid detached-instance errors during serialization.
+
+    Returns:
+        dict: A list of drafts or an error message
+    """
     user_id = info.context.get("user_id")
     author_dict = info.context.get("author", {})
     author_id = author_dict.get("id")
@@ -55,13 +89,44 @@ async def load_drafts(_, info):
     if not user_id or not author_id:
         return {"error": "User ID and author ID are required"}

-    with local_session() as session:
-        drafts = (
-            session.query(Draft)
-            .filter(or_(Draft.authors.any(Author.id == author_id), Draft.created_by == author_id))
-            .all()
-        )
-        return {"drafts": drafts}
+    try:
+        with local_session() as session:
+            # Preload authors, topics and the related publication
+            drafts_query = (
+                session.query(Draft)
+                .options(
+                    joinedload(Draft.topics),
+                    joinedload(Draft.authors),
+                    joinedload(Draft.publication)  # Load the related publication
+                )
+                .filter(Draft.authors.any(Author.id == author_id))
+            )
+            drafts = drafts_query.all()
+
+            # Convert the objects to dictionaries while they are still session-bound
+            drafts_data = []
+            for draft in drafts:
+                draft_dict = draft.dict()
+                # Always return a list for topics, even an empty one
+                draft_dict["topics"] = [topic.dict() for topic in (draft.topics or [])]
+                draft_dict["authors"] = [author.dict() for author in (draft.authors or [])]
+                # Attach publication info when present
+                if draft.publication:
+                    draft_dict["publication"] = {
+                        "id": draft.publication.id,
+                        "slug": draft.publication.slug,
+                        "published_at": draft.publication.published_at
+                    }
+                else:
+                    draft_dict["publication"] = None
+                drafts_data.append(draft_dict)
+            return {"drafts": drafts_data}
+    except Exception as e:
+        logger.error(f"Failed to load drafts: {e}", exc_info=True)
+        return {"error": f"Failed to load drafts: {str(e)}"}


 @mutation.field("create_draft")
@@ -116,11 +181,17 @@ async def create_draft(_, info, draft_input):
         if "id" in draft_input:
             del draft_input["id"]

-        # Add the current creation time
+        # Add the current creation time and the author ID
         draft_input["created_at"] = int(time.time())
+        draft_input["created_by"] = author_id

-        draft = Draft(created_by=author_id, **draft_input)
+        draft = Draft(**draft_input)
         session.add(draft)
+        session.flush()
+
+        # Add the creator as an author
+        da = DraftAuthor(shout=draft.id, author=author_id)
+        session.add(da)
+
         session.commit()
         return {"draft": draft}
     except Exception as e:
@@ -128,7 +199,8 @@ async def create_draft(_, info, draft_input):
         return {"error": f"Failed to create draft: {str(e)}"}

 def generate_teaser(body, limit=300):
-    body_text = trafilatura.extract(body, include_comments=False, include_tables=False)
+    body_html = wrap_html_fragment(body)
+    body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False)
     body_teaser = ". ".join(body_text[:limit].split(". ")[:-1])
     return body_teaser
@@ -140,7 +212,21 @@ async def update_draft(_, info, draft_id: int, draft_input):
     Args:
         draft_id: ID of the draft to update
-        draft_input: Data for updating the draft
+        draft_input: Data for updating the draft, per the DraftInput schema:
+            - layout: String
+            - author_ids: [Int!]
+            - topic_ids: [Int!]
+            - main_topic_id: Int
+            - media: [MediaItemInput]
+            - lead: String
+            - subtitle: String
+            - lang: String
+            - seo: String
+            - body: String
+            - title: String
+            - slug: String
+            - cover: String
+            - cover_caption: String

     Returns:
         dict: The updated draft or an error message
@@ -152,66 +238,89 @@ async def update_draft(_, info, draft_id: int, draft_input):
     if not user_id or not author_id:
         return {"error": "Author ID are required"}

-    # Check the slug: it must either be non-empty or not be passed at all
-    if "slug" in draft_input and (draft_input["slug"] is None or draft_input["slug"] == ""):
-        # If the slug is empty, either drop it from the input or generate a temporary unique one
-        # Option 1: simply drop the key from the input to keep the old value
-        del draft_input["slug"]
-        # Option 2 (if an update is needed): generate a temporary unique slug
-        # import uuid
-        # draft_input["slug"] = f"draft-{uuid.uuid4().hex[:8]}"
-
-    with local_session() as session:
-        draft = session.query(Draft).filter(Draft.id == draft_id).first()
-        if not draft:
-            return {"error": "Draft not found"}
-
-        # Generate SEO description if not provided and not already set
-        if "seo" not in draft_input and not draft.seo:
-            body_src = draft_input.get("body") if "body" in draft_input else draft.body
-            lead_src = draft_input.get("lead") if "lead" in draft_input else draft.lead
-
-            body_text = None
-            if body_src:
-                try:
-                    # Extract text, excluding comments and tables
-                    body_text = trafilatura.extract(body_src, include_comments=False, include_tables=False)
-                except Exception as e:
-                    logger.warning(f"Trafilatura failed to extract body text for draft {draft_id}: {e}")
-
-            lead_text = None
-            if lead_src:
-                try:
-                    # Extract text from lead
-                    lead_text = trafilatura.extract(lead_src, include_comments=False, include_tables=False)
-                except Exception as e:
-                    logger.warning(f"Trafilatura failed to extract lead text for draft {draft_id}: {e}")
-
-            # Generate body teaser only if body_text was successfully extracted
-            body_teaser = generate_teaser(body_text, 300) if body_text else ""
-
-            # Prioritize lead_text for SEO, fallback to body_teaser. Ensure it's a string.
-            generated_seo = lead_text if lead_text else body_teaser
-            draft_input["seo"] = generated_seo if generated_seo else ""
-
-        # Update the draft object with new data from draft_input
-        # Assuming Draft.update is a helper that iterates keys or similar.
-        # A more standard SQLAlchemy approach would be:
-        # for key, value in draft_input.items():
-        #     if hasattr(draft, key):
-        #         setattr(draft, key, value)
-        # But we stick to the existing pattern for now.
-        Draft.update(draft, draft_input)
-
-        # Set updated timestamp and author
-        current_time = int(time.time())
-        draft.updated_at = current_time
-        draft.updated_by = author_id  # Assuming author_id is correctly fetched context
-
-        session.commit()
-
-        # Invalidate cache related to this draft if necessary (consider adding)
-        # await invalidate_draft_cache(draft_id)
-
-        return {"draft": draft}
+    try:
+        with local_session() as session:
+            draft = session.query(Draft).filter(Draft.id == draft_id).first()
+            if not draft:
+                return {"error": "Draft not found"}
+
+            # Filter the input, keeping only the allowed fields
+            allowed_fields = {
+                "layout", "author_ids", "topic_ids", "main_topic_id",
+                "media", "lead", "subtitle", "lang", "seo", "body",
+                "title", "slug", "cover", "cover_caption"
+            }
+            filtered_input = {k: v for k, v in draft_input.items() if k in allowed_fields}
+
+            # Check the slug
+            if "slug" in filtered_input and not filtered_input["slug"]:
+                del filtered_input["slug"]
+
+            # Update the author relations if they were passed
+            if "author_ids" in filtered_input:
+                author_ids = filtered_input.pop("author_ids")
+                if author_ids:
+                    # Clear the current relations
+                    session.query(DraftAuthor).filter(DraftAuthor.shout == draft_id).delete()
+                    # Add the new relations
+                    for aid in author_ids:
+                        da = DraftAuthor(shout=draft_id, author=aid)
+                        session.add(da)
+
+            # Update the topic relations if they were passed
+            if "topic_ids" in filtered_input:
+                topic_ids = filtered_input.pop("topic_ids")
+                main_topic_id = filtered_input.pop("main_topic_id", None)
+                if topic_ids:
+                    # Clear the current relations
+                    session.query(DraftTopic).filter(DraftTopic.shout == draft_id).delete()
+                    # Add the new relations
+                    for tid in topic_ids:
+                        dt = DraftTopic(
+                            shout=draft_id,
+                            topic=tid,
+                            main=(tid == main_topic_id) if main_topic_id else False
+                        )
+                        session.add(dt)
+
+            # Generate SEO if it was not provided
+            if "seo" not in filtered_input and not draft.seo:
+                body_src = filtered_input.get("body", draft.body)
+                lead_src = filtered_input.get("lead", draft.lead)
+                body_html = wrap_html_fragment(body_src)
+                lead_html = wrap_html_fragment(lead_src)
+                try:
+                    body_text = trafilatura.extract(body_html, include_comments=False, include_tables=False) if body_src else None
+                    lead_text = trafilatura.extract(lead_html, include_comments=False, include_tables=False) if lead_src else None
+                    body_teaser = generate_teaser(body_text, 300) if body_text else ""
+                    filtered_input["seo"] = lead_text if lead_text else body_teaser
+                except Exception as e:
+                    logger.warning(f"Failed to generate SEO for draft {draft_id}: {e}")
+
+            # Update the draft's scalar fields
+            for key, value in filtered_input.items():
+                setattr(draft, key, value)
+
+            # Update the metadata
+            draft.updated_at = int(time.time())
+            draft.updated_by = author_id
+
+            session.commit()
+
+            # Convert the object to a dictionary for the response
+            draft_dict = draft.dict()
+            draft_dict["topics"] = [topic.dict() for topic in draft.topics]
+            draft_dict["authors"] = [author.dict() for author in draft.authors]
+            # Put the author object into updated_by
+            draft_dict["updated_by"] = author_dict
+
+            return {"draft": draft_dict}
+
+    except Exception as e:
+        logger.error(f"Failed to update draft: {e}", exc_info=True)
+        return {"error": f"Failed to update draft: {str(e)}"}


 @mutation.field("delete_draft")
@@ -231,182 +340,136 @@ async def delete_draft(_, info, draft_id: int):
         return {"draft": draft}


+def validate_html_content(html_content: str) -> tuple[bool, str]:
+    """
+    Validates HTML content via trafilatura.
+
+    Args:
+        html_content: HTML string to check
+
+    Returns:
+        tuple[bool, str]: (validity, error message)
+
+    Example:
+        >>> is_valid, error = validate_html_content("<p>Valid HTML</p>")
+        >>> is_valid
+        True
+        >>> error
+        ''
+        >>> is_valid, error = validate_html_content("Invalid < HTML")
+        >>> is_valid
+        False
+        >>> 'Invalid HTML' in error
+        True
+    """
+    if not html_content or not html_content.strip():
+        return False, "Content is empty"
+    try:
+        html_content = wrap_html_fragment(html_content)
+        extracted = trafilatura.extract(html_content)
+        if not extracted:
+            return False, "Invalid HTML structure or empty content"
+        return True, ""
+    except Exception as e:
+        logger.error(f"HTML validation error: {e}", exc_info=True)
+        return False, f"Invalid HTML content: {str(e)}"
+
+
 @mutation.field("publish_draft")
 @login_required
 async def publish_draft(_, info, draft_id: int):
-    user_id = info.context.get("user_id")
-    author_dict = info.context.get("author", {})
-    author_id = author_dict.get("id")
-    if not user_id or not author_id:
-        return {"error": "User ID and author ID are required"}
-    with local_session() as session:
-        draft = session.query(Draft).filter(Draft.id == draft_id).first()
-        if not draft:
-            return {"error": "Draft not found"}
-        shout = create_shout_from_draft(session, draft, author_id)
-        session.add(shout)
-        session.commit()
-        return {"shout": shout, "draft": draft}
-
-
-@mutation.field("unpublish_draft")
-@login_required
-async def unpublish_draft(_, info, draft_id: int):
-    user_id = info.context.get("user_id")
-    author_dict = info.context.get("author", {})
-    author_id = author_dict.get("id")
-    if not user_id or not author_id:
-        return {"error": "User ID and author ID are required"}
-    with local_session() as session:
-        draft = session.query(Draft).filter(Draft.id == draft_id).first()
-        if not draft:
-            return {"error": "Draft not found"}
-        shout = session.query(Shout).filter(Shout.draft == draft.id).first()
-        if shout:
-            shout.published_at = None
-            session.commit()
-            return {"shout": shout, "draft": draft}
-        return {"error": "Failed to unpublish draft"}
-
-
-@mutation.field("publish_shout")
-@login_required
-async def publish_shout(_, info, shout_id: int):
-    """Publish draft as a shout or update existing shout.
+    """
+    Publishes a draft, creating a new Shout or updating the existing one.

     Args:
-        shout_id: ID of the existing publication, or 0 for a new one
+        draft_id (int): ID of the draft to publish
+        draft: The draft object (optional)
+
+    Returns:
+        dict: The publication result with the shout, or an error message
     """
     user_id = info.context.get("user_id")
     author_dict = info.context.get("author", {})
     author_id = author_dict.get("id")
-    now = int(time.time())
     if not user_id or not author_id:
-        return {"error": "User ID and author ID are required"}
+        return {"error": "Author ID is required"}

     try:
         with local_session() as session:
-            shout = session.query(Shout).filter(Shout.id == shout_id).first()
-            if not shout:
-                return {"error": "Shout not found"}
-            was_published = shout.published_at is not None
-            draft = session.query(Draft).where(Draft.id == shout.draft).first()
+            # Load the draft with all its relations
+            draft = (
+                session.query(Draft)
+                .options(
+                    joinedload(Draft.topics),
+                    joinedload(Draft.authors),
+                    joinedload(Draft.publication)
+                )
+                .filter(Draft.id == draft_id)
+                .first()
+            )
             if not draft:
                 return {"error": "Draft not found"}
-            # Find the draft if none was passed
-            if not shout:
-                shout = create_shout_from_draft(session, draft, author_id)
-            else:
+
+            # Validate the HTML in body
+            is_valid, error = validate_html_content(draft.body)
+            if not is_valid:
+                return {"error": f"Cannot publish draft: {error}"}
+
+            # Check whether this draft already has a publication
+            if draft.publication:
+                shout = draft.publication
                 # Update the existing publication
-                shout.draft = draft.id
-                shout.created_by = author_id
-                shout.title = draft.title
-                shout.subtitle = draft.subtitle
-                shout.body = draft.body
-                shout.cover = draft.cover
-                shout.cover_caption = draft.cover_caption
-                shout.lead = draft.lead
-                shout.layout = draft.layout
-                shout.media = draft.media
-                shout.lang = draft.lang
-                shout.seo = draft.seo
-            draft.updated_at = now
-            shout.updated_at = now
-            # Set published_at only if the shout had previously been unpublished
-            if not was_published:
-                shout.published_at = now
-            # Handle the author relations
-            if (
-                not session.query(ShoutAuthor)
-                .filter(and_(ShoutAuthor.shout == shout.id, ShoutAuthor.author == author_id))
-                .first()
-            ):
-                sa = ShoutAuthor(shout=shout.id, author=author_id)
+                for field in ["body", "title", "subtitle", "lead", "cover", "cover_caption", "media", "lang", "seo"]:
+                    if hasattr(draft, field):
+                        setattr(shout, field, getattr(draft, field))
+                shout.updated_at = int(time.time())
+                shout.updated_by = author_id
+            else:
+                # Create a new publication
+                shout = create_shout_from_draft(session, draft, author_id)
+                now = int(time.time())
+                shout.created_at = now
+                shout.published_at = now
+                session.add(shout)
+                session.flush()  # Get the new shout's ID
+
+            # Clear the existing relations
+            session.query(ShoutAuthor).filter(ShoutAuthor.shout == shout.id).delete()
+            session.query(ShoutTopic).filter(ShoutTopic.shout == shout.id).delete()
+
+            # Add the authors
+            for author in (draft.authors or []):
+                sa = ShoutAuthor(shout=shout.id, author=author.id)
                 session.add(sa)
-            # Handle the topics
-            if draft.topics:
-                for topic in draft.topics:
-                    st = ShoutTopic(
-                        topic=topic.id, shout=shout.id, main=topic.main if hasattr(topic, "main") else False
-                    )
-                    session.add(st)
-            session.add(shout)
-            session.add(draft)
-            session.flush()
-            # Invalidate the cache only for a new or re-published shout
-            if not was_published:
-                cache_keys = ["feed", f"author_{author_id}", "random_top", "unrated"]
-                # Add keys for the topics
-                for topic in shout.topics:
-                    cache_keys.append(f"topic_{topic.id}")
-                    cache_keys.append(f"topic_shouts_{topic.id}")
-                    await cache_by_id(Topic, topic.id, cache_topic)
-                # Invalidate the cache
-                await invalidate_shouts_cache(cache_keys)
-                await invalidate_shout_related_cache(shout, author_id)
-                # Refresh the author cache
-                for author in shout.authors:
-                    await cache_by_id(Author, author.id, cache_author)
-                # Send the publication notification
-                await notify_shout(shout.dict(), "published")
-                # Update the search index
-                search_service.index(shout)
-            else:
-                # For already published material just send an update notification
-                await notify_shout(shout.dict(), "update")
+
+            # Add the topics
+            for topic in (draft.topics or []):
+                st = ShoutTopic(
+                    topic=topic.id,
+                    shout=shout.id,
+                    main=topic.main if hasattr(topic, "main") else False
+                )
+                session.add(st)
+
             session.commit()
+
+            # Invalidate the cache
+            invalidate_shouts_cache()
+            invalidate_shout_related_cache(shout.id)
+
+            # Notify about the publication
+            await notify_shout(shout.id)
+
+            # Update the search index
+            search_service.index_shout(shout)
+
+            logger.info(f"Successfully published shout #{shout.id} from draft #{draft_id}")
+            logger.debug(f"Shout data: {shout.dict()}")
+
             return {"shout": shout}
     except Exception as e:
-        logger.error(f"Failed to publish shout: {e}", exc_info=True)
-        if "session" in locals():
-            session.rollback()
-        return {"error": f"Failed to publish shout: {str(e)}"}
-
-
-@mutation.field("unpublish_shout")
-@login_required
-async def unpublish_shout(_, info, shout_id: int):
-    """Unpublish a shout.
-
-    Args:
-        shout_id: The ID of the shout to unpublish
-
-    Returns:
-        dict: The unpublished shout or an error message
-    """
-    author_dict = info.context.get("author", {})
-    author_id = author_dict.get("id")
-    if not author_id:
-        return {"error": "Author ID is required"}
-
-    shout = None
-    with local_session() as session:
-        try:
-            shout = session.query(Shout).filter(Shout.id == shout_id).first()
-            shout.published_at = None
-            session.commit()
-            invalidate_shout_related_cache(shout)
-            invalidate_shouts_cache()
-        except Exception:
-            session.rollback()
-            return {"error": "Failed to unpublish shout"}
-
-    return {"shout": shout}
+        logger.error(f"Failed to publish draft {draft_id}: {e}", exc_info=True)
+        return {"error": f"Failed to publish draft: {str(e)}"}

View File

@@ -3,7 +3,7 @@ import time
 import orjson
 import trafilatura
 from sqlalchemy import and_, desc, select
-from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload, selectinload
 from sqlalchemy.sql.functions import coalesce

 from cache.cache import (
@@ -13,6 +13,7 @@ from cache.cache import (
     invalidate_shouts_cache,
 )
 from orm.author import Author
+from orm.draft import Draft
 from orm.shout import Shout, ShoutAuthor, ShoutTopic
 from orm.topic import Topic
 from resolvers.follower import follow, unfollow
@@ -20,8 +21,9 @@ from resolvers.stat import get_with_stat
 from services.auth import login_required
 from services.db import local_session
 from services.notify import notify_shout
-from services.schema import query
+from services.schema import mutation, query
 from services.search import search_service
+from utils.html_wrapper import wrap_html_fragment
 from utils.logger import root_logger as logger
@@ -179,9 +181,11 @@ async def create_shout(_, info, inp):
         # Create the publication without topics
         body = inp.get("body", "")
         lead = inp.get("lead", "")
-        body_text = trafilatura.extract(body)
-        lead_text = trafilatura.extract(lead)
-        seo = inp.get("seo", lead_text or body_text[:300].split(". ")[:-1].join(". "))
+        body_html = wrap_html_fragment(body)
+        lead_html = wrap_html_fragment(lead)
+        body_text = trafilatura.extract(body_html)
+        lead_text = trafilatura.extract(lead_html)
+        seo = inp.get("seo", lead_text.strip() or body_text.strip()[:300].split(". ")[:-1].join(". "))
         new_shout = Shout(
             slug=slug,
             body=body,
@@ -645,39 +649,178 @@ def get_main_topic(topics):
     """Get the main topic from a list of ShoutTopic objects."""
     logger.info(f"Starting get_main_topic with {len(topics) if topics else 0} topics")
     logger.debug(
-        f"Topics data: {[(t.topic.slug if t.topic else 'no-topic', t.main) for t in topics] if topics else []}"
+        f"Topics data: {[(t.slug, getattr(t, 'main', False)) for t in topics] if topics else []}"
     )

     if not topics:
         logger.warning("No topics provided to get_main_topic")
         return {"id": 0, "title": "no topic", "slug": "notopic", "is_main": True}

-    # Find first main topic in original order
-    main_topic_rel = next((st for st in topics if st.main), None)
-    logger.debug(
-        f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}"
-    )
-
-    if main_topic_rel and main_topic_rel.topic:
-        result = {
-            "slug": main_topic_rel.topic.slug,
-            "title": main_topic_rel.topic.title,
-            "id": main_topic_rel.topic.id,
-            "is_main": True,
-        }
-        logger.info(f"Returning main topic: {result}")
-        return result
-
-    # If no main found but topics exist, return first
-    if topics and topics[0].topic:
-        logger.info(f"No main topic found, using first topic: {topics[0].topic.slug}")
-        result = {
-            "slug": topics[0].topic.slug,
-            "title": topics[0].topic.title,
-            "id": topics[0].topic.id,
-            "is_main": True,
-        }
-        return result
+    # Check whether topics is a list of ShoutTopic or Topic objects
+    if hasattr(topics[0], 'topic') and topics[0].topic:
+        # For ShoutTopic objects (the old format)
+        # Find first main topic in original order
+        main_topic_rel = next((st for st in topics if getattr(st, 'main', False)), None)
+        logger.debug(
+            f"Found main topic relation: {main_topic_rel.topic.slug if main_topic_rel and main_topic_rel.topic else None}"
+        )
+
+        if main_topic_rel and main_topic_rel.topic:
+            result = {
+                "slug": main_topic_rel.topic.slug,
+                "title": main_topic_rel.topic.title,
+                "id": main_topic_rel.topic.id,
+                "is_main": True,
+            }
+            logger.info(f"Returning main topic: {result}")
+            return result
+
+        # If no main found but topics exist, return first
+        if topics and topics[0].topic:
+            logger.info(f"No main topic found, using first topic: {topics[0].topic.slug}")
+            result = {
+                "slug": topics[0].topic.slug,
+                "title": topics[0].topic.title,
+                "id": topics[0].topic.id,
+                "is_main": True,
+            }
+            return result
+    else:
+        # For Topic objects (the new format from selectinload)
+        # After switching to selectinload this is simply a list of Topic objects
+        if topics:
+            logger.info(f"Using first topic as main: {topics[0].slug}")
+            result = {
+                "slug": topics[0].slug,
+                "title": topics[0].title,
+                "id": topics[0].id,
+                "is_main": True,
+            }
+            return result

     logger.warning("No valid topics found, returning default")
     return {"slug": "notopic", "title": "no topic", "id": 0, "is_main": True}
@mutation.field("unpublish_shout")
@login_required
async def unpublish_shout(_, info, shout_id: int):
"""Снимает публикацию (shout) с публикации.
Предзагружает связанный черновик (draft) и его авторов/темы, чтобы избежать
ошибок при последующем доступе к ним в GraphQL.
Args:
shout_id: ID публикации для снятия с публикации
Returns:
dict: Снятая с публикации публикация или сообщение об ошибке
"""
author_dict = info.context.get("author", {})
author_id = author_dict.get("id")
if not author_id:
# В идеале нужна проверка прав, имеет ли автор право снимать публикацию
return {"error": "Author ID is required"}
shout = None
with local_session() as session:
try:
# Загружаем Shout со всеми связями для правильного формирования ответа
shout = (
session.query(Shout)
.options(
joinedload(Shout.authors),
selectinload(Shout.topics)
)
.filter(Shout.id == shout_id)
.first()
)
if not shout:
logger.warning(f"Shout not found for unpublish: ID {shout_id}")
return {"error": "Shout not found"}
# Если у публикации есть связанный черновик, загружаем его с relationships
if shout.draft:
# Отдельно загружаем черновик с его связями
draft = (
session.query(Draft)
.options(
selectinload(Draft.authors),
selectinload(Draft.topics)
)
.filter(Draft.id == shout.draft)
.first()
)
# Связываем черновик с публикацией вручную для доступа через API
if draft:
shout.draft_obj = draft
# TODO: Добавить проверку прав доступа, если необходимо
# if author_id not in [a.id for a in shout.authors]: # Требует selectinload(Shout.authors) выше
# logger.warning(f"Author {author_id} denied unpublishing shout {shout_id}")
# return {"error": "Access denied"}
# Запоминаем старый slug и id для формирования поля publication
shout_slug = shout.slug
shout_id_for_publication = shout.id
# Снимаем с публикации (устанавливаем published_at в None)
shout.published_at = None
session.commit()
# Формируем полноценный словарь для ответа
shout_dict = shout.dict()
# Добавляем связанные данные
shout_dict["topics"] = (
[
{"id": topic.id, "slug": topic.slug, "title": topic.title}
for topic in shout.topics
]
if shout.topics
else []
)
# Добавляем main_topic
shout_dict["main_topic"] = get_main_topic(shout.topics)
# Добавляем авторов
shout_dict["authors"] = (
[
{"id": author.id, "name": author.name, "slug": author.slug}
for author in shout.authors
]
if shout.authors
else []
)
# Важно! Обновляем поле publication, отражая состояние "снят с публикации"
shout_dict["publication"] = {
"id": shout_id_for_publication,
"slug": shout_slug,
"published_at": None # Ключевое изменение - устанавливаем published_at в None
}
# Инвалидация кэша
try:
cache_keys = [
"feed", # лента
f"author_{author_id}", # публикации автора
"random_top", # случайные топовые
"unrated", # неоцененные
]
await invalidate_shout_related_cache(shout, author_id)
await invalidate_shouts_cache(cache_keys)
logger.info(f"Cache invalidated after unpublishing shout {shout_id}")
except Exception as cache_err:
logger.error(f"Failed to invalidate cache for unpublish shout {shout_id}: {cache_err}")
except Exception as e:
session.rollback()
logger.error(f"Failed to unpublish shout {shout_id}: {e}", exc_info=True)
return {"error": f"Failed to unpublish shout: {str(e)}"}
# Возвращаем сформированный словарь вместо объекта
logger.info(f"Shout {shout_id} unpublished successfully by author {author_id}")
return {"shout": shout_dict}

View File

@@ -487,12 +487,16 @@ def apply_reaction_filters(by, q):
     shout_slug = by.get("shout")
     if shout_slug:
         q = q.filter(Shout.slug == shout_slug)

+    shout_id = by.get("shout_id")
+    if shout_id:
+        q = q.filter(Shout.id == shout_id)
+
     shouts = by.get("shouts")
     if shouts:
         q = q.filter(Shout.slug.in_(shouts))

-    created_by = by.get("created_by")
+    created_by = by.get("created_by", by.get("author_id"))
     if created_by:
         q = q.filter(Author.id == created_by)

View File

@@ -10,6 +10,7 @@ from cache.cache import (
 )
 from orm.author import Author
 from orm.topic import Topic
+from orm.reaction import ReactionKind
 from resolvers.stat import get_with_stat
 from services.auth import login_required
 from services.db import local_session
@@ -112,7 +113,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
     shouts_stats_query = f"""
         SELECT st.topic, COUNT(DISTINCT s.id) as shouts_count
         FROM shout_topic st
-        JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL
+        JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
         WHERE st.topic IN ({",".join(map(str, topic_ids))})
         GROUP BY st.topic
     """
@@ -121,7 +122,7 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
     # Query for follower statistics for the selected topics
     followers_stats_query = f"""
         SELECT topic, COUNT(DISTINCT follower) as followers_count
-        FROM topic_followers
+        FROM topic_followers tf
         WHERE topic IN ({",".join(map(str, topic_ids))})
         GROUP BY topic
     """
@@ -143,7 +144,8 @@ async def get_topics_with_stats(limit=100, offset=0, community_id=None, by=None)
         SELECT st.topic, COUNT(DISTINCT r.id) as comments_count
         FROM shout_topic st
         JOIN shout s ON st.shout = s.id AND s.deleted_at IS NULL AND s.published_at IS NOT NULL
-        JOIN reaction r ON r.shout = s.id
+        JOIN reaction r ON r.shout = s.id AND r.kind = '{ReactionKind.COMMENT.value}' AND r.deleted_at IS NULL
+        JOIN author a ON r.created_by = a.id AND a.deleted_at IS NULL
         WHERE st.topic IN ({",".join(map(str, topic_ids))})
         GROUP BY st.topic
     """

View File

@@ -92,12 +92,16 @@ input LoadShoutsOptions {

 input ReactionBy {
     shout: String
+    shout_id: Int
     shouts: [String]
     search: String
     kinds: [ReactionKind]
     reply_to: Int # filter
     topic: String
     created_by: Int
+    author_id: Int
     author: String
     after: Int
     sort: ReactionSort # sort
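The two new fields let clients filter reactions by numeric IDs. A hedged sketch of a `by` argument as consumed by `apply_reaction_filters` in this changeset; the dict itself is illustrative:

```python
# Sketch: a `by` filter as read by apply_reaction_filters.
# "shout_id" filters on Shout.id directly instead of the slug;
# "author_id" is used as a fallback when "created_by" is absent.
by = {
    "shout_id": 42,
    "author_id": 7,
    "kinds": ["COMMENT"],  # existing filter, unchanged
}
```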

View File

@@ -107,6 +107,12 @@ type Shout {
     score: Float
 }

+type PublicationInfo {
+    id: Int!
+    slug: String!
+    published_at: Int
+}
+
 type Draft {
     id: Int!
     created_at: Int!
@@ -129,9 +135,9 @@ type Draft {
     deleted_at: Int
     updated_by: Author
     deleted_by: Author
-    authors: [Author]
-    topics: [Topic]
+    authors: [Author]!
+    topics: [Topic]!
+    publication: PublicationInfo
 }

 type Stat {
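The new `publication` field lets clients see whether a draft has ever been published without a separate query. A hedged client-side sketch; the exact query and result wrapper shapes are assumptions based on the `load_drafts` resolver in this changeset:

```python
# Sketch: requesting the new Draft.publication field through any GraphQL client.
LOAD_DRAFTS_QUERY = """
query {
  load_drafts {
    drafts {
      id
      title
      publication { id slug published_at }  # null => never published
    }
  }
}
"""
```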

View File

@@ -53,3 +53,66 @@ async def notify_follower(follower: dict, author_id: int, action: str = "follow"
except Exception as e: except Exception as e:
# Log the error and re-raise it # Log the error and re-raise it
logger.error(f"Failed to publish to channel {channel_name}: {e}") logger.error(f"Failed to publish to channel {channel_name}: {e}")
async def notify_draft(draft_data, action: str = "publish"):
"""
Отправляет уведомление о публикации или обновлении черновика.
Функция гарантирует, что данные черновика сериализуются корректно, включая
связанные атрибуты (topics, authors).
Args:
draft_data (dict): Словарь с данными черновика. Должен содержать минимум id и title
action (str, optional): Действие ("publish", "update"). По умолчанию "publish"
Returns:
None
Examples:
>>> draft = {"id": 1, "title": "Тестовый черновик", "slug": "test-draft"}
>>> await notify_draft(draft, "publish")
"""
channel_name = "draft"
try:
# Убеждаемся, что все необходимые данные присутствуют
# и объект не требует доступа к отсоединенным атрибутам
if isinstance(draft_data, dict):
draft_payload = draft_data
else:
# Если это ORM объект, преобразуем его в словарь с нужными атрибутами
draft_payload = {
"id": getattr(draft_data, "id", None),
"slug": getattr(draft_data, "slug", None),
"title": getattr(draft_data, "title", None),
"subtitle": getattr(draft_data, "subtitle", None),
"media": getattr(draft_data, "media", None),
"created_at": getattr(draft_data, "created_at", None),
"updated_at": getattr(draft_data, "updated_at", None)
}
# Если переданы связанные атрибуты, добавим их
if hasattr(draft_data, "topics") and draft_data.topics is not None:
draft_payload["topics"] = [
{"id": t.id, "name": t.name, "slug": t.slug}
for t in draft_data.topics
]
if hasattr(draft_data, "authors") and draft_data.authors is not None:
draft_payload["authors"] = [
{"id": a.id, "name": a.name, "slug": a.slug, "pic": getattr(a, "pic", None)}
for a in draft_data.authors
]
data = {"payload": draft_payload, "action": action}
# Сохраняем уведомление
save_notification(action, channel_name, data.get("payload"))
# Публикуем в Redis
json_data = orjson.dumps(data)
if json_data:
await redis.publish(channel_name, json_data)
except Exception as e:
logger.error(f"Failed to publish to channel {channel_name}: {e}")

View File

@@ -1,14 +1,15 @@
 from asyncio.log import logger

 import httpx
-from ariadne import MutationType, QueryType
+from ariadne import MutationType, ObjectType, QueryType

 from services.db import create_table_if_not_exists, local_session
 from settings import AUTH_URL

 query = QueryType()
 mutation = MutationType()
-resolvers = [query, mutation]
+type_draft = ObjectType("Draft")
+resolvers = [query, mutation, type_draft]


 async def request_graphql_data(gql, url=AUTH_URL, headers=None):

View File

@@ -696,8 +696,6 @@ class SearchService:
         if not isinstance(text, str) or not text.strip():
             return []

-        logger.info(f"Searching for: '{text}' (limit={limit}, offset={offset})")
-
         # Check if we can serve from cache
         if SEARCH_CACHE_ENABLED:
             has_cache = await self.cache.has_query(text)
@@ -714,7 +712,9 @@ class SearchService:
             search_limit = SEARCH_PREFETCH_SIZE
         else:
             search_limit = limit

+        logger.info(f"Searching for: '{text}' (limit={limit}, offset={offset}, search_limit={search_limit})")
+
         response = await self.client.post(
             "/search-combined",
             json={"text": text, "limit": search_limit},

1
utils/__init__.py Normal file
View File

@@ -0,0 +1 @@
+
38
utils/html_wrapper.py Normal file
View File

@@ -0,0 +1,38 @@
+"""
+Module for handling HTML fragments
+"""
+
+
+def wrap_html_fragment(fragment: str) -> str:
+    """
+    Wraps an HTML fragment in a full HTML structure for correct processing.
+
+    Args:
+        fragment: The HTML fragment to process
+
+    Returns:
+        str: A complete HTML document
+
+    Example:
+        >>> wrap_html_fragment("<p>Paragraph text</p>")
+        '<!DOCTYPE html><html><head><meta charset="utf-8"></head><body><p>Paragraph text</p></body></html>'
+    """
+    if not fragment or not fragment.strip():
+        return fragment
+
+    # Check whether the content is already a full HTML document
+    is_full_html = fragment.strip().startswith('<!DOCTYPE') or fragment.strip().startswith('<html')
+
+    # If it is a fragment, wrap it in a full HTML document
+    if not is_full_html:
+        return f"""<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="utf-8">
+    <title></title>
+  </head>
+  <body>
+    {fragment}
+  </body>
+</html>"""
+
+    return fragment
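A short usage sketch of the new helper in the context it was added for, feeding fragments to trafilatura as `generate_teaser` does above; the sample fragment and printed result are illustrative:

```python
# Sketch: wrap_html_fragment makes bare fragments digestible for trafilatura,
# which expects a complete document.
import trafilatura

from utils.html_wrapper import wrap_html_fragment

fragment = "<p>First sentence. Second sentence.</p>"
text = trafilatura.extract(
    wrap_html_fragment(fragment),
    include_comments=False,
    include_tables=False,
)
print(text)  # expected plain text; trafilatura may return None for very short fragments
```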