async-revised

This commit is contained in:
Untone 2024-11-02 00:26:57 +03:00
parent 54c59d26b9
commit 0c009495a3
6 changed files with 91 additions and 55 deletions

cache/revalidator.py

@@ -14,7 +14,7 @@ class CacheRevalidationManager:
     async def start(self):
         """Start the background cache revalidation worker."""
-        asyncio.create_task(self.revalidate_cache())
+        self.task = asyncio.create_task(self.revalidate_cache())

     async def revalidate_cache(self):
         """Check and revalidate the cache in a loop, every self.interval seconds."""
@@ -48,9 +48,15 @@ class CacheRevalidationManager:
         """Mark an entity for revalidation."""
         self.items_to_revalidate[entity_type].add(entity_id)

-    def stop(self):
+    async def stop(self):
         """Stop the background worker."""
         self.running = False
+        if hasattr(self, 'task'):
+            self.task.cancel()
+            try:
+                await self.task
+            except asyncio.CancelledError:
+                pass


 revalidation_manager = CacheRevalidationManager(interval=300)  # Revalidate every 5 minutes
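For reference, a minimal runnable sketch of the cancel-and-await shutdown pattern this change adopts. The Worker class, its loop body, and the timings are hypothetical stand-ins for CacheRevalidationManager and revalidate_cache(), not the project's code.

import asyncio


class Worker:
    """Hypothetical stand-in for CacheRevalidationManager."""

    def __init__(self, interval=300):
        self.interval = interval
        self.running = True

    async def start(self):
        # Keep a reference to the task so stop() can cancel and await it later.
        self.task = asyncio.create_task(self._loop())

    async def _loop(self):
        # Stand-in for revalidate_cache(): wake up every self.interval seconds.
        while self.running:
            await asyncio.sleep(self.interval)

    async def stop(self):
        self.running = False
        if hasattr(self, "task"):
            self.task.cancel()
            try:
                await self.task
            except asyncio.CancelledError:
                pass  # expected when the sleeping task is cancelled


async def main():
    w = Worker(interval=1)
    await w.start()
    await asyncio.sleep(0.1)
    await w.stop()  # returns promptly instead of leaking the background task


asyncio.run(main())

Keeping the task handle is what makes an awaitable stop() possible; the old fire-and-forget create_task() left nothing to cancel.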

main.py

@@ -74,19 +74,24 @@ async def create_all_tables_async():

 async def lifespan(app):
-    # Start all services on application startup
-    await asyncio.gather(
-        create_all_tables_async(),
-        redis.connect(),
-        precache_data(),
-        ViewedStorage.init(),
-        search_service.info(),
-        start(),
-        revalidation_manager.start(),
-    )
-    yield
-    # Stop services on application shutdown
-    await redis.disconnect()
+    try:
+        await asyncio.gather(
+            create_all_tables_async(),
+            redis.connect(),
+            precache_data(),
+            ViewedStorage.init(),
+            search_service.info(),
+            start(),
+            revalidation_manager.start(),
+        )
+        yield
+    finally:
+        tasks = [
+            redis.disconnect(),
+            ViewedStorage.stop(),
+            revalidation_manager.stop()
+        ]
+        await asyncio.gather(*tasks, return_exceptions=True)


 # Create the GraphQL instance
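The lifespan change follows the try/finally startup-shutdown shape sketched below. The sketch uses placeholder coroutines (fake_connect, fake_disconnect) instead of the real services and drives the context manager by hand rather than through the ASGI server.

import asyncio
from contextlib import asynccontextmanager


async def fake_connect():
    print("service started")


async def fake_disconnect():
    print("service stopped")


@asynccontextmanager
async def lifespan(app):
    try:
        # Start services concurrently; gather() raises if any startup step fails.
        await asyncio.gather(fake_connect())
        yield
    finally:
        # return_exceptions=True lets every shutdown coroutine run even if one raises.
        results = await asyncio.gather(fake_disconnect(), return_exceptions=True)
        for result in results:
            if isinstance(result, Exception):
                print(f"shutdown step failed: {result}")


async def main():
    async with lifespan(None):
        print("serving")


asyncio.run(main())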


@@ -1,5 +1,3 @@
-import subprocess
-
 from granian.constants import Interfaces
 from granian.log import LogLevels
 from granian.server import Granian
@@ -8,23 +6,24 @@ from settings import PORT
 from utils.logger import root_logger as logger

-def is_docker_container_running(name):
-    cmd = ["docker", "ps", "-f", f"name={name}"]
-    output = subprocess.run(cmd, capture_output=True, text=True).stdout
-    logger.info(output)
-    return name in output
-
 if __name__ == "__main__":
     logger.info("started")
-    granian_instance = Granian(
-        "main:app",
-        address="0.0.0.0",  # noqa S104
-        port=PORT,
-        interface=Interfaces.ASGI,
-        threads=4,
-        websockets=False,
-        log_level=LogLevels.debug,
-    )
-    granian_instance.serve()
+    try:
+        granian_instance = Granian(
+            "main:app",
+            address="0.0.0.0",
+            port=PORT,
+            interface=Interfaces.ASGI,
+            threads=4,
+            websockets=False,
+            log_level=LogLevels.debug,
+            backlog=2048,
+        )
+        granian_instance.serve()
+    except Exception as error:
+        logger.error(f"Granian error: {error}", exc_info=True)
+        raise
+    finally:
+        logger.info("stopped")


@@ -22,7 +22,11 @@ if DB_URL.startswith("postgres"):
         max_overflow=20,
         pool_timeout=30,  # How long to wait for a free connection
         pool_recycle=1800,  # Connection lifetime
-        connect_args={"sslmode": "disable"},
+        pool_pre_ping=True,  # Add connection health checks
+        connect_args={
+            "sslmode": "disable",
+            "connect_timeout": 40  # Add a connection timeout
+        }
     )
 else:
     engine = create_engine(DB_URL, echo=False, connect_args={"check_same_thread": False})
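For context, the two settings added here in a self-contained form: pool_pre_ping makes SQLAlchemy test a pooled connection before handing it out, and connect_timeout is passed through to the Postgres driver as the number of seconds to wait for the initial connection. The DSN below is hypothetical.

from sqlalchemy import create_engine, text

engine = create_engine(
    "postgresql://user:password@localhost:5432/dbname",  # hypothetical DSN
    max_overflow=20,
    pool_timeout=30,
    pool_recycle=1800,
    pool_pre_ping=True,  # verify the connection is alive before using it
    connect_args={
        "sslmode": "disable",
        "connect_timeout": 40,  # driver-level timeout for establishing the connection
    },
)

# Stale connections dropped by the server are detected and replaced transparently.
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())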


@@ -166,7 +166,19 @@ class SearchService:
     async def perform_index(self, shout, index_body):
         if self.client:
-            self.client.index(index=self.index_name, id=str(shout.id), body=index_body)
+            try:
+                await asyncio.wait_for(
+                    self.client.index(
+                        index=self.index_name,
+                        id=str(shout.id),
+                        body=index_body
+                    ),
+                    timeout=40.0
+                )
+            except asyncio.TimeoutError:
+                logger.error(f"Indexing timeout for shout {shout.id}")
+            except Exception as e:
+                logger.error(f"Indexing error for shout {shout.id}: {e}")

     async def search(self, text, limit, offset):
         logger.info(f"Ищем: {text} {offset}+{limit}")


@@ -37,6 +37,12 @@ class ViewedStorage:
     auth_result = None
     disabled = False
     start_date = datetime.now().strftime("%Y-%m-%d")
+    running = True
+
+    @staticmethod
+    async def stop():
+        self = ViewedStorage
+        self.running = False

     @staticmethod
     async def init():
@@ -196,22 +202,26 @@
         if self.disabled:
             return

-        while True:
-            try:
-                await self.update_pages()
-                failed = 0
-            except Exception as exc:
-                failed += 1
-                logger.debug(exc)
-                logger.info(" - update failed #%d, wait 10 secs" % failed)
-                if failed > 3:
-                    logger.info(" - views update failed, not trying anymore")
-                    break
-            if failed == 0:
-                when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
-                t = format(when.astimezone().isoformat())
-                logger.info(" ⎩ next update: %s" % (t.split("T")[0] + " " + t.split("T")[1].split(".")[0]))
-                await asyncio.sleep(self.period)
-            else:
-                await asyncio.sleep(10)
-                logger.info(" - try to update views again")
+        try:
+            while self.running:
+                try:
+                    await self.update_pages()
+                    failed = 0
+                except Exception as exc:
+                    failed += 1
+                    logger.debug(exc)
+                    logger.warning(" - update failed #%d, wait 10 secs" % failed)
+                    if failed > 3 or isinstance(exc, asyncio.CancelledError):
+                        logger.error("ViewedStorage worker cancelled")
+                        break
+        finally:
+            self.running = False
+
+        if failed == 0:
+            when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
+            t = format(when.astimezone().isoformat())
+            logger.info(" ⎩ next update: %s" % (t.split("T")[0] + " " + t.split("T")[1].split(".")[0]))
+            await asyncio.sleep(self.period)
+        else:
+            await asyncio.sleep(10)
+            logger.info(" - try to update views again")