author-cache-removed

This commit is contained in:
Untone 2024-04-19 14:28:21 +03:00
parent 41af1a7349
commit 3c219bfa69
4 changed files with 24 additions and 77 deletions

View File

@ -7,7 +7,6 @@ authors = ["Tony Rewin <anton.rewin@gmail.com>"]
[tool.poetry.dependencies]
python = "^3.12"
sentry-sdk = "^1.44.1"
redis = "^5.0.3"
ariadne = "^0.23.0"
starlette = "^0.37.2"
itsdangerous = "^2.1.2"
@ -15,6 +14,7 @@ requests = "^2.31.0"
granian = "^1.2.1"
colorlog = "^6.8.2"
httpx = "^0.27.0"
redis = {version = "^5.0.3", extras = ["async"]}
[tool.poetry.group.dev.dependencies]
pre-commit = "^3.6.0"

View File

@ -6,7 +6,7 @@ from typing import Any, Dict, List, Optional, Union
from models.chat import ChatPayload
from resolvers.chats import create_chat
from services.auth import login_required
from services.core import CacheStorage
from services.core import get_author_by_id
from services.rediscache import redis
from services.schema import query
@ -113,12 +113,15 @@ async def load_chats(
member_ids = c["members"].copy()
c["members"] = []
for member_id in member_ids:
a = CacheStorage.authors_by_id.get(str(member_id))
if a:
a["online"] = a.get("id") in members_online
c["members"].append(a)
else:
logger.error(f"cant find author by id {member_id}")
if isinstance(member_id, int):
a = await get_author_by_id(int(member_id))
if a:
a["online"] = a.get("id") in members_online
c["members"].append(a)
else:
logger.error(f"cant find author by id {member_id}")
elif 'members' in member_id and member_id not in c['members']:
c['members'].append(member_id)
chats.append(c)
else:

View File

@ -3,7 +3,7 @@ from typing import Any, Dict, List, Union
from resolvers.load import load_messages
from services.auth import login_required
from services.core import CacheStorage
from services.core import get_author_by_id
from services.rediscache import redis
from services.schema import query
@ -24,14 +24,11 @@ async def search_recipients(_, info, text: str, limit: int = 50, offset: int = 0
members_ids = await redis.execute("SMEMBERS", f"/chats/{chat_id}/members")
if isinstance(members_ids, set):
for member_id in members_ids:
author = CacheStorage.authors_by_id.get(str(member_id))
author = await get_author_by_id(member_id)
if author:
if author["name"].startswith(text):
result.add(author)
more_amount = limit - len(result)
if more_amount > 0:
result.update(CacheStorage.authors[0:more_amount])
return {"members": list(result), "error": None}

View File

@ -1,7 +1,5 @@
import asyncio
import json
import logging
from datetime import datetime, timedelta, timezone
from services.logger import root_logger as logger
from services.rediscache import redis
@ -31,6 +29,17 @@ async def get_author_by_user(user: str):
return author
async def get_author_by_id(author_id: int):
    """Look up a cached author profile in Redis.

    Reads the JSON payload stored under the key ``author:{author_id}``
    and returns it parsed into a dict. Returns ``None`` when the key is
    absent or the client hands back anything other than a string.
    """
    cached = await redis.execute("GET", f"author:{author_id}")
    if not isinstance(cached, str):
        return None
    return json.loads(cached)
async def get_author_followed(author_id: int):
authors = []
redis_key = f"author:{author_id}:follows-authors"
@ -40,65 +49,3 @@ async def get_author_followed(author_id: int):
authors = json.loads(result)
return authors
# In-process cache of author profiles, refreshed periodically by a
# background task. (Removed by this commit in favor of per-key Redis
# lookups via get_author_by_id.)
# NOTE: indentation below is flattened by the diff rendering.
class CacheStorage:
# Guards concurrent refreshes of the class-level caches.
lock = asyncio.Lock()
period = 5 * 60 # every 5 mins
client = None
# Full list of author dicts from the last successful refresh.
authors = []
# Index: user id -> author dict.
authors_by_user = {}
# Index: str(author id) -> author dict.
authors_by_id = {}
@staticmethod
async def init():
"""graphql client connection using permanent token"""
self = CacheStorage
# Spawn the refresh worker; the task object is logged, not awaited,
# so init() returns immediately while the worker runs forever.
async with self.lock:
task = asyncio.create_task(self.worker())
logger.info(task)
@staticmethod
async def update_authors():
# Refresh all three class-level caches from the upstream author list.
self = CacheStorage
async with self.lock:
# NOTE(review): get_all_authors() is called without await — confirm
# it is a synchronous helper; if it is a coroutine, this stores the
# coroutine object and the isinstance(list) check below never passes.
result = get_all_authors()
if isinstance(result, list):
logger.info(f"cache loaded {len(result)}")
CacheStorage.authors = result
for a in result:
user_id = a.get("user")
# Ids are stringified so lookups can use str(member_id) keys.
author_id = str(a.get("id"))
self.authors_by_user[user_id] = a
self.authors_by_id[author_id] = a
@staticmethod
async def worker():
"""async task worker"""
# Periodic refresh loop with simple retry/backoff: on failure, retry
# every 10 seconds; after more than 3 consecutive failures, give up.
failed = 0
self = CacheStorage
while True:
try:
logger.info(" - updating profiles data...")
await self.update_authors()
failed = 0
except Exception as er:
failed += 1
logger.error(f"{er} - update failed #{failed}, wait 10 seconds")
if failed > 3:
logger.error(" - not trying to update anymore")
import traceback
traceback.print_exc()
# Abandon the cache permanently after repeated failures.
break
if failed == 0:
# Log the wall-clock time of the next scheduled refresh.
when = datetime.now(timezone.utc) + timedelta(seconds=self.period)
t = format(when.astimezone().isoformat())
logger.info(
" ⎩ next update: %s"
% (t.split("T")[0] + " " + t.split("T")[1].split(".")[0])
)
await asyncio.sleep(self.period)
else:
# Short retry delay while in the failure/backoff path.
await asyncio.sleep(10)
logger.info(" - trying to update data again")