cached-request
All checks were successful
deploy / deploy (push) Successful in 1m3s

This commit is contained in:
Untone 2023-12-19 18:58:26 +03:00
parent b141c26e80
commit 6c7f269206
6 changed files with 40 additions and 32 deletions

View File

@ -9,7 +9,6 @@ from sentry_sdk.integrations.ariadne import AriadneIntegration
from sentry_sdk.integrations.redis import RedisIntegration
from starlette.applications import Starlette
from services.core import get_all_authors
from services.rediscache import redis
from services.schema import resolvers
from settings import DEV_SERVER_PID_FILE_NAME, MODE, SENTRY_DSN
@ -51,6 +50,5 @@ async def shutdown():
await redis.disconnect()
get_all_authors()
app = Starlette(debug=True, on_startup=[start_up], on_shutdown=[shutdown])
app.mount("/", GraphQL(schema, debug=True))

View File

@ -4,7 +4,7 @@ import uuid
from models.chat import Chat, ChatUpdate
from services.auth import login_required
from services.core import authors_by_user
from services.core import get_all_authors
from services.presence import notify_chat
from services.rediscache import redis
from services.schema import mutation
@ -53,6 +53,7 @@ async def create_chat(_, info, title="", members=None):
print("create_chat members: %r" % members)
print("create_chat context: %r" % info.context)
user_id = info.context["user_id"]
authors_by_user, authors_by_id = get_all_authors()
author = authors_by_user[user_id]
author_id = author["id"]
chat = None

View File

@ -6,7 +6,7 @@ from models.chat import ChatPayload, Message
from models.member import ChatMember
from resolvers.chats import create_chat
from services.auth import login_required
from services.core import authors_by_id, get_my_followed
from services.core import get_all_authors, get_my_followed
from services.rediscache import redis
from services.schema import query
@ -65,6 +65,7 @@ async def load_chats(_, info, limit: int = 50, offset: int = 0) -> Dict[str, Uni
r = await create_chat(None, info, members=[1]) # member with id = 1 is discours
print(f"[resolvers.load] created chat: {r['chat_id']}")
cids.append(r["chat"]["id"])
authors_by_user, authors_by_id = get_all_authors()
for cid in cids:
async with lock:
chat_str: str = await redis.execute("GET", f"chats/{cid}")
@ -118,6 +119,7 @@ async def load_recipients(_, _info, limit=50, offset=0):
r = []
my_followings: List[ChatMember] = get_my_followed()
if len(my_followings) < limit:
authors_by_user, authors_by_id = get_all_authors()
my_followings = my_followings + list(authors_by_id.values())[offset : limit - len(my_followings)]
my_followings = list(set(my_followings))
for a in my_followings:

View File

@ -4,7 +4,7 @@ from typing import Any, Dict, List, Union
from resolvers.load import load_messages
from services.auth import login_required
from services.core import authors_by_id
from services.core import get_all_authors
from services.rediscache import redis
from services.schema import query
@ -18,6 +18,8 @@ async def search_recipients(_, info, text: str, limit: int = 50, offset: int = 0
author_id = info.context["author_id"]
authors_by_user, authors_by_id = get_all_authors()
existed_chats = await redis.execute("SMEMBERS", f"/chats_by_author/{author_id}")
if existed_chats:
for chat_id in list(json.loads(existed_chats))[offset : (offset + limit)]:

View File

@ -2,7 +2,7 @@ from functools import wraps
from aiohttp import ClientSession
from starlette.exceptions import HTTPException
from services.core import authors_by_user
from services.core import get_all_authors
from settings import AUTH_URL
@ -60,6 +60,7 @@ def login_required(f):
user_id = await check_auth(req)
if user_id:
context["user_id"] = user_id
authors_by_user, authors_by_id = get_all_authors()
author = authors_by_user.get(user_id)
if author and "id" in author:
context["author_id"] = author["id"]

View File

@ -1,27 +1,32 @@
from functools import lru_cache
from typing import Any, List
import requests
from models.member import ChatMember
from settings import API_BASE
headers = {"Content-Type": "application/json"}
authors_by_user = {}
authors_by_id = {}
@lru_cache(maxsize=128, typed=True)
def _cached_request(query_name: str, body_json: str) -> Any:
    """POST the serialized GraphQL *body_json* to the core API.

    Returns the ``data.<query_name>`` payload on HTTP 200 with valid JSON,
    or ``[]`` on any other status / undecodable body.  Memoized (bounded,
    128 entries) on the string key so repeated identical queries within the
    process skip the network round-trip.
    """
    print(f"[services.core] requesting {query_name}...")
    response = requests.post(API_BASE, headers={"Content-Type": "application/json"}, json=body_json and json.loads(body_json))
    print(f"[services.core] {query_name} response: <{response.status_code}> {response.text[:65]}..")
    if response.status_code == 200:
        try:
            r = response.json()
            if r:
                result = r.get("data", {}).get(query_name, {})
                print(f"[services.core] entries amount in result: {len(result)} ")
                return result
        except ValueError as e:
            print(f"[services.core] Error decoding JSON response: {e}")
    return []


def _request_endpoint(query_name, body) -> Any:
    """Request *query_name* from the core API with the given *body* dict.

    Thin wrapper around :func:`_cached_request`: ``@lru_cache`` cannot key
    on a dict argument (dicts are unhashable, so decorating this function
    directly raises ``TypeError`` at call time), so the body is serialized
    to a canonical JSON string (``sort_keys=True`` makes equal dicts map to
    the same cache key) before hitting the cached layer.
    """
    return _cached_request(query_name, json.dumps(body, sort_keys=True))
def get_all_authors() -> List[ChatMember]:
if len(authors_by_user.keys()) == 0:
print("[services.core] precaching authors...")
def get_all_authors():
authors_by_user = {}
authors_by_id = {}
query_name = "get_authors_all"
# Check if authors are already cached
@ -37,11 +42,10 @@ def get_all_authors() -> List[ChatMember]:
authors = _request_endpoint(query_name, gql)
for a in list(authors):
authors_by_user.__setitem__(a["user"], a)
authors_by_id.__setitem__(a["id"], a)
print(f"[services.core] {len(authors)} authors precached")
authors_by_user[a["user"]] = a
authors_by_id[a["id"]] = a
return list(authors_by_id.values())
return authors_by_user, authors_by_id
def get_my_followed() -> List[ChatMember]: