diff --git a/migration/__init__.py b/migration/__init__.py
deleted file mode 100644
index 468fa886..00000000
--- a/migration/__init__.py
+++ /dev/null
@@ -1,292 +0,0 @@
-""" cmd managed migration """
-import asyncio
-import gc
-import json
-import sys
-from datetime import datetime, timezone
-
-import bs4
-
-from migration.export import export_mdx
-from migration.tables.comments import migrate as migrateComment
-from migration.tables.comments import migrate_2stage as migrateComment_2stage
-from migration.tables.content_items import get_shout_slug
-from migration.tables.content_items import migrate as migrateShout
-from migration.tables.remarks import migrate as migrateRemark
-from migration.tables.topics import migrate as migrateTopic
-from migration.tables.users import migrate as migrateUser, post_migrate as users_post_migrate
-from migration.tables.users import migrate_2stage as migrateUser_2stage
-from orm import init_tables
-from orm.reaction import Reaction
-
-TODAY = datetime.strftime(datetime.now(tz=timezone.utc), "%Y%m%d")
-OLD_DATE = "2016-03-05 22:22:00.350000"
-
-
-async def users_handle(storage):
-    """migrating users first"""
-    counter = 0
-    id_map = {}
-    print("[migration] migrating %d users" % (len(storage["users"]["data"])))
-    for entry in storage["users"]["data"]:
-        oid = entry["_id"]
-        user = migrateUser(entry)
-        storage["users"]["by_oid"][oid] = user  # full
-        del user["password"]
-        del user["emailConfirmed"]
-        del user["username"]
-        del user["email"]
-        storage["users"]["by_slug"][user["slug"]] = user  # public
-        id_map[user["oid"]] = user["slug"]
-        counter += 1
-    ce = 0
-    for entry in storage["users"]["data"]:
-        ce += migrateUser_2stage(entry, id_map)
-    users_post_migrate()
-
-
-async def topics_handle(storage):
-    """topics from categories and tags"""
-    counter = 0
-    for t in storage["topics"]["tags"] + storage["topics"]["cats"]:
-        if t["slug"] in storage["replacements"]:
-            t["slug"] = storage["replacements"][t["slug"]]
-            topic = migrateTopic(t)
-            storage["topics"]["by_oid"][t["_id"]] = topic
-            storage["topics"]["by_slug"][t["slug"]] = topic
-            counter += 1
-        else:
-            print("[migration] topic " + t["slug"] + " ignored")
-    for oldslug, newslug in storage["replacements"].items():
-        if oldslug != newslug and oldslug in storage["topics"]["by_slug"]:
-            oid = storage["topics"]["by_slug"][oldslug]["_id"]
-            del storage["topics"]["by_slug"][oldslug]
-            storage["topics"]["by_oid"][oid] = storage["topics"]["by_slug"][newslug]
-    print("[migration] " + str(counter) + " topics migrated")
-    print(
-        "[migration] "
-        + str(len(storage["topics"]["by_oid"].values()))
-        + " topics by oid"
-    )
-    print(
-        "[migration] "
-        + str(len(storage["topics"]["by_slug"].values()))
-        + " topics by slug"
-    )
-
-
-async def shouts_handle(storage, args):
-    """migrating content items one by one"""
-    counter = 0
-    discours_author = 0
-    anonymous_author = 0
-    pub_counter = 0
-    ignored = 0
-    topics_dataset_bodies = []
-    topics_dataset_tlist = []
-    for entry in storage["shouts"]["data"]:
-        gc.collect()
-        # slug
-        slug = get_shout_slug(entry)
-
-        # single slug mode
-        if "-" in args and slug not in args:
-            continue
-
-        # migrate
-        shout_dict = await migrateShout(entry, storage)
-        if shout_dict:
-            storage["shouts"]["by_oid"][entry["_id"]] = shout_dict
-            storage["shouts"]["by_slug"][shout_dict["slug"]] = shout_dict
-            # shouts.topics
-            if not shout_dict["topics"]:
-                print("[migration] no topics!")
-
-            # with author
-            author = shout_dict["authors"][0]
-            if author["slug"] == "discours":
-                discours_author += 1
-            if author["slug"] == "anonymous":
-                anonymous_author += 1
-            # print('[migration] ' + shout['slug'] + ' with author ' + author)
-
-            if entry.get("published"):
-                if "mdx" in args:
-                    export_mdx(shout_dict)
-                pub_counter += 1
-
-            # print main counter
-            counter += 1
-            print('[migration] shouts_handle %d: %s @%s' % (
-                (counter + 1), shout_dict["slug"], author["slug"]
-            ))
-
-            b = bs4.BeautifulSoup(shout_dict["body"], "html.parser")
-            texts = [shout_dict["title"].lower().replace(r"[^а-яА-Яa-zA-Z]", "")]
-            texts = texts + b.findAll(text=True)
-            topics_dataset_bodies.append(" ".join([x.strip().lower() for x in texts]))
-            topics_dataset_tlist.append(shout_dict["topics"])
-        else:
-            ignored += 1
-
-    # np.savetxt('topics_dataset.csv', (topics_dataset_bodies, topics_dataset_tlist), delimiter=',
-    # ', fmt='%s')
-
-    print("[migration] " + str(counter) + " content items were migrated")
-    print("[migration] " + str(pub_counter) + " have been published")
-    print("[migration] " + str(discours_author) + " authored by @discours")
-    print("[migration] " + str(anonymous_author) + " authored by @anonymous")
-
-
-async def remarks_handle(storage):
-    print("[migration] comments")
-    c = 0
-    for entry_remark in storage["remarks"]["data"]:
-        remark = await migrateRemark(entry_remark, storage)
-        c += 1
-    print("[migration] " + str(c) + " remarks migrated")
-
-
-async def comments_handle(storage):
-    print("[migration] comments")
-    id_map = {}
-    ignored_counter = 0
-    missed_shouts = {}
-    for oldcomment in storage["reactions"]["data"]:
-        if not oldcomment.get("deleted"):
-            reaction = await migrateComment(oldcomment, storage)
-            if type(reaction) == str:
-                missed_shouts[reaction] = oldcomment
-            elif type(reaction) == Reaction:
-                reaction = reaction.dict()
-                rid = reaction["id"]
-                oid = reaction["oid"]
-                id_map[oid] = rid
-            else:
-                ignored_counter += 1
-
-    for reaction in storage["reactions"]["data"]:
-        migrateComment_2stage(reaction, id_map)
-    print("[migration] " + str(len(id_map)) + " comments migrated")
-    print("[migration] " + str(ignored_counter) + " comments ignored")
-    print("[migration] " + str(len(missed_shouts.keys())) + " commented shouts missed")
-    missed_counter = 0
-    for missed in missed_shouts.values():
-        missed_counter += len(missed)
-    print("[migration] " + str(missed_counter) + " comments dropped")
-
-
-async def all_handle(storage, args):
-    print("[migration] handle everything")
-    await users_handle(storage)
-    await topics_handle(storage)
-    print("[migration] users and topics are migrated")
-    await shouts_handle(storage, args)
-    # print("[migration] remarks...")
-    # await remarks_handle(storage)
-    print("[migration] migrating comments")
-    await comments_handle(storage)
-    # export_email_subscriptions()
-    print("[migration] done!")
-
-
-def data_load():
-    storage = {
-        "content_items": {
-            "by_oid": {},
-            "by_slug": {},
-        },
-        "shouts": {"by_oid": {}, "by_slug": {}, "data": []},
-        "reactions": {"by_oid": {}, "by_slug": {}, "by_content": {}, "data": []},
-        "topics": {
-            "by_oid": {},
-            "by_slug": {},
-            "cats": [],
-            "tags": [],
-        },
-        "remarks": {"data": []},
-        "users": {"by_oid": {}, "by_slug": {}, "data": []},
-        "replacements": json.loads(open("migration/tables/replacements.json").read()),
-    }
-    try:
-        users_data = json.loads(open("migration/data/users.json").read())
-        print("[migration.load] " + str(len(users_data)) + " users ")
-        tags_data = json.loads(open("migration/data/tags.json").read())
-        storage["topics"]["tags"] = tags_data
-        print("[migration.load] " + str(len(tags_data)) + " tags ")
-        cats_data = json.loads(
-            open("migration/data/content_item_categories.json").read()
-        )
-        storage["topics"]["cats"] = cats_data
-        print("[migration.load] " + str(len(cats_data)) + " cats ")
-        comments_data = json.loads(open("migration/data/comments.json").read())
-        storage["reactions"]["data"] = comments_data
-        print("[migration.load] " + str(len(comments_data)) + " comments ")
-        content_data = json.loads(open("migration/data/content_items.json").read())
-        storage["shouts"]["data"] = content_data
-        print("[migration.load] " + str(len(content_data)) + " content items ")
-
-        remarks_data = json.loads(open("migration/data/remarks.json").read())
-        storage["remarks"]["data"] = remarks_data
-        print("[migration.load] " + str(len(remarks_data)) + " remarks data ")
-
-        # fill out storage
-        for x in users_data:
-            storage["users"]["by_oid"][x["_id"]] = x
-            # storage['users']['by_slug'][x['slug']] = x
-            # no user.slug yet
-        print(
-            "[migration.load] "
-            + str(len(storage["users"]["by_oid"].keys()))
-            + " users by oid"
-        )
-        for x in tags_data:
-            storage["topics"]["by_oid"][x["_id"]] = x
-            storage["topics"]["by_slug"][x["slug"]] = x
-        for x in cats_data:
-            storage["topics"]["by_oid"][x["_id"]] = x
-            storage["topics"]["by_slug"][x["slug"]] = x
-        print(
-            "[migration.load] "
-            + str(len(storage["topics"]["by_slug"].keys()))
-            + " topics by slug"
-        )
-        for item in content_data:
-            slug = get_shout_slug(item)
-            storage["content_items"]["by_slug"][slug] = item
-            storage["content_items"]["by_oid"][item["_id"]] = item
-        print("[migration.load] " + str(len(content_data)) + " content items")
-        for x in comments_data:
-            storage["reactions"]["by_oid"][x["_id"]] = x
-            cid = x["contentItem"]
-            storage["reactions"]["by_content"][cid] = x
-            ci = storage["content_items"]["by_oid"].get(cid, {})
-            if "slug" in ci:
-                storage["reactions"]["by_slug"][ci["slug"]] = x
-        print(
-            "[migration.load] "
-            + str(len(storage["reactions"]["by_content"].keys()))
-            + " with comments"
-        )
-        storage["users"]["data"] = users_data
-        storage["topics"]["tags"] = tags_data
-        storage["topics"]["cats"] = cats_data
-        storage["shouts"]["data"] = content_data
-        storage["reactions"]["data"] = comments_data
-    except Exception as e:
-        raise e
-    return storage
-
-
-async def handling_migration():
-    init_tables()
-    await all_handle(data_load(), sys.argv)
-
-
-def process():
-    loop = asyncio.get_event_loop()
-    loop.run_until_complete(handling_migration())
-
-
-if __name__ == "__main__":
-    process()
diff --git a/migration/bson2json.py b/migration/bson2json.py
deleted file mode 100644
index 03effe19..00000000
--- a/migration/bson2json.py
+++ /dev/null
@@ -1,32 +0,0 @@
-import json
-import os
-
-import bson
-import gc
-from .utils import DateTimeEncoder
-
-
-def json_tables():
-    print("[migration] unpack dump/discours/*.bson to migration/data/*.json")
-    data = {
-        "content_items": [],
-        "content_item_categories": [],
-        "tags": [],
-        "email_subscriptions": [],
-        "users": [],
-        "comments": [],
-        "remarks": []
-    }
-    for table in data.keys():
-        print('[migration] bson2json for ' + table)
-        gc.collect()
-        lc = []
-        bs = open("dump/discours/" + table + ".bson", "rb").read()
-        base = 0
-        while base < len(bs):
-            base, d = bson.decode_document(bs, base)
-            lc.append(d)
-        data[table] = lc
-        open(os.getcwd() + "/migration/data/" + table + ".json", "w").write(
-            json.dumps(lc, cls=DateTimeEncoder)
-        )
diff --git a/migration/data/.gitkeep b/migration/data/.gitkeep
deleted file mode 100644
index e69de29b..00000000
diff --git a/migration/export.py b/migration/export.py
deleted file mode 100644
index 102cfb14..00000000
--- a/migration/export.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import json
-import os
-from datetime import datetime, timezone
-
-import frontmatter
-
-from .extract import extract_html, extract_media
-from .utils import DateTimeEncoder
-
-OLD_DATE = "2016-03-05 22:22:00.350000"
-EXPORT_DEST = "../discoursio-web/data/"
-parentDir = "/".join(os.getcwd().split("/")[:-1])
-contentDir = parentDir + "/discoursio-web/content/"
-ts = datetime.now(tz=timezone.utc)
-
-
-def get_metadata(r):
-    authors = []
-    for a in r["authors"]:
-        authors.append(
-            {  # a short version for public listings
-                "slug": a.slug or "discours",
-                "name": a.name or "Дискурс",
-                "userpic": a.userpic or "https://discours.io/static/img/discours.png",
-            }
-        )
-    metadata = {}
-    metadata["title"] = r.get("title", "").replace("{", "(").replace("}", ")")
-    metadata["authors"] = authors
-    metadata["createdAt"] = r.get("createdAt", ts)
-    metadata["layout"] = r["layout"]
-    metadata["topics"] = [topic for topic in r["topics"]]
-    metadata["topics"].sort()
-    if r.get("cover", False):
-        metadata["cover"] = r.get("cover")
-    return metadata
-
-
-def export_mdx(r):
-    # print('[export] mdx %s' % r['slug'])
-    content = ""
-    metadata = get_metadata(r)
-    content = frontmatter.dumps(frontmatter.Post(r["body"], **metadata))
-    ext = "mdx"
-    filepath = contentDir + r["slug"]
-    bc = bytes(content, "utf-8").decode("utf-8", "ignore")
-    open(filepath + "." + ext, "w").write(bc)
-
-
-def export_body(shout, storage):
-    entry = storage["content_items"]["by_oid"][shout["oid"]]
-    if entry:
-        body = extract_html(entry)
-        media = extract_media(entry)
-        shout["body"] = body  # prepare_html_body(entry)  # prepare_md_body(entry)
-        shout["media"] = media
-        export_mdx(shout)
-        print("[export] html for %s" % shout["slug"])
-        open(contentDir + shout["slug"] + ".html", "w").write(body)
-    else:
-        raise Exception("no content_items entry found")
-
-
-def export_slug(slug, storage):
-    shout = storage["shouts"]["by_slug"][slug]
-    shout = storage["shouts"]["by_slug"].get(slug)
-    assert shout, "[export] no shout found by slug: %s " % slug
-    author = shout["authors"][0]
-    assert author, "[export] no author error"
-    export_body(shout, storage)
-
-
-def export_email_subscriptions():
-    email_subscriptions_data = json.loads(
-        open("migration/data/email_subscriptions.json").read()
-    )
-    for data in email_subscriptions_data:
-        # TODO: migrate to mailgun list manually
-        # migrate_email_subscription(data)
-        pass
-    print(
-        "[migration] "
-        + str(len(email_subscriptions_data))
-        + " email subscriptions exported"
-    )
-
-
-def export_shouts(storage):
-    # update what was just migrated or load json again
-    if len(storage["users"]["by_slugs"].keys()) == 0:
-        storage["users"]["by_slugs"] = json.loads(
-            open(EXPORT_DEST + "authors.json").read()
-        )
-        print(
-            "[migration] "
-            + str(len(storage["users"]["by_slugs"].keys()))
-            + " exported authors "
-        )
-    if len(storage["shouts"]["by_slugs"].keys()) == 0:
-        storage["shouts"]["by_slugs"] = json.loads(
-            open(EXPORT_DEST + "articles.json").read()
-        )
-        print(
-            "[migration] "
-            + str(len(storage["shouts"]["by_slugs"].keys()))
-            + " exported articles "
-        )
-    for slug in storage["shouts"]["by_slugs"].keys():
-        export_slug(slug, storage)
-
-
-def export_json(
-    export_articles={}, export_authors={}, export_topics={}, export_comments={}
-):
-    open(EXPORT_DEST + "authors.json", "w").write(
-        json.dumps(
-            export_authors,
-            cls=DateTimeEncoder,
-            indent=4,
-            sort_keys=True,
-            ensure_ascii=False,
-        )
-    )
-    print("[migration] " + str(len(export_authors.items())) + " authors exported")
-    open(EXPORT_DEST + "topics.json", "w").write(
-        json.dumps(
-            export_topics,
-            cls=DateTimeEncoder,
-            indent=4,
-            sort_keys=True,
-            ensure_ascii=False,
-        )
-    )
-    print("[migration] " + str(len(export_topics.keys())) + " topics exported")
-
-    open(EXPORT_DEST + "articles.json", "w").write(
-        json.dumps(
-            export_articles,
-            cls=DateTimeEncoder,
-            indent=4,
-            sort_keys=True,
-            ensure_ascii=False,
-        )
-    )
-    print("[migration] " + str(len(export_articles.items())) + " articles exported")
-    open(EXPORT_DEST + "comments.json", "w").write(
-        json.dumps(
-            export_comments,
-            cls=DateTimeEncoder,
-            indent=4,
-            sort_keys=True,
-            ensure_ascii=False,
-        )
-    )
-    print(
-        "[migration] "
-        + str(len(export_comments.items()))
-        + " exported articles with comments"
-    )
diff --git a/migration/extract.py b/migration/extract.py
deleted file mode 100644
index 9ea84067..00000000
--- a/migration/extract.py
+++ /dev/null
@@ -1,434 +0,0 @@
-import base64
-import os
-import re
-import uuid
-
-from bs4 import BeautifulSoup
-
-
-TOOLTIP_REGEX = r"(\/\/\/(.+)\/\/\/)"
-contentDir = os.path.join(
-    os.path.dirname(os.path.realpath(__file__)), "..", "..", "discoursio-web", "content"
-)
-s3 = "https://discours-io.s3.amazonaws.com/"
-cdn = "https://assets.discours.io"
-
-
-def replace_tooltips(body):
-    # change if you prefer regexp
-    newbody = body
-    matches = list(re.finditer(TOOLTIP_REGEX, body, re.IGNORECASE | re.MULTILINE))[1:]
-    for match in matches:
-        newbody = body.replace(
-            match.group(1), ''
-        )  # NOTE: doesn't work
-    if len(matches) > 0:
-        print("[extract] found %d tooltips" % len(matches))
-    return newbody
-
-
-
-def extract_footnotes(body, shout_dict):
-    parts = body.split("&&&")
-    lll = len(parts)
-    newparts = list(parts)
-    placed = False
-    if lll & 1:
-        if lll > 1:
-            i = 1
-            print("[extract] found %d footnotes in body" % (lll - 1))
-            for part in parts[1:]:
-                if i & 1:
-                    placed = True
-                    if 'a class="footnote-url" href=' in part:
-                        print("[extract] footnote: " + part)
-                        fn = 'a class="footnote-url" href="'
-                        exxtracted_link = part.split(fn, 1)[1].split('"', 1)[0]
-                        extracted_body = part.split(fn, 1)[1].split('>', 1)[1].split('', 1)[0]
-                        print("[extract] footnote link: " + extracted_link)
-                        with local_session() as session:
-                            Reaction.create({
-                                "shout": shout_dict['id'],
-                                "kind": ReactionKind.FOOTNOTE,
-                                "body": extracted_body,
-                                "range": str(body.index(fn + link) - len('<')) + ':' + str(body.index(extracted_body) + len(''))
-                            })
-                        newparts[i] = "ℹ️"
-                else:
-                    newparts[i] = part
-                i += 1
-    return ("".join(newparts), placed)
-
-
-def place_tooltips(body):
-    parts = body.split("&&&")
-    lll = len(parts)
-    newparts = list(parts)
-    placed = False
-    if lll & 1:
-        if lll > 1:
-            i = 1
-            print("[extract] found %d tooltips" % (lll - 1))
-            for part in parts[1:]:
-                if i & 1:
-                    placed = True
-                    if 'a class="footnote-url" href=' in part:
-                        print("[extract] footnote: " + part)
-                        fn = 'a class="footnote-url" href="'
-                        link = part.split(fn, 1)[1].split('"', 1)[0]
-                        extracted_part = (
-                            part.split(fn, 1)[0] + " " + part.split("/", 1)[-1]
-                        )
-                        newparts[i] = (
-                            ""
-                            + extracted_part
-                            + ""
-                        )
-                    else:
-                        newparts[i] = "%s" % part
-                        # print('[extract] ' + newparts[i])
-                else:
-                    # print('[extract] ' + part[:10] + '..')
-                    newparts[i] = part
-                i += 1
-    return ("".join(newparts), placed)
-
-
-IMG_REGEX = r"\!\[(.*?)\]\((data\:image\/(png|jpeg|jpg);base64\,((?:[A-Za-z\d+\/]{4})*(?:[A-Za-z\d+\/]{3}="
-IMG_REGEX += r"|[A-Za-z\d+\/]{2}==)))\)"
-
-parentDir = "/".join(os.getcwd().split("/")[:-1])
-public = parentDir + "/discoursio-web/public"
-cache = {}
-
-
-def reextract_images(body, oid):
-    # change if you prefer regexp
-    matches = list(re.finditer(IMG_REGEX, body, re.IGNORECASE | re.MULTILINE))[1:]
-    i = 0
-    for match in matches:
-        print("[extract] image " + match.group(1))
-        ext = match.group(3)
-        name = oid + str(i)
-        link = public + "/upload/image-" + name + "." + ext
-        img = match.group(4)
-        title = match.group(1)  # NOTE: this is not the title
-        if img not in cache:
-            content = base64.b64decode(img + "==")
-            print(str(len(img)) + " image bytes been written")
-            open("../" + link, "wb").write(content)
-            cache[img] = name
-            i += 1
-        else:
-            print("[extract] image cached " + cache[img])
-        body.replace(
-            str(match), "![" + title + "](" + cdn + link + ")"
-        )  # WARNING: this does not work
-    return body
-
-
-IMAGES = {
-    "data:image/png": "png",
-    "data:image/jpg": "jpg",
-    "data:image/jpeg": "jpg",
-}
-
-b64 = ";base64,"
-
-
-def extract_imageparts(bodyparts, prefix):
-    # recursive loop
-    newparts = list(bodyparts)
-    for current in bodyparts:
-        i = bodyparts.index(current)
-        for mime in IMAGES.keys():
-            if mime == current[-len(mime) :] and (i + 1 < len(bodyparts)):
-                print("[extract] " + mime)
-                next = bodyparts[i + 1]
-                ext = IMAGES[mime]
-                b64end = next.index(")")
-                b64encoded = next[:b64end]
-                name = prefix + "-" + str(len(cache))
-                link = "/upload/image-" + name + "." + ext
-                print("[extract] name: " + name)
-                print("[extract] link: " + link)
-                print("[extract] %d bytes" % len(b64encoded))
-                if b64encoded not in cache:
-                    try:
-                        content = base64.b64decode(b64encoded + "==")
-                        open(public + link, "wb").write(content)
-                        print(
-                            "[extract] "
-                            + str(len(content))
-                            + " image bytes been written"
-                        )
-                        cache[b64encoded] = name
-                    except Exception:
-                        raise Exception
-                        # raise Exception('[extract] error decoding image %r' %b64encoded)
-                else:
-                    print("[extract] cached link " + cache[b64encoded])
-                    name = cache[b64encoded]
-                    link = cdn + "/upload/image-" + name + "." + ext
-                newparts[i] = (
-                    current[: -len(mime)]
-                    + current[-len(mime) :]
-                    + link
-                    + next[-b64end:]
-                )
-                newparts[i + 1] = next[:-b64end]
-                break
-    return (
-        extract_imageparts(
-            newparts[i] + newparts[i + 1] + b64.join(bodyparts[(i + 2) :]), prefix
-        )
-        if len(bodyparts) > (i + 1)
-        else "".join(newparts)
-    )
-
-
-def extract_dataimages(parts, prefix):
-    newparts = list(parts)
-    for part in parts:
-        i = parts.index(part)
-        if part.endswith("]("):
-            [ext, rest] = parts[i + 1].split(b64)
-            name = prefix + "-" + str(len(cache))
-            if ext == "/jpeg":
-                ext = "jpg"
-            else:
-                ext = ext.replace("/", "")
-            link = "/upload/image-" + name + "." + ext
-            print("[extract] filename: " + link)
-            b64end = rest.find(")")
-            if b64end != -1:
-                b64encoded = rest[:b64end]
-                print("[extract] %d text bytes" % len(b64encoded))
-                # write if not cached
-                if b64encoded not in cache:
-                    try:
-                        content = base64.b64decode(b64encoded + "==")
-                        open(public + link, "wb").write(content)
-                        print("[extract] " + str(len(content)) + " image bytes")
-                        cache[b64encoded] = name
-                    except Exception:
-                        raise Exception
-                        # raise Exception('[extract] error decoding image %r' %b64encoded)
-                else:
-                    print("[extract] 0 image bytes, cached for " + cache[b64encoded])
-                    name = cache[b64encoded]
-
-                # update link with CDN
-                link = cdn + "/upload/image-" + name + "." + ext
-
-                # patch newparts
-                newparts[i + 1] = link + rest[b64end:]
-            else:
-                raise Exception("cannot find the end of base64 encoded string")
-        else:
-            print("[extract] dataimage skipping part " + str(i))
-            continue
-    return "".join(newparts)
-
-
-di = "data:image"
-
-
-def extract_md_images(body, prefix):
-    newbody = ""
-    body = (
-        body.replace("\n! [](" + di, "\n ![](" + di)
-        .replace("\n[](" + di, "\n![](" + di)
-        .replace(" [](" + di, " ![](" + di)
-    )
-    parts = body.split(di)
-    if len(parts) > 1:
-        newbody = extract_dataimages(parts, prefix)
-    else:
-        newbody = body
-    return newbody
-
-
-def cleanup_md(body):
-    newbody = (
-        body.replace("<", "")
-        .replace(">", "")
-        .replace("{", "(")
-        .replace("}", ")")
-        .replace("…", "...")
-        .replace(" __ ", " ")
-        .replace("_ _", " ")
-        .replace("****", "")
-        .replace("\u00a0", " ")
-        .replace("\u02c6", "^")
-        .replace("\u00a0", " ")
-        .replace("\ufeff", "")
-        .replace("\u200b", "")
-        .replace("\u200c", "")
-    )  # .replace('\u2212', '-')
-    return newbody
-
-
-def extract_md(body, shout_dict = None):
-    newbody = body
-    if newbody:
-        newbody = cleanup_md(newbody)
-        if not newbody:
-            raise Exception("cleanup error")
-
-        if shout_dict:
-
-            uid = shout_dict['id'] or uuid.uuid4()
-            newbody = extract_md_images(newbody, uid)
-            if not newbody:
-                raise Exception("extract_images error")
-
-            newbody, placed = extract_footnotes(body, shout_dict)
-            if not newbody:
-                raise Exception("extract_footnotes error")
-
-    return newbody
-
-
-def extract_media(entry):
-    ''' normalized media extraction method '''
-    # media [ { title pic url body } ]}
-    kind = entry.get("type")
-    if not kind:
-        print(entry)
-        raise Exception("shout no layout")
-    media = []
-    for m in entry.get("media") or []:
-        # title
-        title = m.get("title", "").replace("\n", " ").replace(" ", " ")
-        artist = m.get("performer") or m.get("artist")
-        if artist:
-            title = artist + " - " + title
-
-        # pic
-        url = m.get("fileUrl") or m.get("url", "")
-        pic = ""
-        if m.get("thumborId"):
-            pic = cdn + "/unsafe/1600x/" + m["thumborId"]
-
-        # url
-        if not url:
-            if kind == "Image":
-                url = pic
-            elif "youtubeId" in m:
-                url = "https://youtube.com/?watch=" + m["youtubeId"]
-            elif "vimeoId" in m:
-                url = "https://vimeo.com/" + m["vimeoId"]
-        # body
-        body = m.get("body") or m.get("literatureBody") or ""
-        media.append({
-            "url": url,
-            "pic": pic,
-            "title": title,
-            "body": body
-        })
-    return media
-
-
-def prepare_html_body(entry):
-    # body modifications
-    body = ""
-    kind = entry.get("type")
-    addon = ""
-    if kind == "Video":
-        addon = ""
-        for m in entry.get("media") or []:
-            if "youtubeId" in m:
-                addon += '