import os
import re

from bs4 import BeautifulSoup

TOOLTIP_REGEX = r"(\/\/\/(.+?)\/\/\/)"  # non-greedy, so several tooltips on one line stay separate
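# A match looks like (hypothetical sample, not from the dump):
#   "word ///a short hint/// word" -> group(1) = "///a short hint///", group(2) = "a short hint"
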
contentDir = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), "..", "..", "discoursio-web", "content"
)

cdn = "https://images.discours.io"


def replace_tooltips(body):
    """Rewrite every ///text/// span in body as a <Tooltip> component."""
    newbody = body
    matches = list(re.finditer(TOOLTIP_REGEX, body, re.IGNORECASE | re.MULTILINE))
    for match in matches:
        # substitute on newbody, not body: replacing on body discards the
        # previous iterations' substitutions, so the loop would do nothing
        newbody = newbody.replace(
            match.group(1), '<Tooltip text="' + match.group(2) + '" />'
        )
    if len(matches) > 0:
        print("[extract] found %d tooltips" % len(matches))
    return newbody
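

# Usage sketch (hypothetical input, not from the migration dump):
#   replace_tooltips("before ///note/// after")
#   -> 'before <Tooltip text="note" /> after'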


# def extract_footnotes(body, shout_dict):
#     parts = body.split("&&&")
#     lll = len(parts)
#     newparts = list(parts)
#     placed = False
#     if lll & 1:
#         if lll > 1:
#             i = 1
#             print("[extract] found %d footnotes in body" % (lll - 1))
#             for part in parts[1:]:
#                 if i & 1:
#                     placed = True
#                     if 'a class="footnote-url" href=' in part:
#                         print("[extract] footnote: " + part)
#                         fn = 'a class="footnote-url" href="'
#                         extracted_link = part.split(fn, 1)[1].split('"', 1)[0]
#                         extracted_body = part.split(fn, 1)[1].split(">", 1)[1].split("</a>", 1)[0]
#                         print("[extract] footnote link: " + extracted_link)
#                         with local_session() as session:
#                             Reaction.create(
#                                 {
#                                     "shout": shout_dict["id"],
#                                     "kind": ReactionKind.FOOTNOTE,
#                                     "body": extracted_body,
#                                     "range": str(body.index(fn + extracted_link) - len("<"))
#                                     + ":"
#                                     + str(body.index(extracted_body) + len("</a>")),
#                                 }
#                             )
#                         newparts[i] = "<a href='#'>ℹ️</a>"
#                 else:
#                     newparts[i] = part
#                 i += 1
#     return ("".join(newparts), placed)


# def place_tooltips(body):
#     parts = body.split("&&&")
#     lll = len(parts)
#     newparts = list(parts)
#     placed = False
#     if lll & 1:
#         if lll > 1:
#             i = 1
#             print("[extract] found %d tooltips" % (lll - 1))
#             for part in parts[1:]:
#                 if i & 1:
#                     placed = True
#                     if 'a class="footnote-url" href=' in part:
#                         print("[extract] footnote: " + part)
#                         fn = 'a class="footnote-url" href="'
#                         link = part.split(fn, 1)[1].split('"', 1)[0]
#                         extracted_part = part.split(fn, 1)[0] + " " + part.split("/", 1)[-1]
#                         newparts[i] = (
#                             "<Tooltip"
#                             + (' link="' + link + '" ' if link else "")
#                             + ">"
#                             + extracted_part
#                             + "</Tooltip>"
#                         )
#                     else:
#                         newparts[i] = "<Tooltip>%s</Tooltip>" % part
#                     # print('[extract] ' + newparts[i])
#                 else:
#                     # print('[extract] ' + part[:10] + '..')
#                     newparts[i] = part
#                 i += 1
#     return ("".join(newparts), placed)


IMG_REGEX = (
    r"\!\[(.*?)\]\((data\:image\/(png|jpeg|jpg);base64\,"
    r"((?:[A-Za-z\d+\/]{4})*(?:[A-Za-z\d+\/]{3}=|[A-Za-z\d+\/]{2}==)))\)"
)
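# Matches an inline base64 image (hypothetical sample):
#   ![alt](data:image/png;base64,iVBORw0KGgo=)
# group(1) = alt text, group(3) = extension, group(4) = base64 payload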

parentDir = os.path.dirname(os.getcwd())
public = parentDir + "/discoursio-web/public"

cache = {}  # base64 payload -> stored image name, deduplicates repeated images


# NOTE: needs `import base64` if re-enabled
# def reextract_images(body, oid):
#     matches = list(re.finditer(IMG_REGEX, body, re.IGNORECASE | re.MULTILINE))
#     i = 0
#     for match in matches:
#         print("[extract] image " + match.group(1))
#         ext = match.group(3)
#         name = oid + str(i)
#         link = public + "/upload/image-" + name + "." + ext
#         img = match.group(4)
#         title = match.group(1)  # NOTE: this is the alt text, not the title
#         if img not in cache:
#             content = base64.b64decode(img + "==")
#             print(str(len(img)) + " image bytes have been written")
#             open("../" + link, "wb").write(content)
#             cache[img] = name
#             i += 1
#         else:
#             print("[extract] image cached " + cache[img])
#         body = body.replace(match.group(0), "")  # reassign: a bare str.replace call has no effect
#     return body


IMAGES = {
    "data:image/png": "png",
    "data:image/jpg": "jpg",
    "data:image/jpeg": "jpg",
}

b64 = ";base64,"

di = "data:image"


def extract_media(entry):
    """Normalize entry media into a list of { url, pic, title, body } dicts."""
    kind = entry.get("type")
    if not kind:
        print(entry)
        raise Exception("shout has no layout type")

    media = []
    for m in entry.get("media") or []:
        # title
        title = m.get("title", "").replace("\n", " ").replace("\u00a0", " ")  # drop newlines and non-breaking spaces
        artist = m.get("performer") or m.get("artist")
        if artist:
            title = artist + " - " + title

        # pic
        url = m.get("fileUrl") or m.get("url", "")
        pic = ""
        if m.get("thumborId"):
            pic = cdn + "/unsafe/" + m["thumborId"]

        # url
        if not url:
            if kind == "Image":
                url = pic
            elif "youtubeId" in m:
                url = "https://www.youtube.com/watch?v=" + m["youtubeId"]  # canonical watch url
            elif "vimeoId" in m:
                url = "https://vimeo.com/" + m["vimeoId"]

        # body
        body = m.get("body") or m.get("literatureBody") or ""

        media.append({"url": url, "pic": pic, "title": title, "body": body})
    return media
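

# Usage sketch (hypothetical entry, key names as read above):
#   extract_media({"type": "Music", "media": [
#       {"title": "Track", "performer": "Band", "fileUrl": "https://example.com/track.mp3"}]})
#   -> [{"url": "https://example.com/track.mp3", "pic": "", "title": "Band - Track", "body": ""}]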


def prepare_html_body(entry):
    # build media embeds first, then append the extracted html body
    body = ""
    kind = entry.get("type")
    addon = ""
    if kind == "Video":
        addon = ""
        for m in entry.get("media") or []:
            if "youtubeId" in m:
                addon += '<iframe width="420" height="345" src="https://www.youtube.com/embed/'
                addon += m["youtubeId"]
                addon += '?autoplay=1" frameborder="0" allowfullscreen></iframe>\n'
            elif "vimeoId" in m:
                addon += '<iframe src="https://player.vimeo.com/video/'
                addon += m["vimeoId"]
                addon += ' width="420" height="345" frameborder="0" allow="autoplay; fullscreen"'
                addon += " allowfullscreen></iframe>"
            else:
                print("[extract] media is not supported")
                print(m)
        body += addon

    elif kind == "Music":
        addon = ""
        for m in entry.get("media") or []:
            artist = m.get("performer")
            trackname = ""
            if artist:
                trackname += artist + " - "
            if "title" in m:
                trackname += m.get("title", "")
            addon += "<figure><figcaption>"
            addon += trackname
            addon += '</figcaption><audio controls src="'
            addon += m.get("fileUrl", "")
            addon += '"></audio></figure>'
        body += addon

    body += extract_html(entry)  # append the main text after any media embeds
    # if body_orig: body += extract_md(html2text(body_orig), entry['_id'])
    return body
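

# Usage sketch (hypothetical entry): for a "Video" entry with a youtubeId, the
# result is the iframe embed followed by the html extracted from entry["body"].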


def cleanup_html(body: str) -> str:
    """Strip gdocs/msword markup leftovers and empty tags from html."""
    new_body = body
    regex_remove = [
        r"style=\"width:\s*\d+px;height:\s*\d+px;\"",
        r"style=\"width:\s*\d+px;\"",
        r"style=\"color: #000000;\"",
        r"style=\"float: none;\"",
        r"style=\"background: white;\"",
        r"class=\"Apple-interchange-newline\"",
        r"class=\"MsoNormalCxSpMiddle\"",
        r"class=\"MsoNormal\"",
        r"lang=\"EN-US\"",
        r"id=\"docs-internal-guid-[\w-]+\"",
        r"<p>\s*</p>",
        r"<span></span>",
        r"<i>\s*</i>",
        r"<b>\s*</b>",
        r"<h1>\s*</h1>",
        r"<h2>\s*</h2>",
        r"<h3>\s*</h3>",
        r"<h4>\s*</h4>",
        r"<div>\s*</div>",
    ]
    regex_replace = {r"<br>\s*</p>": "</p>"}
    changed = True
    while changed:
        # we need several iterations to clean nested tags this way
        changed = False
        new_body_iteration = new_body
        for regex in regex_remove:
            new_body = re.sub(regex, "", new_body)
        for regex, replace in regex_replace.items():
            new_body = re.sub(regex, replace, new_body)
        if new_body_iteration != new_body:
            changed = True
    return new_body
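

# Example (hypothetical input): nested empty tags are why the loop iterates —
#   cleanup_html("<p><i> </i></p>") -> ""  (the inner <i> is removed on pass
#   one, the then-empty <p> on pass two)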


def extract_html(entry, shout_id=None, cleanup=False):
    # unescape markdown-escaped parentheses before parsing
    body_orig = (entry.get("body") or "").replace(r"\(", "(").replace(r"\)", ")")
    if cleanup:
        # we do this before bs4 parsing to catch invalid html
        body_clean = cleanup_html(body_orig)
        if body_clean != body_orig:
            print(f"[migration] html cleaned for slug {entry.get('slug', None)}")
        body_orig = body_clean
    # if shout_id:
    #     extract_footnotes(body_orig, shout_id)
    body_html = str(BeautifulSoup(body_orig, features="html.parser"))
    if cleanup:
        # we do this after bs4 parsing because it can add dummy tags
        body_clean_html = cleanup_html(body_html)
        if body_clean_html != body_html:
            print(f"[migration] html cleaned after bs4 for slug {entry.get('slug', None)}")
        body_html = body_clean_html
    return body_html
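

# Usage sketch (hypothetical entry):
#   extract_html({"body": "<p>text</p><span></span>", "slug": "example"}, cleanup=True)
#   -> "<p>text</p>"  (the empty <span> is stripped by cleanup_html)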