# SPDX-License-Identifier: AGPL-3.0-only
import strutils, strformat, times, uri, tables, xmltree, htmlparser, htmlgen
import std/[enumerate, re]
import types, utils, query

const
  cards = "cards.twitter.com/cards"
  tco = "https://t.co"
  twitter = parseUri("https://twitter.com")

let
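  # bare twitter.com links and twitter.com links already wrapped in an anchor tag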
  twRegex = re"(?<=(?<!\S)https:\/\/|(?<=\s))(www\.|mobile\.)?twitter\.com"
  twLinkRegex = re"""<a href="https:\/\/twitter.com([^"]+)">twitter\.com(\S+)</a>"""

  ytRegex = re"([A-z.]+\.)?youtu(be\.com|\.be)"

  igRegex = re"(www\.)?instagram\.com"

  rdRegex = re"(?<![.b])((www|np|new|amp|old)\.)?reddit.com"
  rdShortRegex = re"(?<![.b])redd\.it\/"
  # Videos cannot be supported uniformly between Teddit and Libreddit,
  # so v.redd.it links will not be replaced.
  # Images aren't supported because Teddit errors out unless the image
  # was first displayed via a post on the Teddit instance.

  wwwRegex = re"https?://(www[0-9]?\.)?"
  m3u8Regex = re"""url="(.+.m3u8)""""
  userPicRegex = re"_(normal|bigger|mini|200x200|400x400)(\.[A-z]+)$"
  extRegex = re"(\.[A-z]+)$"
  illegalXmlRegex = re"(*UTF8)[^\x09\x0A\x0D\x20-\x{D7FF}\x{E000}-\x{FFFD}\x{10000}-\x{10FFFF}]"

proc getUrlPrefix*(cfg: Config): string =
  if cfg.useHttps: https & cfg.hostname
  else: "http://" & cfg.hostname

proc shortLink*(text: string; length=28): string =
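  ## Strips the scheme and www. prefix from a URL; results longer than
  ## `length` are truncated and suffixed with an ellipsis.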
  result = text.replace(wwwRegex, "")
  if result.len > length:
    result = result[0 ..< length] & "…"

proc stripHtml*(text: string; shorten=false): string =
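  ## Renders tweet HTML as plain text; the text of each anchor that points at
  ## an external URL is replaced with the link itself (shortened if requested).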
  var html = parseHtml(text)
  for el in html.findAll("a"):
    let link = el.attr("href")
    if "http" in link:
      if el.len == 0: continue
      el[0].text =
        if shorten: link.shortLink
        else: link
  html.innerText()

proc sanitizeXml*(text: string): string =
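  ## Removes characters that are not valid in XML documents.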
  text.replace(illegalXmlRegex, "")

proc replaceUrls*(body: string; prefs: Prefs; absolute=""): string =
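  ## Rewrites YouTube, Twitter, Reddit and Instagram links to the alternative
  ## frontends configured in `prefs`; when `absolute` is set, relative hrefs
  ## are prefixed with it.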
  result = body

  if prefs.replaceYouTube.len > 0 and "youtu" in result:
    result = result.replace(ytRegex, prefs.replaceYouTube)
    if prefs.replaceYouTube in result:
      result = result.replace("/c/", "/")

  if prefs.replaceTwitter.len > 0 and ("twitter.com" in body or tco in body):
    result = result.replace(tco, https & prefs.replaceTwitter & "/t.co")
    result = result.replace(cards, prefs.replaceTwitter & "/cards")
    result = result.replace(twRegex, prefs.replaceTwitter)
    result = result.replacef(twLinkRegex, a(
      prefs.replaceTwitter & "$2", href = https & prefs.replaceTwitter & "$1"))

  if prefs.replaceReddit.len > 0 and ("reddit.com" in result or "redd.it" in result):
    result = result.replace(rdShortRegex, prefs.replaceReddit & "/comments/")
    result = result.replace(rdRegex, prefs.replaceReddit)
    if prefs.replaceReddit in result and "/gallery/" in result:
      result = result.replace("/gallery/", "/comments/")

  if prefs.replaceInstagram.len > 0 and "instagram.com" in result:
    result = result.replace(igRegex, prefs.replaceInstagram)

  if absolute.len > 0 and "href" in result:
    result = result.replace("href=\"/", "href=\"" & absolute & "/")

proc getM3u8Url*(content: string): string =
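  ## Extracts the first .m3u8 playlist URL found in a url="..." attribute;
  ## returns an empty string when there is no match.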
  var matches: array[1, string]
  if re.find(content, m3u8Regex, matches) != -1:
    result = matches[0]

proc proxifyVideo*(manifest: string; proxy: bool): string =
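  ## Expands relative paths in an HLS manifest to absolute video.twimg.com
  ## URLs, passing them through `getVidUrl` when `proxy` is true.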
  var replacements: seq[(string, string)]
  for line in manifest.splitLines:
    let url =
      if line.startsWith("#EXT-X-MAP:URI"): line[16 .. ^2]
      else: line
    if url.startsWith('/'):
      let path = "https://video.twimg.com" & url
      replacements.add (url, if proxy: path.getVidUrl else: path)
  return manifest.multiReplace(replacements)

proc getUserPic*(userPic: string; style=""): string =
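  ## Removes the size suffix (_normal, _bigger, _mini, _200x200, _400x400)
  ## from an avatar URL and inserts `style` before the file extension.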
  userPic.replacef(userPicRegex, "$2").replacef(extRegex, style & "$1")

proc getUserPic*(user: User; style=""): string =
  getUserPic(user.userPic, style)

proc getVideoEmbed*(cfg: Config; id: int64): string =
  &"{getUrlPrefix(cfg)}/i/videos/{id}"

proc pageTitle*(user: User): string =
  &"{user.fullname} (@{user.username})"

proc pageTitle*(tweet: Tweet): string =
  &"{pageTitle(tweet.user)}: \"{stripHtml(tweet.text)}\""

proc pageDesc*(user: User): string =
  if user.bio.len > 0:
    stripHtml(user.bio)
  else:
    "The latest tweets from " & user.fullname

proc getJoinDate*(user: User): string =
  user.joinDate.format("'Joined' MMMM YYYY")

proc getJoinDateFull*(user: User): string =
  user.joinDate.format("h:mm tt - d MMM YYYY")

proc getTime*(tweet: Tweet): string =
  tweet.time.format("MMM d', 'YYYY' · 'h:mm tt' UTC'")

proc getRfc822Time*(tweet: Tweet): string =
  tweet.time.format("ddd', 'dd MMM yyyy HH:mm:ss 'GMT'")

proc getShortTime*(tweet: Tweet): string =
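  ## Formats a tweet's age compactly: "30s"/"5m"/"12h" for recent tweets,
  ## an abbreviated date within the current year, and a full date otherwise.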
  let now = now()
  let since = now - tweet.time

  if now.year != tweet.time.year:
    result = tweet.time.format("d MMM yyyy")
  elif since.inDays >= 1:
    result = tweet.time.format("MMM d")
  elif since.inHours >= 1:
    result = $since.inHours & "h"
  elif since.inMinutes >= 1:
    result = $since.inMinutes & "m"
  elif since.inSeconds > 1:
    result = $since.inSeconds & "s"
  else:
    result = "now"

proc getLink*(tweet: Tweet; focus=true): string =
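  ## Builds the relative status path for a tweet, using the "i" namespace when
  ## the username is unknown; `focus` appends the #m fragment.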
  if tweet.id == 0: return
  var username = tweet.user.username
  if username.len == 0:
    username = "i"
  result = &"/{username}/status/{tweet.id}"
  if focus: result &= "#m"

proc getTwitterLink*(path: string; params: Table[string, string]): string =
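  ## Maps a Nitter path and its query parameters to the corresponding
  ## twitter.com URL; multi-user timelines become an equivalent search query.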
  var
    username = params.getOrDefault("name")
    query = initQuery(params, username)
    path = path

  if "," in username:
    query.fromUser = username.split(",")
    path = "/search"

  if "/search" notin path and query.fromUser.len < 2:
    return $(twitter / path)

  let p = {
    "f": if query.kind == users: "user" else: "live",
    "q": genQueryParam(query),
    "src": "typed_query"
  }

  result = $(twitter / path ? p)
  if username.len > 0:
    result = result.replace("/" & username, "")

proc getLocation*(u: User | Tweet): (string, string) =
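  ## Splits a "name:placeId" location into the display name and a place search
  ## URL; locations that already contain a URL are returned without a link.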
  if "://" in u.location: return (u.location, "")
  let loc = u.location.split(":")
  let url = if loc.len > 1: "/search?q=place:" & loc[1] else: ""
  (loc[0], url)

proc getSuspended*(username: string): string =
  &"User \"{username}\" has been suspended"

proc titleize*(str: string): string =
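  ## Uppercases the first letter of each word, where a word starts at the
  ## beginning of the string or after a space or opening parenthesis,
  ## e.g. "for you (beta)" becomes "For You (Beta)".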
  const
    lowercase = {'a'..'z'}
    delims = {' ', '('}

  result = str
  for i, c in enumerate(str):
    if c in lowercase and (i == 0 or str[i - 1] in delims):
      result[i] = c.toUpperAscii