Mirror of https://github.com/zedeus/nitter.git
Merge pull request #443 from jackyzy823/proxy: Add proxy for outgoing request
Commit ebffb6d251, 5 changed files with 15 additions and 1 deletion

nitter.example.conf
@@ -23,6 +23,8 @@ redisMaxConnections = 30
 hmacKey = "secretkey"  # random key for cryptographic signing of video urls
 base64Media = false  # use base64 encoding for proxied media urls
 enableRSS = true  # set this to false to disable RSS feeds
+proxy = ""  # proxy type http/https
+proxyAuth = ""
 tokenCount = 10
 # minimum amount of usable tokens. tokens are used to authorize API requests,
 # but they expire after ~1 hour, and have a limit of 187 requests.
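
Not part of the patch, but as an illustration: pointing the two new keys at a local HTTP proxy with basic authentication could look like the lines below. The address and credentials are placeholders, and leaving proxy empty keeps the previous direct-connection behaviour.

    proxy = "http://127.0.0.1:8118"   # placeholder: a local HTTP proxy
    proxyAuth = "user:password"       # placeholder credentials (assumed user:password form); leave empty for no auth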

src/config.nim
@@ -27,6 +27,8 @@ proc getConfig*(path: string): (Config, parseCfg.Config) =
     base64Media: cfg.get("Config", "base64Media", false),
     minTokens: cfg.get("Config", "tokenCount", 10),
     enableRss: cfg.get("Config", "enableRSS", true),
+    proxy: cfg.get("Config", "proxy", ""),
+    proxyAuth: cfg.get("Config", "proxyAuth", ""),
 
     listCacheTime: cfg.get("Cache", "listMinutes", 120),
     rssCacheTime: cfg.get("Cache", "rssMinutes", 10),
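
For context, nitter's cfg.get helper (defined elsewhere in config.nim, not shown in this diff) reads a key from a section with a fallback default. Below is a minimal sketch of the same pattern using std/parsecfg directly, with an assumed config path, just to illustrate what the two new lines resolve to:

    import std/parsecfg

    # Illustrative helper, not nitter's cfg.get: return the value of `key` in
    # `section`, falling back to `default` when the key is missing or empty.
    proc getOr(cfg: Config; section, key, default: string): string =
      result = cfg.getSectionValue(section, key)
      if result.len == 0:
        result = default

    let cfg = loadConfig("nitter.conf")        # assumed path, for the example only
    echo getOr(cfg, "Config", "proxy", "")     # empty string means "no proxy"
    echo getOr(cfg, "Config", "proxyAuth", "")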

src/http_pool.nim
@@ -6,10 +6,17 @@ type
     conns*: seq[AsyncHttpClient]
 
 var maxConns {.threadvar.}: int
+var proxy {.threadvar.}: Proxy
 
 proc setMaxHttpConns*(n: int) =
   maxConns = n
 
+proc setHttpProxy*(url: string; auth: string) =
+  if url.len > 0:
+    proxy = newProxy(url, auth)
+  else:
+    proxy = nil
+
 proc release*(pool: HttpPool; client: AsyncHttpClient) =
   if pool.conns.len >= maxConns:
     client.close()

@@ -20,7 +27,7 @@ template use*(pool: HttpPool; heads: HttpHeaders; body: untyped): untyped =
   var c {.inject.}: AsyncHttpClient
 
   if pool.conns.len == 0:
-    c = newAsyncHttpClient(headers=heads)
+    c = newAsyncHttpClient(headers=heads, proxy=proxy)
   else:
     c = pool.conns.pop()
     c.headers = heads
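
The proxy support itself comes from Nim's standard library rather than anything nitter-specific: a client built with a Proxy object sends its requests through that proxy, and a nil proxy means a plain direct connection, which is why an empty config value leaves behaviour unchanged. A minimal, self-contained sketch of that std/httpclient API (the proxy address and credentials are placeholders, not taken from the patch):

    import std/[asyncdispatch, httpclient]

    proc demo() {.async.} =
      # newProxy and the `proxy` parameter of newAsyncHttpClient are std/httpclient API;
      # a nil Proxy keeps the old direct-connection behaviour.
      let prx = newProxy("http://127.0.0.1:8118", auth = "user:password")  # placeholder proxy
      let client = newAsyncHttpClient(proxy = prx)
      try:
        let body = await client.getContent("https://example.com")  # fetched via the proxy
        echo "fetched ", body.len, " bytes through the proxy"
      finally:
        client.close()

    waitFor demo()

In the patch itself, setHttpProxy is called once during startup (see the src/nitter.nim hunk below), so pooled clients are created with the proxy already in place.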

src/nitter.nim
@@ -32,6 +32,7 @@ setCacheTimes(cfg)
 setHmacKey(cfg.hmacKey)
 setProxyEncoding(cfg.base64Media)
 setMaxHttpConns(cfg.httpMaxConns)
+setHttpProxy(cfg.proxy, cfg.proxyAuth)
 
 waitFor initRedisPool(cfg)
 stdout.write &"Connected to Redis at {cfg.redisHost}:{cfg.redisPort}\n"

src/types.nim
@@ -217,6 +217,8 @@ type
     base64Media*: bool
     minTokens*: int
     enableRss*: bool
+    proxy*: string
+    proxyAuth*: string
 
     rssCacheTime*: int
     listCacheTime*: int