[fix] searx.network: fix rare cases where LOOP is None

* searx.network.client.LOOP is initialized in a thread
* searx.network.__init__ imports LOOP, which may happen before the
  thread has initialized it, so the imported name can stay bound to None

This commit adds a new function, searx.network.client.get_loop(), which
looks the loop up at call time instead of at import time, to fix this issue.
Alexandre Flament 2021-04-27 10:03:19 +02:00 committed by Markus Heiser
parent f724d6f6f1
commit 283ae7bfad
3 changed files with 13 additions and 8 deletions
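
Background for the fix: "from .client import LOOP" copies whatever value is bound to LOOP at the moment the import runs, while calling client.get_loop() re-reads the module attribute each time. Since LOOP is assigned by a separate thread, an early import can capture None for good. A minimal, self-contained sketch of that difference (illustration only, not searx code):

    import threading
    import time

    LOOP = None  # filled in later by a background thread, as in searx.network.client

    def _init_loop():
        global LOOP
        time.sleep(0.1)          # simulate the time the thread needs to start up
        LOOP = "the event loop"  # stand-in for asyncio.new_event_loop()

    threading.Thread(target=_init_loop, daemon=True).start()

    def get_loop():
        # Looked up at call time, so callers see whatever the thread has stored.
        return LOOP

    snapshot = LOOP      # what "from .client import LOOP" captures: possibly None
    time.sleep(0.2)
    print(snapshot)      # None
    print(get_loop())    # the event loop

The three files below switch every use of the imported name to this call-time lookup.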

searx/network/__init__.py

@@ -9,7 +9,7 @@ import httpx
 import h2.exceptions
 
 from .network import get_network, initialize
-from .client import LOOP
+from .client import get_loop
 from .raise_for_httperror import raise_for_httperror
 
 # queue.SimpleQueue: Support Python 3.6
@@ -98,7 +98,7 @@ def request(method, url, **kwargs):
     network = get_context_network()
 
     # do request
-    future = asyncio.run_coroutine_threadsafe(network.request(method, url, **kwargs), LOOP)
+    future = asyncio.run_coroutine_threadsafe(network.request(method, url, **kwargs), get_loop())
     try:
         response = future.result(timeout)
     except concurrent.futures.TimeoutError as e:
@@ -179,7 +179,7 @@ def stream(method, url, **kwargs):
     """
     q = SimpleQueue()
     future = asyncio.run_coroutine_threadsafe(stream_chunk_to_queue(get_network(), q, method, url, **kwargs),
-                                              LOOP)
+                                              get_loop())
     chunk_or_exception = q.get()
     while chunk_or_exception is not None:
         if isinstance(chunk_or_exception, Exception):
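
Both changed call sites in this file use the same asyncio pattern: submit a coroutine to an event loop running in another thread and block on the returned concurrent.futures.Future with a timeout. A self-contained sketch of that pattern (generic example, not searx code):

    import asyncio
    import concurrent.futures
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def fetch(url):
        await asyncio.sleep(0.1)  # stand-in for an httpx request
        return 'response for ' + url

    # run_coroutine_threadsafe() returns a concurrent.futures.Future;
    # result() blocks the calling thread and raises TimeoutError when the
    # coroutine does not finish within the given number of seconds.
    future = asyncio.run_coroutine_threadsafe(fetch('https://example.org'), loop)
    try:
        print(future.result(timeout=3))
    except concurrent.futures.TimeoutError:
        future.cancel()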

searx/network/client.py

@@ -120,7 +120,6 @@ class AsyncHTTPTransportFixed(httpx.AsyncHTTPTransport):
 
 
 def get_transport_for_socks_proxy(verify, http2, local_address, proxy_url, limit, retries):
-    global LOOP, TRANSPORT_KWARGS
     # support socks5h (requests compatibility):
     # https://requests.readthedocs.io/en/master/user/advanced/#socks
     # socks5:// hostname is resolved on client side
@@ -136,7 +135,7 @@ def get_transport_for_socks_proxy(verify, http2, local_address, proxy_url, limit
     return AsyncProxyTransportFixed(proxy_type=proxy_type, proxy_host=proxy_host, proxy_port=proxy_port,
                                     username=proxy_username, password=proxy_password,
                                     rdns=rdns,
-                                    loop=LOOP,
+                                    loop=get_loop(),
                                     verify=verify,
                                     http2=http2,
                                     local_address=local_address,
@@ -192,6 +191,11 @@ def new_client(enable_http, verify, enable_http2,
     return httpx.AsyncClient(transport=transport, mounts=mounts, max_redirects=max_redirects)
 
 
+def get_loop():
+    global LOOP
+    return LOOP
+
+
 def init():
     # log
     for logger_name in ('hpack.hpack', 'hpack.table'):
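
The diff adds get_loop() but does not show how LOOP is created. A plausible sketch of the surrounding module, assuming init() starts a daemon thread that creates the loop, publishes it in LOOP and runs it forever (the body of init() here is an assumption; only get_loop() is taken from the diff):

    import asyncio
    import threading

    LOOP = None

    def get_loop():
        global LOOP
        return LOOP

    def init():
        # Assumed implementation: start a daemon thread that owns the event
        # loop; every other thread reaches the loop through get_loop().
        def loop_thread():
            global LOOP
            LOOP = asyncio.new_event_loop()
            LOOP.run_forever()

        thread = threading.Thread(target=loop_thread, name='asyncio_loop', daemon=True)
        thread.start()

get_loop() can still return None if it is called before the thread has run; the point of the change is that the lookup is no longer frozen at import time, so later calls see the initialized loop.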

searx/network/network.py

@@ -7,7 +7,7 @@ from itertools import cycle
 
 import httpx
 
-from .client import new_client, LOOP
+from .client import new_client, get_loop
 
 
 DEFAULT_NAME = '__DEFAULT__'
@@ -291,8 +291,9 @@ def done():
     So Network.aclose is called here using atexit.register
     """
    try:
-        if LOOP:
-            future = asyncio.run_coroutine_threadsafe(Network.aclose_all(), LOOP)
+        loop = get_loop()
+        if loop:
+            future = asyncio.run_coroutine_threadsafe(Network.aclose_all(), loop)
             # wait 3 seconds to close the HTTP clients
             future.result(3)
     finally:
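
done() above is an atexit-style shutdown hook: it submits an async cleanup coroutine to the background loop and waits a few seconds for it to complete. A generic sketch of that shutdown pattern (aclose_all() here is a placeholder, not the searx implementation):

    import asyncio
    import atexit
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def aclose_all():
        # Placeholder: in searx this closes every open httpx.AsyncClient.
        await asyncio.sleep(0)

    def done():
        try:
            if loop.is_running():
                future = asyncio.run_coroutine_threadsafe(aclose_all(), loop)
                # give the cleanup up to 3 seconds, as in the diff above
                future.result(3)
        finally:
            loop.call_soon_threadsafe(loop.stop)

    atexit.register(done)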