31 Commits

Author SHA1 Message Date
Alexander Bersenev
027ee2713b use tls-only mode in the default config 2019-10-08 14:44:09 +05:00
Alexander Bersenev
3c4c92cdb2 adjust stats duration bucket sizes 2019-10-08 14:42:03 +05:00
Alexander Bersenev
3ef826cd6b more logical name for connection duration 2019-09-21 23:55:53 +05:00
Alexander Bersenev
9ec1e543bd expose the length of connections as metrics 2019-09-21 23:17:00 +05:00
Alexander Bersenev
fae04ed3c7 do not export proxy links by default 2019-09-20 18:57:37 +05:00
Alexander Bersenev
25685f370c reword the link desc 2019-09-20 18:10:37 +05:00
Alexander Bersenev
1feb8e5fc7 export proxy links as metrics 2019-09-20 17:52:30 +05:00
Alexander Bersenev
5e488203a2 check for socks mode before importing uvloop 2019-09-20 17:26:30 +05:00
Alexander Bersenev
94fd98a1fb correct spelling 2019-09-20 17:19:33 +05:00
Alexander Bersenev
4c5b0803fa add upstream socks proxy consistency checks 2019-09-20 17:17:16 +05:00
Alexander Bersenev
8fa8aabc8e ability to specify socks user and pass 2019-09-20 16:13:06 +05:00
Alexander Bersenev
f2fbaa923b refactor upstream proxy handling 2019-09-20 16:06:59 +05:00
Alexander Bersenev
79eaabdd23 move socks handling in the config parsing 2019-09-20 15:40:33 +05:00
Alexander Bersenev
23c7b0d53b make the socks module optional 2019-09-20 15:38:18 +05:00
pasha-zzz
516600a32d Add support for upstream SOCKS5 proxy (#143) 2019-09-20 15:28:29 +05:00
Alexander Bersenev
5fcd1c0158 change metrics pushes to pulls 2019-09-19 02:27:57 +05:00
Alexander Bersenev
781549f37f update readme 2019-09-19 00:08:20 +05:00
Alexander Bersenev
dc1223fd90 add hanshake_timeout metric 2019-09-18 23:22:06 +05:00
Alexander Bersenev
0d52ae0bc7 rename metrics, add connects_all metric 2019-09-18 23:13:32 +05:00
Alexander Bersenev
2b1469985d add an ability to export metrics to prometheus 2019-09-18 19:44:19 +05:00
Alexander Bersenev
4784491800 use only half of the digest as a key for used digest 2019-09-13 17:20:32 +05:00
Alexander Bersenev
7a2c6b9825 cast client port to int 2019-09-13 01:19:19 +05:00
Alexander Bersenev
37307a98fb print replay attackers summary 2019-09-08 14:28:46 +05:00
Alexander Bersenev
02b39168c3 print the possible replay-attackers summary instead of every time 2019-09-08 14:12:06 +05:00
Alexander Bersenev
163e7b7cce print a message about getting the cert 2019-09-08 03:12:19 +05:00
Alexander Bersenev
1ed13d9efa get rid from annoying time skew messages, print summary instead 2019-09-08 02:01:23 +05:00
Alexander Bersenev
51c8d68271 disable the first bad packet heuristics if the tls-only mode activated, it has time-based protection instead 2019-09-08 01:09:44 +05:00
Alexander Bersenev
50cd74051f add a message if uvloop is found 2019-08-30 16:05:08 +05:00
Alexander Bersenev
1d826866d1 print time skew message as one line 2019-08-26 17:00:34 +05:00
Alexander Bersenev
ff6b826e13 do not output canceled errors to get rid from scarry traceback on the proxy termination 2019-08-26 16:51:48 +05:00
Alexander Bersenev
3315ac1df6 add one more param into the undocummented mode of launch 2019-08-26 16:07:35 +05:00
3 changed files with 265 additions and 33 deletions

View File

@@ -1,6 +1,6 @@
# Async MTProto Proxy #
Fast and simple to setup mtproto proxy written on Python.
Fast and simple to setup MTProto proxy written in Python.
## Starting Up ##
@@ -26,4 +26,4 @@ The proxy can be launched:
- with a custom config: `python3 mtprotoproxy.py [configfile]`
- several times, clients will be automatically balanced between instances
- with uvloop module to get an extra speed boost
- with runtime statistics exported for [Prometheus](https://prometheus.io/): using [prometheus](https://github.com/alexbers/mtprotoproxy/tree/prometheus) branch
- with runtime statistics exported to [Prometheus](https://prometheus.io/)

View File

@@ -12,7 +12,7 @@ SECURE_ONLY = True
# Makes the proxy even harder to detect
# Compatible only with the recent clients
# TLS_ONLY = True
TLS_ONLY = True
# The domain for TLS, bad clients are proxied there
# Use random existing domain, proxy checks it on start

View File

@@ -76,6 +76,7 @@ PADDING_FILLER = b"\x04\x00\x00\x00"
MIN_MSG_LEN = 12
MAX_MSG_LEN = 2 ** 24
STAT_DURATION_BUCKETS = [0.1, 0.5, 1, 2, 5, 15, 60, 300, 600, 1800, 2**31 - 1]
my_ip_info = {"ipv4": None, "ipv6": None}
used_handshakes = collections.OrderedDict()
@@ -83,6 +84,11 @@ disable_middle_proxy = False
is_time_skewed = False
fake_cert_len = random.randrange(1024, 4096)
mask_host_cached_ip = None
last_clients_with_time_skew = {}
last_clients_with_first_pkt_error = collections.Counter()
last_clients_with_same_handshake = collections.Counter()
proxy_start_time = 0
proxy_links = []
config = {}
@@ -103,6 +109,10 @@ def init_config():
conf_dict["USERS"] = {"user%d" % i: secrets[i].zfill(32) for i in range(len(secrets))}
if len(sys.argv) > 3:
conf_dict["AD_TAG"] = sys.argv[3]
if len(sys.argv) > 4:
conf_dict["TLS_DOMAIN"] = sys.argv[4]
conf_dict["TLS_ONLY"] = True
conf_dict["SECURE_ONLY"] = True
conf_dict = {k: v for k, v in conf_dict.items() if k.isupper()}
@@ -142,6 +152,16 @@ def init_config():
# the next host's port to forward bad clients
conf_dict.setdefault("MASK_PORT", 443)
# use upstream SOCKS5 proxy
conf_dict.setdefault("SOCKS5_HOST", None)
conf_dict.setdefault("SOCKS5_PORT", None)
conf_dict.setdefault("SOCKS5_USER", None)
conf_dict.setdefault("SOCKS5_PASS", None)
if conf_dict["SOCKS5_HOST"] and conf_dict["SOCKS5_PORT"]:
# Disable the middle proxy if using socks, they are not compatible
conf_dict["USE_MIDDLE_PROXY"] = False
# user tcp connection limits, the mapping from name to the integer limit
# one client can create many tcp connections, up to 8
conf_dict.setdefault("USER_MAX_TCP_CONNS", {})
@@ -156,10 +176,10 @@ def init_config():
conf_dict.setdefault("USER_DATA_QUOTA", {})
# length of used handshake randoms for active fingerprinting protection, zero to disable
conf_dict.setdefault("REPLAY_CHECK_LEN", 32768)
conf_dict.setdefault("REPLAY_CHECK_LEN", 65536)
# block bad first packets to even more protect against replay-based fingerprinting
conf_dict.setdefault("BLOCK_IF_FIRST_PKT_BAD", True)
conf_dict.setdefault("BLOCK_IF_FIRST_PKT_BAD", not conf_dict["TLS_ONLY"])
# delay in seconds between stats printing
conf_dict.setdefault("STATS_PRINT_PERIOD", 600)
@@ -201,10 +221,40 @@ def init_config():
# listen unix socket
conf_dict.setdefault("LISTEN_UNIX_SOCK", "")
# prometheus exporter listen port, use some random port here
conf_dict.setdefault("METRICS_PORT", None)
# prometheus listen addr ipv4
conf_dict.setdefault("METRICS_LISTEN_ADDR_IPV4", "0.0.0.0")
# prometheus listen addr ipv6
conf_dict.setdefault("METRICS_LISTEN_ADDR_IPV6", None)
# prometheus scrapers whitelist
conf_dict.setdefault("METRICS_WHITELIST", ["127.0.0.1", "::1"])
# export proxy link to prometheus
conf_dict.setdefault("METRICS_EXPORT_LINKS", False)
# allow access to config by attributes
config = type("config", (dict,), conf_dict)(conf_dict)
def apply_upstream_proxy_settings():
    """(Re)apply the upstream SOCKS5 proxy settings in place.

    When SOCKS5_HOST and SOCKS5_PORT are configured, monkey-patches
    socket.socket with socks.socksocket, stashing the pristine class in
    socket.origsocket; when SOCKS is not configured (e.g. after a config
    reload), restores the original socket class.
    """
    socks_configured = config.SOCKS5_HOST and config.SOCKS5_PORT
    if socks_configured:
        import socks
        print_err("Socket-proxy mode activated, it is incompatible with advertising and uvloop")
        socks.set_default_proxy(socks.PROXY_TYPE_SOCKS5, config.SOCKS5_HOST, config.SOCKS5_PORT,
                                username=config.SOCKS5_USER, password=config.SOCKS5_PASS)
        # patch only once so origsocket always holds the unpatched class
        if not hasattr(socket, "origsocket"):
            socket.origsocket = socket.socket
            socket.socket = socks.socksocket
    elif hasattr(socket, "origsocket"):
        # SOCKS was turned off on reload: undo the monkey-patch
        socket.socket = socket.origsocket
        del socket.origsocket
def try_use_cryptography_module():
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
@@ -294,24 +344,45 @@ def print_err(*params):
def init_stats():
    """Initialize the global stat containers.

    `stats` is a proxy-wide Counter; `user_stats` maps every configured
    user name to its own Counter.
    """
    # NOTE(review): the source span interleaved the pre-refactor
    # `stats = {user: ...}` assignment with the Counter-based layout;
    # only the new layout is kept.
    global stats
    global user_stats
    stats = collections.Counter()
    user_stats = {user: collections.Counter() for user in config.USERS}
def init_proxy_start_time():
    """Record the proxy start timestamp (used for the uptime metric)."""
    global proxy_start_time
    proxy_start_time = time.time()


def update_stats(**kw_stats):
    """Add the given keyword counters to the proxy-wide `stats` Counter."""
    # NOTE(review): the stale pre-refactor signature
    # `def update_stats(user, connects=0, ...)` was interleaved here;
    # only the keyword-based version is kept.
    global stats
    stats.update(**kw_stats)
def update_user_stats(user, **kw_stats):
    """Add the given keyword counters to `user`'s per-user Counter,
    creating the Counter on the user's first appearance."""
    global user_stats
    counter = user_stats.setdefault(user, collections.Counter())
    counter.update(**kw_stats)
def update_durations(duration):
    """Record a finished connection's duration into the
    `connects_with_duration_le_<bucket>` histogram-style counters.

    The duration is attributed to the first bucket it does not exceed;
    the last bucket (2**31 - 1) catches everything else.
    """
    # NOTE(review): stale pre-refactor per-user lines (referencing an
    # undefined `user`) were interleaved here; only the bucket logic
    # is kept.
    for bucket in STAT_DURATION_BUCKETS:
        if duration <= bucket:
            break
    update_stats(**{"connects_with_duration_le_%s" % str(bucket): 1})
def get_curr_connects_count():
    """Return the total number of currently open client connections,
    summed over all users."""
    # NOTE(review): old (`stats`) and new (`user_stats`) loop lines were
    # interleaved in the source span; the user_stats version is kept,
    # expressed as an idiomatic sum().
    global user_stats
    return sum(stat["curr_connects"] for stat in user_stats.values())
@@ -825,6 +896,8 @@ async def handle_bad_client(reader_clt, writer_clt, handshake):
global mask_host_cached_ip
update_stats(connects_bad=1)
if writer_clt.transport.is_closing():
return
@@ -899,6 +972,8 @@ async def handle_bad_client(reader_clt, writer_clt, handshake):
async def handle_fake_tls_handshake(handshake, reader, writer, peer):
global used_handshakes
global last_clients_with_time_skew
global last_clients_with_same_handshake
global fake_cert_len
TIME_SKEW_MIN = -20 * 60
@@ -910,6 +985,7 @@ async def handle_fake_tls_handshake(handshake, reader, writer, peer):
TLS_APP_HTTP2_HDR = b"\x17" + TLS_VERS
DIGEST_LEN = 32
DIGEST_HALFLEN = 16
DIGEST_POS = 11
SESSION_ID_LEN_POS = DIGEST_POS + DIGEST_LEN
@@ -920,8 +996,8 @@ async def handle_fake_tls_handshake(handshake, reader, writer, peer):
digest = handshake[DIGEST_POS: DIGEST_POS + DIGEST_LEN]
if digest in used_handshakes:
print_err("Active TLS fingerprinting detected from %s, handling it" % peer[0])
if digest[:DIGEST_HALFLEN] in used_handshakes:
last_clients_with_same_handshake[peer[0]] += 1
return False
sess_id_len = handshake[SESSION_ID_LEN_POS]
@@ -944,8 +1020,7 @@ async def handle_fake_tls_handshake(handshake, reader, writer, peer):
# some clients fail to read unix time and send the time since boot instead
client_time_is_small = timestamp < 60*60*24*1000
if not client_time_is_ok and not is_time_skewed and not client_time_is_small:
print_err("Client with time skew detected from %s, can be a replay-attack" % peer[0])
print_err("The clocks were %d minutes behind" % ((time.time() - timestamp) // 60))
last_clients_with_time_skew[peer[0]] = (time.time() - timestamp) // 60
continue
http_data = myrandom.getrandbytes(fake_cert_len)
@@ -967,7 +1042,7 @@ async def handle_fake_tls_handshake(handshake, reader, writer, peer):
if config.REPLAY_CHECK_LEN > 0:
while len(used_handshakes) >= config.REPLAY_CHECK_LEN:
used_handshakes.popitem(last=False)
used_handshakes[digest] = True
used_handshakes[digest[:DIGEST_HALFLEN]] = True
reader = FakeTLSStreamReader(reader)
writer = FakeTLSStreamWriter(writer)
@@ -997,7 +1072,7 @@ async def handle_proxy_protocol(reader, peer=None):
if proxy_fam in (PROXY_TCP4, PROXY_TCP6):
if len(proxy_addr) == 4:
src_addr = proxy_addr[0].decode('ascii')
src_port = proxy_addr[2].decode('ascii')
src_port = int(proxy_addr[2].decode('ascii'))
return (src_addr, src_port)
elif proxy_fam == PROXY_UNKNOWN:
return peer
@@ -1033,6 +1108,7 @@ async def handle_proxy_protocol(reader, peer=None):
async def handle_handshake(reader, writer):
global used_handshakes
global last_clients_with_same_handshake
TLS_START_BYTES = b"\x16\x03\x01\x02\x00\x01\x00\x01\xfc\x03\x03"
@@ -1078,7 +1154,7 @@ async def handle_handshake(reader, writer):
enc_prekey, enc_iv = enc_prekey_and_iv[:PREKEY_LEN], enc_prekey_and_iv[PREKEY_LEN:]
if dec_prekey_and_iv in used_handshakes:
print_err("Active fingerprinting detected from %s, handling it" % peer[0])
last_clients_with_same_handshake[peer[0]] += 1
await handle_bad_client(reader, writer, handshake)
return False
@@ -1364,10 +1440,13 @@ async def handle_client(reader_clt, writer_clt):
set_ack_timeout(writer_clt.get_extra_info("socket"), config.CLIENT_ACK_TIMEOUT)
set_bufsizes(writer_clt.get_extra_info("socket"), get_to_tg_bufsize(), get_to_clt_bufsize())
update_stats(connects_all=1)
try:
clt_data = await asyncio.wait_for(handle_handshake(reader_clt, writer_clt),
timeout=config.CLIENT_HANDSHAKE_TIMEOUT)
except asyncio.TimeoutError:
update_stats(handshake_timeouts=1)
return
if not clt_data:
@@ -1376,7 +1455,7 @@ async def handle_client(reader_clt, writer_clt):
reader_clt, writer_clt, proto_tag, user, dc_idx, enc_key_and_iv, peer = clt_data
cl_ip, cl_port = peer
update_stats(user, connects=1)
update_user_stats(user, connects=1)
connect_directly = (not config.USE_MIDDLE_PROXY or disable_middle_proxy)
@@ -1419,6 +1498,7 @@ async def handle_client(reader_clt, writer_clt):
return
async def connect_reader_to_writer(rd, wr, user, rd_buf_size, block_if_first_pkt_bad=False):
global last_clients_with_first_pkt_error
is_first_pkt = True
try:
while True:
@@ -1434,7 +1514,7 @@ async def handle_client(reader_clt, writer_clt):
ERR_PKT_DATA = b'l\xfe\xff\xff'
if block_if_first_pkt_bad and data == ERR_PKT_DATA:
print_err("Active fingerprinting detected from %s, dropping it" % cl_ip)
last_clients_with_first_pkt_error[cl_ip] += 1
wr.write_eof()
await wr.drain()
@@ -1445,7 +1525,7 @@ async def handle_client(reader_clt, writer_clt):
await wr.drain()
return
else:
update_stats(user, octets=len(data), msgs=1)
update_user_stats(user, octets=len(data), msgs=1)
wr.write(data, extra)
await wr.drain()
except (OSError, asyncio.streams.IncompleteReadError) as e:
@@ -1458,11 +1538,11 @@ async def handle_client(reader_clt, writer_clt):
task_tg_to_clt = asyncio.ensure_future(tg_to_clt)
task_clt_to_tg = asyncio.ensure_future(clt_to_tg)
update_stats(user, curr_connects=1)
update_user_stats(user, curr_connects=1)
tcp_limit_hit = (
user in config.USER_MAX_TCP_CONNS and
stats[user]["curr_connects"] > config.USER_MAX_TCP_CONNS[user]
user_stats[user]["curr_connects"] > config.USER_MAX_TCP_CONNS[user]
)
user_expired = (
@@ -1472,13 +1552,15 @@ async def handle_client(reader_clt, writer_clt):
user_data_quota_hit = (
user in config.USER_DATA_QUOTA and
stats[user]["octets"] > config.USER_DATA_QUOTA[user]
user_stats[user]["octets"] > config.USER_DATA_QUOTA[user]
)
if (not tcp_limit_hit) and (not user_expired) and (not user_data_quota_hit):
start = time.time()
await asyncio.wait([task_tg_to_clt, task_clt_to_tg], return_when=asyncio.FIRST_COMPLETED)
update_durations(time.time() - start)
update_stats(user, curr_connects=-1)
update_user_stats(user, curr_connects=-1)
task_tg_to_clt.cancel()
task_clt_to_tg.cancel()
@@ -1489,7 +1571,9 @@ async def handle_client(reader_clt, writer_clt):
async def handle_client_wrapper(reader, writer):
try:
await handle_client(reader, writer)
except (asyncio.IncompleteReadError, ConnectionResetError, TimeoutError):
except (asyncio.IncompleteReadError, asyncio.CancelledError):
pass
except (ConnectionResetError, TimeoutError):
pass
except Exception:
traceback.print_exc()
@@ -1497,18 +1581,138 @@ async def handle_client_wrapper(reader, writer):
writer.transport.abort()
async def stats_printer():
def make_metrics_pkt(metrics):
    """Render metrics as a complete HTTP response in the Prometheus
    text exposition format (version 0.0.4).

    `metrics` is a list of [name, type, description, value] entries.
    A plain value is emitted as ``name value``; a dict value is emitted
    as ``name{tag="x",...} value``, taking the sample from its "val"
    key and every other key as a label.  HELP/TYPE comment lines are
    written only once per metric name.
    """
    body_lines = []
    seen_names = set()
    for name, m_type, desc, val in metrics:
        if name not in seen_names:
            seen_names.add(name)
            body_lines.append("# HELP %s %s" % (name, desc))
            body_lines.append("# TYPE %s %s" % (name, m_type))
        if not isinstance(val, dict):
            body_lines.append("%s %s" % (name, val))
            continue
        # labeled sample: every key except "val" becomes a label,
        # with double quotes escaped inside label values
        labels = ",".join(
            '%s="%s"' % (tag, tag_val.replace('"', r'\"'))
            for tag, tag_val in val.items() if tag != "val"
        )
        body_lines.append("%s{%s} %s" % (name, labels, val["val"]))

    body = "\n".join(body_lines) + "\n"
    headers = [
        "HTTP/1.1 200 OK",
        "Content-Length: %d" % len(body),
        "Content-Type: text/plain; version=0.0.4; charset=utf-8",
        "Date: %s" % time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()),
    ]
    return "\r\n".join(headers) + "\r\n\r\n" + body
async def handle_metrics(reader, writer):
    """Serve a single Prometheus scrape request.

    Peers outside METRICS_WHITELIST are dropped immediately; otherwise
    one HTTP response with proxy-wide counters, duration buckets,
    optional proxy-link info and per-user stats is written, and the
    connection is closed.
    """
    global stats
    global user_stats
    global my_ip_info
    global proxy_start_time
    global proxy_links
    global last_clients_with_time_skew
    global last_clients_with_first_pkt_error
    global last_clients_with_same_handshake

    peer_ip = writer.get_extra_info("peername")[0]
    if peer_ip not in config.METRICS_WHITELIST:
        writer.close()
        return

    try:
        metrics = [
            ["uptime", "counter", "proxy uptime", time.time() - proxy_start_time],
            ["connects_bad", "counter", "connects with bad secret",
             stats["connects_bad"]],
            ["connects_all", "counter", "incoming connects", stats["connects_all"]],
            ["handshake_timeouts", "counter", "number of timed out handshakes",
             stats["handshake_timeouts"]],
        ]

        if config.METRICS_EXPORT_LINKS:
            for link in proxy_links:
                # copy the link dict and attach the constant sample value
                link_metric = dict(link, val=1)
                metrics.append(["proxy_link_info", "counter",
                                "the proxy link info", link_metric])

        # histogram-like duration buckets; the last one renders as "+Inf"
        lower_bound = 0
        for bucket in STAT_DURATION_BUCKETS:
            upper_bound = "+Inf" if bucket == STAT_DURATION_BUCKETS[-1] else bucket
            sample = {
                "bucket": "%s-%s" % (lower_bound, upper_bound),
                "val": stats["connects_with_duration_le_%s" % str(bucket)],
            }
            metrics.append(["connects_by_duration", "counter",
                            "connects by duration", sample])
            lower_bound = upper_bound

        per_user_metrics = [
            ["user_connects", "counter", "user connects", "connects"],
            ["user_connects_curr", "gauge", "current user connects", "curr_connects"],
            ["user_octets", "counter", "octets proxied for user", "octets"],
            ["user_msgs", "counter", "msgs proxied for user", "msgs"],
        ]
        for m_name, m_type, m_desc, stat_key in per_user_metrics:
            for user, stat in user_stats.items():
                metrics.append([m_name, m_type, m_desc,
                                {"user": user, "val": stat[stat_key]}])

        writer.write(make_metrics_pkt(metrics).encode())
        await writer.drain()
    except Exception:
        traceback.print_exc()
    finally:
        writer.close()
async def stats_printer():
    """Periodically print per-user traffic stats plus summaries of
    likely replay-attack clients, clearing each summary after it is
    printed.  Runs forever; the period is config.STATS_PRINT_PERIOD.
    """
    # NOTE(review): the stale pre-refactor loop over `stats.items()`
    # was interleaved here; only the `user_stats` loop is kept.
    global user_stats
    global last_clients_with_time_skew
    global last_clients_with_first_pkt_error
    global last_clients_with_same_handshake

    while True:
        await asyncio.sleep(config.STATS_PRINT_PERIOD)

        print("Stats for", time.strftime("%d.%m.%Y %H:%M:%S"))
        for user, stat in user_stats.items():
            print("%s: %d connects (%d current), %.2f MB, %d msgs" % (
                user, stat["connects"], stat["curr_connects"],
                stat["octets"] / 1000000, stat["msgs"]))
        print(flush=True)

        if last_clients_with_time_skew:
            print("Clients with time skew (possible replay-attackers):")
            for ip, skew_minutes in last_clients_with_time_skew.items():
                print("%s, clocks were %d minutes behind" % (ip, skew_minutes))
            print(flush=True)
            last_clients_with_time_skew.clear()

        if last_clients_with_first_pkt_error:
            print("Clients with error on the first packet (possible replay-attackers):")
            for ip, times in last_clients_with_first_pkt_error.items():
                print("%s, %d times" % (ip, times))
            print(flush=True)
            last_clients_with_first_pkt_error.clear()

        if last_clients_with_same_handshake:
            print("Clients with duplicate handshake (likely replay-attackers):")
            for ip, times in last_clients_with_same_handshake.items():
                print("%s, %d times" % (ip, times))
            print(flush=True)
            last_clients_with_same_handshake.clear()
async def make_https_req(url, host="core.telegram.org"):
""" Make request, return resp body and headers. """
@@ -1601,6 +1805,8 @@ async def get_mask_host_cert_len():
if cert:
if len(cert) != fake_cert_len:
fake_cert_len = len(cert)
print_err("Got cert from the MASK_HOST %s, its length is %d" %
(config.MASK_HOST, fake_cert_len))
else:
print_err("The MASK_HOST %s is not TLS 1.3 host, this is not recommended" %
config.MASK_HOST)
@@ -1753,6 +1959,7 @@ def init_ip_info():
def print_tg_info():
global my_ip_info
global proxy_links
print_default_warning = False
@@ -1766,17 +1973,23 @@ def print_tg_info():
if not ip_addrs:
ip_addrs = ["YOUR_IP"]
proxy_links = []
for user, secret in sorted(config.USERS.items(), key=lambda x: x[0]):
for ip in ip_addrs:
if not config.TLS_ONLY:
if not config.SECURE_ONLY:
params = {"server": ip, "port": config.PORT, "secret": secret}
params_encodeded = urllib.parse.urlencode(params, safe=':')
print("{}: tg://proxy?{}".format(user, params_encodeded), flush=True)
classic_link = "tg://proxy?{}".format(params_encodeded)
proxy_links.append({"user": user, "link": classic_link})
print("{}: {}".format(user, classic_link), flush=True)
params = {"server": ip, "port": config.PORT, "secret": "dd" + secret}
params_encodeded = urllib.parse.urlencode(params, safe=':')
print("{}: tg://proxy?{}".format(user, params_encodeded), flush=True)
dd_link = "tg://proxy?{}".format(params_encodeded)
proxy_links.append({"user": user, "link": dd_link})
print("{}: {}".format(user, dd_link), flush=True)
tls_secret = "ee" + secret + config.TLS_DOMAIN.encode().hex()
# the base64 links is buggy on ios
@@ -1784,7 +1997,9 @@ def print_tg_info():
# tls_secret_base64 = base64.urlsafe_b64encode(tls_secret)
params = {"server": ip, "port": config.PORT, "secret": tls_secret}
params_encodeded = urllib.parse.urlencode(params, safe=':')
print("{}: tg://proxy?{} (new)".format(user, params_encodeded), flush=True)
tls_link = "tg://proxy?{}".format(params_encodeded)
proxy_links.append({"user": user, "link": tls_link})
print("{}: {}".format(user, tls_link), flush=True)
if secret in ["00000000000000000000000000000000", "0123456789abcdef0123456789abcdef"]:
msg = "The default secret {} is used, this is not recommended".format(secret)
@@ -1825,6 +2040,7 @@ def setup_signals():
if hasattr(signal, 'SIGUSR2'):
def reload_signal(signum, frame):
init_config()
apply_upstream_proxy_settings()
print("Config reloaded", flush=True, file=sys.stderr)
print_tg_info()
@@ -1832,9 +2048,13 @@ def setup_signals():
def try_setup_uvloop():
    """Install uvloop's event loop policy for better performance.

    Skipped entirely in SOCKS mode, and silently a no-op when the
    uvloop module is not installed.
    """
    if config.SOCKS5_HOST and config.SOCKS5_PORT:
        # socks mode is not compatible with uvloop
        return
    try:
        import uvloop
    except ImportError:
        return
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    print_err("Found uvloop, using it for optimal performance")
@@ -1881,6 +2101,7 @@ def main():
try_setup_uvloop()
init_stats()
init_proxy_start_time()
if sys.platform == "win32":
loop = asyncio.ProactorEventLoop()
@@ -1927,6 +2148,16 @@ def main():
servers.append(loop.run_until_complete(task))
os.chmod(config.LISTEN_UNIX_SOCK, 0o666)
if config.METRICS_PORT is not None:
if config.METRICS_LISTEN_ADDR_IPV4:
task = asyncio.start_server(handle_metrics, config.METRICS_LISTEN_ADDR_IPV4,
config.METRICS_PORT, loop=loop)
servers.append(loop.run_until_complete(task))
if config.METRICS_LISTEN_ADDR_IPV6 and socket.has_ipv6:
task = asyncio.start_server(handle_metrics, config.METRICS_LISTEN_ADDR_IPV6,
config.METRICS_PORT, loop=loop)
servers.append(loop.run_until_complete(task))
try:
loop.run_forever()
except KeyboardInterrupt:
@@ -1947,6 +2178,7 @@ def main():
if __name__ == "__main__":
    # start-up sequence: parse config first (everything reads it), then
    # apply the SOCKS socket patch, detect own IPs, print tg:// links,
    # and finally enter the event loop
    init_config()
    apply_upstream_proxy_settings()
    init_ip_info()
    print_tg_info()
    main()