diff --git a/bot/__init__.py b/bot/__init__.py index 023ac519d84..47666e53db5 100644 --- a/bot/__init__.py +++ b/bot/__init__.py @@ -5,6 +5,7 @@ import random import string import subprocess +import requests import aria2p import qbittorrentapi as qba @@ -35,9 +36,13 @@ CONFIG_FILE_URL = os.environ.get('CONFIG_FILE_URL', None) if CONFIG_FILE_URL is not None: - out = subprocess.run(["wget", "-q", "-O", "config.env", CONFIG_FILE_URL]) - if out.returncode != 0: - logging.error(out) + res = requests.get(CONFIG_FILE_URL) + if res.status_code == 200: + with open('config.env', 'wb') as f: + f.truncate(0) + f.write(res.content) + else: + logging.error(res.status_code) load_dotenv('config.env') @@ -48,7 +53,9 @@ subprocess.run(["qbittorrent-nox", "-d", "--profile=."]) Interval = [] - +DRIVES_NAMES = [] +DRIVES_IDS = [] +INDEX_URLS = [] def getConfig(name: str): return os.environ[name] @@ -60,9 +67,9 @@ def mktable(): sql = "CREATE TABLE users (uid bigint, sudo boolean DEFAULT FALSE);" cur.execute(sql) conn.commit() - LOGGER.info("Table Created!") + logging.info("Table Created!") except Error as e: - LOGGER.error(e) + logging.error(e) exit(1) try: @@ -88,7 +95,7 @@ def get_client() -> qba.TorrentsAPIMixIn: #qb_client.application.set_preferences({"disk_cache":64, "incomplete_files_ext":True, "max_connec":3000, "max_connec_per_torrent":300, "async_io_threads":8, "preallocate_all":True, "upnp":True, "dl_limit":-1, "up_limit":-1, "dht":True, "pex":True, "lsd":True, "encryption":0, "queueing_enabled":True, "max_active_downloads":15, "max_active_torrents":50, "dont_count_slow_torrents":True, "bittorrent_protocol":0, "recheck_completed_torrents":True, "enable_multi_connections_from_same_ip":True, "slow_torrent_dl_rate_threshold":100,"slow_torrent_inactive_timer":600}) return qb_client except qba.LoginFailed as e: - LOGGER.error(str(e)) + logging.error(str(e)) return None @@ -222,8 +229,12 @@ def get_client() -> qba.TorrentsAPIMixIn: INDEX_URL = getConfig('INDEX_URL') if 
len(INDEX_URL) == 0: INDEX_URL = None + INDEX_URLS.append(None) + else: + INDEX_URLS.append(INDEX_URL) except KeyError: INDEX_URL = None + INDEX_URLS.append(None) try: TORRENT_DIRECT_LIMIT = getConfig('TORRENT_DIRECT_LIMIT') if len(TORRENT_DIRECT_LIMIT) == 0: @@ -332,31 +343,70 @@ def get_client() -> qba.TorrentsAPIMixIn: if len(SERVER_PORT) == 0: SERVER_PORT = None except KeyError: - logging.warning('SERVER_PORT not provided!') + if IS_VPS: + logging.warning('SERVER_PORT not provided!') SERVER_PORT = None try: TOKEN_PICKLE_URL = getConfig('TOKEN_PICKLE_URL') if len(TOKEN_PICKLE_URL) == 0: TOKEN_PICKLE_URL = None else: - out = subprocess.run(["wget", "-q", "-O", "token.pickle", TOKEN_PICKLE_URL]) - if out.returncode != 0: - logging.error(out) + res = requests.get(TOKEN_PICKLE_URL) + if res.status_code == 200: + with open('token.pickle', 'wb') as f: + f.truncate(0) + f.write(res.content) + else: + logging.error(res.status_code) + raise KeyError except KeyError: - TOKEN_PICKLE_URL = None + pass try: ACCOUNTS_ZIP_URL = getConfig('ACCOUNTS_ZIP_URL') if len(ACCOUNTS_ZIP_URL) == 0: ACCOUNTS_ZIP_URL = None else: - out = subprocess.run(["wget", "-q", "-O", "accounts.zip", ACCOUNTS_ZIP_URL]) - if out.returncode != 0: - logging.error(out) + res = requests.get(ACCOUNTS_ZIP_URL) + if res.status_code == 200: + with open('accounts.zip', 'wb') as f: + f.truncate(0) + f.write(res.content) + else: + logging.error(res.status_code) raise KeyError subprocess.run(["unzip", "-q", "-o", "accounts.zip"]) os.remove("accounts.zip") except KeyError: - ACCOUNTS_ZIP_URL = None + pass +try: + MULTI_SEARCH_URL = getConfig('MULTI_SEARCH_URL') + if len(MULTI_SEARCH_URL) == 0: + MULTI_SEARCH_URL = None + else: + res = requests.get(MULTI_SEARCH_URL) + if res.status_code == 200: + with open('drive_folder', 'wb') as f: + f.truncate(0) + f.write(res.content) + else: + logging.error(res.status_code) + raise KeyError +except KeyError: + pass + +DRIVES_NAMES.append("Main") +DRIVES_IDS.append(parent_id) 
+if os.path.exists('drive_folder'): + with open('drive_folder', 'r+') as f: + lines = f.readlines() + for line in lines: + temp = line.strip().split() + DRIVES_NAMES.append(temp[0].replace("_", " ")) + DRIVES_IDS.append(temp[1]) + try: + INDEX_URLS.append(temp[2]) + except IndexError as e: + INDEX_URLS.append(None) updater = tg.Updater(token=BOT_TOKEN) bot = updater.bot diff --git a/bot/helper/mirror_utils/download_utils/aria2_download.py b/bot/helper/mirror_utils/download_utils/aria2_download.py index 115065aaad4..27ea0abce0c 100644 --- a/bot/helper/mirror_utils/download_utils/aria2_download.py +++ b/bot/helper/mirror_utils/download_utils/aria2_download.py @@ -16,7 +16,7 @@ def __init__(self): @new_thread def __onDownloadStarted(self, api, gid): if STOP_DUPLICATE or TORRENT_DIRECT_LIMIT is not None or TAR_UNZIP_LIMIT is not None: - sleep(2) + sleep(1) dl = getDownloadByGid(gid) download = aria2.get_download(gid) if STOP_DUPLICATE and dl is not None: @@ -27,14 +27,15 @@ def __onDownloadStarted(self, api, gid): if dl.getListener().extract: smsg = None else: - gdrive = GoogleDriveHelper(None) - smsg, button = gdrive.drive_list(sname) + gdrive = GoogleDriveHelper() + smsg, button = gdrive.drive_list(sname, True) if smsg: dl.getListener().onDownloadError('File/Folder already available in Drive.\n\n') aria2.remove([download], force=True) sendMarkup("Here are the search results:", dl.getListener().bot, dl.getListener().update, button) return if (TORRENT_DIRECT_LIMIT is not None or TAR_UNZIP_LIMIT is not None) and dl is not None: + sleep(1) size = aria2.get_download(gid).total_length if dl.getListener().isTar or dl.getListener().extract: is_tar_ext = True diff --git a/bot/helper/mirror_utils/download_utils/mega_downloader.py b/bot/helper/mirror_utils/download_utils/mega_downloader.py index 9c651c85b00..f8e6e7b679f 100644 --- a/bot/helper/mirror_utils/download_utils/mega_downloader.py +++ b/bot/helper/mirror_utils/download_utils/mega_downloader.py @@ -176,7 +176,7 @@ def 
add_download(mega_link: str, path: str, listener): smsg = None else: gd = GoogleDriveHelper() - smsg, button = gd.drive_list(mname) + smsg, button = gd.drive_list(mname, True) if smsg: msg1 = "File/Folder is already available in Drive.\nHere are the search results:" sendMarkup(msg1, listener.bot, listener.update, button) diff --git a/bot/helper/mirror_utils/download_utils/telegram_downloader.py b/bot/helper/mirror_utils/download_utils/telegram_downloader.py index 9a3daba11a5..80e4750c537 100644 --- a/bot/helper/mirror_utils/download_utils/telegram_downloader.py +++ b/bot/helper/mirror_utils/download_utils/telegram_downloader.py @@ -107,7 +107,7 @@ def add_download(self, message, path, filename): smsg = None else: gd = GoogleDriveHelper() - smsg, button = gd.drive_list(name) + smsg, button = gd.drive_list(name, True) if smsg: sendMarkup("File/Folder is already available in Drive.\nHere are the search results:", self.__listener.bot, self.__listener.update, button) return diff --git a/bot/helper/mirror_utils/upload_utils/gdriveTools.py b/bot/helper/mirror_utils/upload_utils/gdriveTools.py index dfc0333e1c7..7a9c51d6617 100644 --- a/bot/helper/mirror_utils/upload_utils/gdriveTools.py +++ b/bot/helper/mirror_utils/upload_utils/gdriveTools.py @@ -23,7 +23,7 @@ from bot.helper.telegram_helper import button_build from telegraph import Telegraph from bot import parent_id, DOWNLOAD_DIR, IS_TEAM_DRIVE, INDEX_URL, \ - USE_SERVICE_ACCOUNTS, telegraph_token, BUTTON_FOUR_NAME, BUTTON_FOUR_URL, BUTTON_FIVE_NAME, BUTTON_FIVE_URL, BUTTON_SIX_NAME, BUTTON_SIX_URL, SHORTENER, SHORTENER_API, VIEW_LINK + USE_SERVICE_ACCOUNTS, telegraph_token, BUTTON_FOUR_NAME, BUTTON_FOUR_URL, BUTTON_FIVE_NAME, BUTTON_FIVE_URL, BUTTON_SIX_NAME, BUTTON_SIX_URL, SHORTENER, SHORTENER_API, VIEW_LINK, DRIVES_NAMES, DRIVES_IDS, INDEX_URLS from bot.helper.ext_utils.bot_utils import get_readable_file_size, setInterval from bot.helper.ext_utils.fs_utils import get_mime_type, get_path_size from 
bot.helper.ext_utils.shortenurl import short_url @@ -372,10 +372,11 @@ def getFilesByFolderId(self,folder_id): q=q, spaces='drive', pageSize=200, - fields='nextPageToken, files(id, name, mimeType,size)', corpora='allDrives', orderBy='folder, name', + fields='nextPageToken, files(id, name, mimeType,size)', + corpora='allDrives', + orderBy='folder, name', pageToken=page_token).execute() - for file in response.get('files', []): - files.append(file) + files.extend(response.get('files', [])) page_token = response.get('nextPageToken', None) if page_token is None: break @@ -593,18 +594,24 @@ def edit_telegraph(self): return - def escapes(self, str): - chars = ['\\', "'", '"', r'\a', r'\b', r'\f', r'\n', r'\r', r'\t'] - for char in chars: - str = str.replace(char, '\\'+char) - return str + def escapes(self, str): + chars = ['\\', "'", '"', r'\a', r'\b', r'\f', r'\n', r'\r', r'\s', r'\t'] + for char in chars: + str = str.replace(char, ' ') + return str - def drive_list(self, fileName): - msg = "" - fileName = self.escapes(str(fileName)) + def drive_query(self, parent_id, fileName): # Create Search Query for API request. - query = f"'{parent_id}' in parents and (name contains '{fileName}')" + if self.stopDup: + query = f"'{parent_id}' in parents and name = '{fileName}' and " + else: + query = f"'{parent_id}' in parents and " + fileName = fileName.split(' ') + for name in fileName: + if name != '': + query += f"name contains '{name}' and " + query += "trashed = false" response = self.__service.files().list(supportsTeamDrives=True, includeTeamDriveItems=True, q=query, @@ -612,66 +619,82 @@ def drive_list(self, fileName): pageSize=200, fields='files(id, name, mimeType, size)', orderBy='name asc').execute() + return response + + + def drive_list(self, fileName, stopDup=False): + self.stopDup = stopDup + msg = "" + if not stopDup: + fileName = self.escapes(str(fileName)) content_count = 0 - if not response["files"]: - return '', '' - - msg += f'
{file.get('name')}
(folder)
{file.get('name')}
(folder)
{file.get('name')}
({get_readable_file_size(int(file.get('size')))})
{file.get('name')}
({get_readable_file_size(int(file.get('size')))})
{len(response['files'])}
results for {fileName}
"
+ msg = f"Found {all_contents_count}
results for {fileName}
"
buttons = button_build.ButtonMaker()
buttons.buildbutton("🔎 VIEW", f"https://telegra.ph/{self.path[0]}")
diff --git a/bot/modules/clone.py b/bot/modules/clone.py
index bd4dcd9a85e..2beb20b9f18 100644
--- a/bot/modules/clone.py
+++ b/bot/modules/clone.py
@@ -21,7 +21,7 @@ def cloneNode(update, context):
return
if STOP_DUPLICATE:
LOGGER.info('Checking File/Folder if already in Drive...')
- smsg, button = gd.drive_list(name)
+ smsg, button = gd.drive_list(name, True)
if smsg:
msg3 = "File/Folder is already available in Drive.\nHere are the search results:"
sendMarkup(msg3, context.bot, update, button)
diff --git a/bot/modules/list.py b/bot/modules/list.py
index cc0c3d4ec86..e81aac0bbda 100644
--- a/bot/modules/list.py
+++ b/bot/modules/list.py
@@ -1,18 +1,17 @@
from telegram.ext import CommandHandler
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot import LOGGER, dispatcher
-from bot.helper.telegram_helper.message_utils import sendMessage, sendMarkup, editMessage
+from bot.helper.telegram_helper.message_utils import sendMessage, editMessage
from bot.helper.telegram_helper.filters import CustomFilters
-import threading
from bot.helper.telegram_helper.bot_commands import BotCommands
def list_drive(update, context):
try:
- search = update.message.text.split(' ',maxsplit=1)[1]
+ search = update.message.text.split(' ', maxsplit=1)[1]
LOGGER.info(f"Searching: {search}")
reply = sendMessage('Searching..... Please wait!', context.bot, update)
- gdrive = GoogleDriveHelper(None)
+ gdrive = GoogleDriveHelper()
msg, button = gdrive.drive_list(search)
if button:
diff --git a/bot/modules/mirror.py b/bot/modules/mirror.py
index a870a571090..2875355d67d 100644
--- a/bot/modules/mirror.py
+++ b/bot/modules/mirror.py
@@ -246,8 +246,7 @@ def _mirror(bot, update, isTar=False, extract=False, isZip=False, isQbit=False):
resp = requests.get(link)
if resp.status_code == 200:
file_name = str(time.time()).replace(".", "") + ".torrent"
- with open(file_name, "wb") as f:
- f.write(resp.content)
+ open(file_name, "wb").write(resp.content)
link = f"{file_name}"
else:
sendMessage("ERROR: link got HTTP response:" + resp.status_code, bot, update)
diff --git a/config_sample.env b/config_sample.env
index dcbf74e9e1f..137418462df 100644
--- a/config_sample.env
+++ b/config_sample.env
@@ -36,12 +36,13 @@ BASE_URL_OF_BOT = "" # Web Link, Required for Heroku to avoid sleep or use work
# These are optional, if you don't know, simply leave them, don't fill anything in them.
ACCOUNTS_ZIP_URL = ""
TOKEN_PICKLE_URL = ""
+MULTI_SEARCH_URL = "" # You can use a gist raw link (remove the commit id from the link, like the config raw link — see the Heroku guide)
# To use limit leave space between number and unit. Available units is (gb or GB, tb or TB)
TORRENT_DIRECT_LIMIT = ""
TAR_UNZIP_LIMIT = ""
CLONE_LIMIT = ""
MEGA_LIMIT = ""
-# Required for Heroku
+# Optional for Heroku features (dyno restart (/reboot) and update cmd soon)
HEROKU_API_KEY = ""
HEROKU_APP_NAME = ""
# View Link button to open file Index Link in browser instead of direct download link
diff --git a/driveid.py b/driveid.py
new file mode 100644
index 00000000000..62c521723d4
--- /dev/null
+++ b/driveid.py
@@ -0,0 +1,50 @@
+import os
+import re
+print("\n\n"\
+ " Bot can search files recursively, but you have to add the list of drives you want to search.\n"\
+ " Use the following format: (You can use 'root' in the ID in case you wan to use main drive.)\n"\
+ " teamdrive NAME --> anything that u likes\n"\
+ " teamdrive ID --> id of teamdrives in which u likes to search ('root' for main drive)\n"\
+ " teamdrive INDEX URL --> enter index url for this drive.\n" \
+ " goto the respective drive and copy the url from address bar\n")
+msg = ''
+if os.path.exists('drive_folder'):
+ with open('drive_folder', 'r+') as f:
+ lines = f.read()
+ if not re.match(r'^\s*$', lines):
+ print(lines)
+ print("\n\n"\
+ " DO YOU WISH TO KEEP THE ABOVE DETAILS THAT YOU PREVIOUSLY ADDED???? ENTER (y/n)\n"\
+ " IF NOTHING SHOWS ENTER n")
+ while (1):
+ choice = input()
+ if choice == 'y' or choice == 'Y':
+ msg = f'{lines}'
+ break
+ elif choice == 'n' or choice == 'N':
+ break
+ else:
+ print("\n\n DO YOU WISH TO KEEP THE ABOVE DETAILS ???? y/n <=== this is option ..... OPEN YOUR EYES & READ...")
+num = int(input(" How Many Drive/Folder You Likes To Add : "))
+count = 1
+while count <= num :
+ print(f"\n > DRIVE - {count}\n")
+ name = input(" Enter Drive NAME (anything) : ")
+ id = input(" Enter Drive ID : ")
+ index = input(" Enter Drive INDEX URL (optional) : ")
+ if not name or not id:
+ print("\n\n ERROR : Dont leave the name/id without filling.")
+ exit(1)
+ name=name.replace(" ", "_")
+ if index:
+ if index[-1] == "/":
+ index = index[:-1]
+ else:
+ index = ''
+ count+=1
+ msg += f"{name} {id} {index}\n"
+with open('drive_folder', 'w') as file:
+ file.truncate(0)
+ file.write(msg)
+print("\n\n Done!")
+
diff --git a/qBittorrent.conf b/qBittorrent.conf
index 3424ca4e4d2..0d9be89a57d 100644
--- a/qBittorrent.conf
+++ b/qBittorrent.conf
@@ -9,7 +9,7 @@ Accepted=true
Cookies=@Invalid()
[BitTorrent]
-Session\AsyncIOThreadsCount=32
+Session\AsyncIOThreadsCount=4
Session\BTProtocol=Both
Session\MultiConnectionsPerIp=true
Session\SlowTorrentsDownloadRate=100
@@ -23,8 +23,8 @@ Advanced\IncludeOverhead=false
Advanced\RecheckOnCompletion=true
Advanced\LtTrackerExchange=true
Advanced\SuperSeeding=false
-Bittorrent\MaxConnecs=3000
-Bittorrent\MaxConnecsPerTorrent=500
+Bittorrent\MaxConnecs=-1
+Bittorrent\MaxConnecsPerTorrent=-1
Bittorrent\DHT=true
Bittorrent\DHTPort=6881
Bittorrent\Encryption=0
@@ -32,7 +32,7 @@ Bittorrent\PeX=true
Bittorrent\LSD=true
Bittorrent\sameDHTPortAsBT=true
Bittorrent\uTP=true
-Bittorrent\uTP_rate_limited=true
+Bittorrent\uTP_rate_limited=false
Connection\PortRangeMin=6881
Connection\UPnP=true
Downloads\DiskWriteCacheSize=64