I want to download a lot of files (tens of millions or more). I have the URL for each file; the list of URLs is in a file, URLs.txt:
http://mydomain.com/0wd.pdf
http://mydomain.com/asz.pdf
http://mydomain.com/axz.pdf
http://mydomain.com/b00.pdf
http://mydomain.com/bb0.pdf
etc.
I can download them via wget -i URLs.txt, but that takes more than an hour and, more importantly, the downloads would exceed the maximum number of files that a single folder can hold.
How can I download such a big list of URLs so that the downloaded files are split into subfolders named after the first letter of each file name? For example:
0/0wd.pdf
a/asz.pdf
a/axz.pdf
b/b00.pdf
b/bb0.pdf
etc.
I am using Ubuntu, if that matters.
Answer 1
Maybe something like:
awk -F/ '{print substr($NF, 1, 1), $0}' urls.txt |
xargs -L1 bash -c 'mkdir -p -- "$0" && curl -sSfO --output-dir "$0" "$1"'
Here, awk prepends the first character of the file name to each line, and that character is used to choose the output directory for the curl command. With the GNU implementation of xargs, you can add the -P option to run several transfers in parallel.
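For instance, a parallel variant might look like this (a sketch assuming GNU xargs; the -P 8 job count is an arbitrary choice, and curl's --output-dir option requires curl 7.73.0 or newer):

awk -F/ '{print substr($NF, 1, 1), $0}' urls.txt |
xargs -L1 -P 8 bash -c 'mkdir -p -- "$0" && curl -sSfO --output-dir "$0" "$1"'

Note that this still spawns one bash and one curl process per URL; with tens of millions of files that per-process overhead adds up, so batching several URLs into each curl invocation would be worth considering.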
This assumes the URLs contain no whitespace, quotes, or backslashes, but URLs should not contain anything other than URI-encoded characters anyway (and curl can handle the URI encoding itself when processing them).
Given the example input, running the above command produces:
.
├── 0
│   └── 0wd.pdf
├── a
│   ├── asz.pdf
│   └── axz.pdf
└── b
    ├── b00.pdf
    └── bb0.pdf
Answer 2
ChatGPT produced some working Python code for this (I confirmed it runs on Python 3.11):
import os
import requests

def download_files_with_subfolders(url_file):
    with open(url_file, 'r') as url_list:
        for url in url_list:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]

            # Create subfolder if it doesn't exist
            subfolder = os.path.join(first_letter, '')
            os.makedirs(subfolder, exist_ok=True)

            # Download the file
            response = requests.get(url)
            if response.status_code == 200:
                file_path = os.path.join(subfolder, filename)
                with open(file_path, 'wb') as out_file:
                    out_file.write(response.content)
                print(f"Downloaded: {url} -> {file_path}")
            else:
                print(f"Failed to download: {url} (Status code: {response.status_code})")

if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file)
with somefile.txt containing:
http://mydomain.com/0wd.pdf
http://mydomain.com/asz.pdf
http://mydomain.com/axz.pdf
http://mydomain.com/b00.pdf
http://mydomain.com/bb0.pdf
etc.
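A note on scale: requests.get as used above opens a fresh connection for every file, which is wasteful for tens of millions of URLs. A minimal sketch of the same loop using requests.Session, which reuses connections to the same host (the timeout value is an arbitrary choice):

import os
import requests

def download_files_with_subfolders(url_file):
    with requests.Session() as session, open(url_file, 'r') as urls:
        for url in urls:
            url = url.strip()
            filename = os.path.basename(url)
            subfolder = filename[0]
            os.makedirs(subfolder, exist_ok=True)

            # The session reuses TCP/TLS connections across requests to the same host
            response = session.get(url, timeout=30)
            if response.status_code == 200:
                with open(os.path.join(subfolder, filename), 'wb') as out_file:
                    out_file.write(response.content)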
Advanced variations:
- Preserve the last-modified date from the response headers (again mostly ChatGPT's code):
import requests
import os
from datetime import datetime

def download_file(url, local_filename):
    # Send a GET request to the server
    response = requests.get(url, stream=True)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Get the last modified date from the response headers
        last_modified_header = response.headers.get('Last-Modified')
        last_modified_date = datetime.strptime(last_modified_header, '%a, %d %b %Y %H:%M:%S %Z')

        # Save the content to a local file while preserving the original date
        with open(local_filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

        # Set the local file's last modified date to match the original date
        os.utime(local_filename, (last_modified_date.timestamp(), last_modified_date.timestamp()))
        print(f"Downloaded {local_filename} with the original date {last_modified_date}")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")

def download_files_with_subfolders(url_file):
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]

            # Create subfolder if it doesn't exist
            subfolder = os.path.join(first_letter, '')
            os.makedirs(subfolder, exist_ok=True)

            file_path = os.path.join(subfolder, filename)
            download_file(url, file_path)

if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file)
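Note that the Last-Modified header is optional, so download_file above raises a TypeError if a server omits it, and strptime's %Z parsing is fragile. A hedged guard using the standard library's email.utils.parsedate_to_datetime, to be placed inside download_file after the file is written:

from email.utils import parsedate_to_datetime

# Only touch the timestamp when the server actually sent the header
last_modified_header = response.headers.get('Last-Modified')
if last_modified_header is not None:
    ts = parsedate_to_datetime(last_modified_header).timestamp()
    os.utime(local_filename, (ts, ts))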
- Multi-threaded downloads:
import requests
import os
from datetime import datetime
from multiprocessing.dummy import Pool as ThreadPool

def download_file(url, local_filename):
    # Send a GET request to the server
    response = requests.get(url, stream=True)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Get the last modified date from the response headers
        last_modified_header = response.headers.get('Last-Modified')
        last_modified_date = datetime.strptime(last_modified_header, '%a, %d %b %Y %H:%M:%S %Z')

        # Save the content to a local file while preserving the original date
        with open(local_filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

        # Set the local file's last modified date to match the original date
        os.utime(local_filename, (last_modified_date.timestamp(), last_modified_date.timestamp()))
        print(f"Downloaded {local_filename} with the original date {last_modified_date}")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")

def download_files_with_subfolders(url_file, num_threads=4):
    download_arguments = []
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]

            # Create subfolder if it doesn't exist
            subfolder = os.path.join(first_letter, '')
            os.makedirs(subfolder, exist_ok=True)

            file_path = os.path.join(subfolder, filename)
            download_arguments.append((url, file_path))

    pool = ThreadPool(num_threads)
    results = pool.starmap(download_file, download_arguments)

if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file, num_threads=10)
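multiprocessing.dummy.Pool works, but concurrent.futures.ThreadPoolExecutor is the more idiomatic standard-library choice today. An equivalent sketch, reusing download_file and the argument list built above:

import os
from concurrent.futures import ThreadPoolExecutor

def download_files_with_subfolders(url_file, num_threads=4):
    download_arguments = []
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            subfolder = filename[0]
            os.makedirs(subfolder, exist_ok=True)
            download_arguments.append((url, os.path.join(subfolder, filename)))

    with ThreadPoolExecutor(max_workers=num_threads) as pool:
        # One future per (url, path) pair; result() re-raises any worker exception
        futures = [pool.submit(download_file, url, path) for url, path in download_arguments]
        for future in futures:
            future.result()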
- Create folders for the first letter and subfolders for the second letter, e.g.:
0/w/0wd.pdf
a/s/asz.pdf
a/x/axz.pdf
b/0/b00.pdf
b/b/bb0.pdf
etc.
Code:
import requests
import os
from datetime import datetime
from multiprocessing.dummy import Pool as ThreadPool

def download_file(url, local_filename):
    # Send a GET request to the server
    response = requests.get(url, stream=True)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Get the last modified date from the response headers
        last_modified_header = response.headers.get('Last-Modified')
        last_modified_date = datetime.strptime(last_modified_header, '%a, %d %b %Y %H:%M:%S %Z')

        # Save the content to a local file while preserving the original date
        with open(local_filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

        # Set the local file's last modified date to match the original date
        os.utime(local_filename, (last_modified_date.timestamp(), last_modified_date.timestamp()))
        print(f"Downloaded {local_filename} with the original date {last_modified_date}")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")

def download_files_with_subfolders(url_file, num_threads=4):
    download_arguments = []
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]
            second_letter = filename[1]

            # Create subfolders if they don't exist
            subfolder = os.path.join(first_letter, '')
            os.makedirs(subfolder, exist_ok=True)
            subsubfolder = os.path.join(first_letter, second_letter)
            os.makedirs(subsubfolder, exist_ok=True)

            file_path = os.path.join(subsubfolder, filename)
            download_arguments.append((url, file_path))

    pool = ThreadPool(num_threads)
    results = pool.starmap(download_file, download_arguments)

if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file, num_threads=10)
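If two levels still leave too many files per directory, the same idea generalizes to the first N characters of the file name. A hypothetical helper (the name and depth parameter are illustrative):

import os

def subfolder_path(filename, depth=2):
    # depth=2 maps "bb0.pdf" to "b/b/bb0.pdf"; depth=3 to "b/b/0/bb0.pdf"
    return os.path.join(*filename[:depth], filename)

# Inside the loop above:
#   file_path = subfolder_path(filename, depth=2)
#   os.makedirs(os.path.dirname(file_path), exist_ok=True)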