How can I download a very large list of URLs so that the downloaded files are split into subfolders named after the first letter of each filename?

I want to download a lot of files (tens of millions or more). I have the URL of each file; my file URLs.txt contains the list of URLs:

http://mydomain.com/0wd.pdf
http://mydomain.com/asz.pdf
http://mydomain.com/axz.pdf
http://mydomain.com/b00.pdf
http://mydomain.com/bb0.pdf
etc.

I could download them with wget -i URLs.txt, but that would exceed the maximum number of files that can be placed in a single folder.

How can I download such a large list of URLs so that the downloaded files are split into subfolders named after the first letter of each filename? For example:

0/0wd.pdf
a/asz.pdf
a/axz.pdf
b/b00.pdf
b/bb0.pdf
etc.

I'm using Ubuntu, if that matters.

Answer 1

Perhaps something like this:

awk -F/ '{print substr($NF, 1, 1), $0}' urls.txt |
  xargs -L1 bash -c 'mkdir -p -- "$0" && curl -sSf -O --output-dir "$0" "$1"'

awk prepends the first character of the filename to each line, and that character is then used to select the output directory in the curl command (note that curl's --output-dir option requires curl 7.73.0 or newer). With the GNU implementation of xargs, you can use the -P option to run several fetches in parallel; a parallel variant is sketched after the output tree below.

This assumes the URLs contain no whitespace, quotes, or backslashes, but URLs shouldn't contain anything other than URI-encoded characters anyway (even though curl is able to cope with such characters and do the URI encoding itself).

Given your example input, running the above command produces:

.
├── 0
│   └── 0wd.pdf
├── a
│   ├── asz.pdf
│   └── axz.pdf
└── b
    ├── b00.pdf
    └── bb0.pdf
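
For instance, to fetch eight files at a time (a minimal sketch assuming the GNU implementation of xargs; the concurrency of 8 is an arbitrary choice):

awk -F/ '{print substr($NF, 1, 1), $0}' urls.txt |
  xargs -L1 -P8 bash -c 'mkdir -p -- "$0" && curl -sSf -O --output-dir "$0" "$1"'

With millions of URLs, adding curl's --retry option may also be worthwhile so that transient failures are retried instead of lost.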

Answer 2

ChatGPT provided some working Python code (I confirmed that it works on Python 3.11):

import os
import requests

def download_files_with_subfolders(url_file):
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]

            # Create subfolder if it doesn't exist
            subfolder = first_letter
            os.makedirs(subfolder, exist_ok=True)

            # Download the file
            response = requests.get(url)
            if response.status_code == 200:
                file_path = os.path.join(subfolder, filename)
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                print(f"Downloaded: {url} -> {file_path}")
            else:
                print(f"Failed to download: {url} (Status code: {response.status_code})")

if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file) 

where somefile.txt contains:

http://mydomain.com/0wd.pdf
http://mydomain.com/asz.pdf
http://mydomain.com/axz.pdf
http://mydomain.com/b00.pdf
http://mydomain.com/bb0.pdf
etc.

More advanced variants:

  1. Preserve the last-modified date from the response headers (code also mostly from ChatGPT):
import requests
import os
from datetime import datetime

def download_file(url, local_filename):
    # Send a GET request to the server
    response = requests.get(url, stream=True)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Get the last modified date from the response headers (it may be absent)
        last_modified_header = response.headers.get('Last-Modified')

        # Save the content to a local file
        with open(local_filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

        # Set the local file's last modified date to match the original date,
        # but only if the server actually sent a Last-Modified header
        if last_modified_header:
            last_modified_date = datetime.strptime(last_modified_header, '%a, %d %b %Y %H:%M:%S %Z')
            os.utime(local_filename, (last_modified_date.timestamp(), last_modified_date.timestamp()))
            print(f"Downloaded {local_filename} with the original date {last_modified_date}")
        else:
            print(f"Downloaded {local_filename} (no Last-Modified header)")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")


def download_files_with_subfolders(url_file):
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]

            # Create subfolder if it doesn't exist
            subfolder = first_letter
            os.makedirs(subfolder, exist_ok=True)

            file_path = os.path.join(subfolder, filename)
            download_file(url, file_path)

if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file)
  2. Multi-threaded downloads:
import requests
import os
from datetime import datetime

from multiprocessing.dummy import Pool as ThreadPool

def download_file(url, local_filename):
    # Send a GET request to the server
    response = requests.get(url, stream=True)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Get the last modified date from the response headers (it may be absent)
        last_modified_header = response.headers.get('Last-Modified')

        # Save the content to a local file
        with open(local_filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

        # Set the local file's last modified date to match the original date,
        # but only if the server actually sent a Last-Modified header
        if last_modified_header:
            last_modified_date = datetime.strptime(last_modified_header, '%a, %d %b %Y %H:%M:%S %Z')
            os.utime(local_filename, (last_modified_date.timestamp(), last_modified_date.timestamp()))
            print(f"Downloaded {local_filename} with the original date {last_modified_date}")
        else:
            print(f"Downloaded {local_filename} (no Last-Modified header)")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")


def download_files_with_subfolders(url_file, num_threads=4):
    download_arguments = []
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]

            # Create subfolder if it doesn't exist
            subfolder = first_letter
            os.makedirs(subfolder, exist_ok=True)

            file_path = os.path.join(subfolder, filename)
            download_arguments.append((url, file_path))

    pool = ThreadPool(num_threads)
    pool.starmap(download_file, download_arguments)
    pool.close()
    pool.join()


if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file, num_threads=10)
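
Note that multiprocessing.dummy provides a thread-based Pool with the same API as the process-based one, which is a reasonable fit here since downloading is I/O-bound; starmap blocks until all downloads have finished.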
  3. Create a folder for the first letter and a subfolder for the second letter. For example:
0/w/0wd.pdf
a/s/asz.pdf
a/x/axz.pdf
b/0/b00.pdf
b/b/bb0.pdf
etc.

Code:

import requests
import os
from datetime import datetime

from multiprocessing.dummy import Pool as ThreadPool

def download_file(url, local_filename):
    # Send a GET request to the server
    response = requests.get(url, stream=True)

    # Check if the request was successful (status code 200)
    if response.status_code == 200:
        # Get the last modified date from the response headers (it may be absent)
        last_modified_header = response.headers.get('Last-Modified')

        # Save the content to a local file
        with open(local_filename, 'wb') as f:
            for chunk in response.iter_content(chunk_size=128):
                f.write(chunk)

        # Set the local file's last modified date to match the original date,
        # but only if the server actually sent a Last-Modified header
        if last_modified_header:
            last_modified_date = datetime.strptime(last_modified_header, '%a, %d %b %Y %H:%M:%S %Z')
            os.utime(local_filename, (last_modified_date.timestamp(), last_modified_date.timestamp()))
            print(f"Downloaded {local_filename} with the original date {last_modified_date}")
        else:
            print(f"Downloaded {local_filename} (no Last-Modified header)")
    else:
        print(f"Failed to download file. Status code: {response.status_code}")


def download_files_with_subfolders(url_file, num_threads=4):
    download_arguments = []
    with open(url_file, 'r') as file:
        for url in file:
            url = url.strip()
            filename = os.path.basename(url)
            first_letter = filename[0]
            second_letter = filename[1]  # assumes filenames are at least two characters long

            # Create the nested subfolder if it doesn't exist
            # (makedirs also creates the first-letter parent)
            subsubfolder = os.path.join(first_letter, second_letter)
            os.makedirs(subsubfolder, exist_ok=True)

            file_path = os.path.join(subsubfolder, filename)
            download_arguments.append((url, file_path))

    pool = ThreadPool(num_threads)
    pool.starmap(download_file, download_arguments)
    pool.close()
    pool.join()


if __name__ == "__main__":
    urls_file = "somefile.txt"
    download_files_with_subfolders(urls_file, num_threads=10)
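
With tens of millions of files, even two levels of nesting can leave some directories very large. As a sketch of the natural generalization (the helper name shard_path and its depth parameter are my own, not part of the answer above), the subfolder logic could nest by the first N characters:

import os

def shard_path(filename, depth=2):
    # Nest by the first `depth` characters: depth=2 maps "bb0.pdf" to
    # "b/b/bb0.pdf". Short names are padded with "_" so the depth is uniform.
    stem = filename.ljust(depth, '_')
    subfolder = os.path.join(*stem[:depth])
    os.makedirs(subfolder, exist_ok=True)
    return os.path.join(subfolder, filename)

Inside download_files_with_subfolders, file_path = shard_path(filename) would then replace the first/second-letter handling.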
