When I run the code, it reports:
```
C:\ProgramData\anaconda3\envs\python310\python.exe C:\Users\Administrator\Personal_scripts\pythonProject\temp.py
Traceback (most recent call last):
  File "C:\ProgramData\anaconda3\envs\python310\lib\multiprocessing\queues.py", line 245, in _feed
    obj = _ForkingPickler.dumps(obj)
  File "C:\ProgramData\anaconda3\envs\python310\lib\multiprocessing\reduction.py", line 51, in dumps
    cls(buf, protocol).dump(obj)
TypeError: cannot pickle 'TaskStepMethWrapper' object
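```

As far as I can tell, the failure happens because `pool.apply()` has to pickle its argument tuple to send it to a worker process (the traceback goes through `multiprocessing.queues._feed` and `_ForkingPickler.dumps`), and the `aiohttp.ClientSession` and `aiomultiprocess.Pool` objects in `args` hold live asyncio state that cannot be pickled. A minimal sketch of the same kind of failure, using only the standard library (the `demo` coroutine below is hypothetical, just for illustration):

```python
import asyncio
import pickle


async def demo():
    # A live asyncio task, similar to what the Pool/ClientSession carry internally.
    task = asyncio.create_task(asyncio.sleep(0))
    try:
        pickle.dumps(task)  # pickling objects tied to the event loop raises TypeError
    except TypeError as exc:
        print(exc)
    await task


asyncio.run(demo())
```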
Code:
```python
from urllib.parse import unquote, urlparse
import aiohttp, aiomultiprocess, asyncio, os
from bs4 import BeautifulSoup

main_url = "http://am.adianshi.com:6805"
download_dict = {}
# save_path = os.path.join(os.getcwd(), "download")
save_path = r"D:\BaiduNetdiskDownload\开卡教程"


async def get_detail_url(base_url: str, html: str) -> list[str]:
    soup = BeautifulSoup(html, "lxml")
    file_hrefs = soup.select("div div li[class='item file'] a[href]")
    curlink = os.path.join(save_path, urlparse(unquote(base_url)).path[1:].replace('/', '\\'))
    download_dict[curlink] = []
    for file_href in file_hrefs:
        link = "http://am.adianshi.com:6805" + unquote(file_href['href'])
        download_dict[curlink].append(link)
    folder_hrefs = soup.select("div div li[class='item folder'] a[href]")
    item_folder = []
    for folder_href in folder_hrefs:
        link = "http://am.adianshi.com:6805" + folder_href['href']
        item_folder.append(link)
    # print(item_folder)
    return item_folder


async def fetch_detail(url: str, aiohttp_session: aiohttp.ClientSession, pool: aiomultiprocess.Pool):
    async with aiohttp_session.get(url) as response:
        html = await response.text()
        item_folder = await get_detail_url(url, html)


async def aiomultiprocess_main():
    async with aiohttp.ClientSession() as aiohttp_session:
        async with aiomultiprocess.Pool() as pool:
            task = pool.apply(fetch_detail, args=(main_url, aiohttp_session, pool))
            await task
    print(download_dict)


if __name__ == "__main__":
    asyncio.run(aiomultiprocess_main())
```
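A minimal sketch of one possible workaround, assuming the goal is only to fetch pages inside worker processes: create the `ClientSession` inside the worker coroutine so that only picklable values (plain strings) are passed through `pool.apply()`. This is not the library's prescribed pattern, just an illustration using the same `main_url` as above:

```python
import asyncio
import aiohttp
import aiomultiprocess

main_url = "http://am.adianshi.com:6805"


async def fetch_detail(url: str) -> str:
    # The session lives entirely inside the worker process, so nothing
    # unpicklable has to cross the process boundary.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.text()


async def aiomultiprocess_main():
    async with aiomultiprocess.Pool() as pool:
        html = await pool.apply(fetch_detail, args=(main_url,))
        print(len(html))


if __name__ == "__main__":
    asyncio.run(aiomultiprocess_main())
```

Note also that a module-level `download_dict` filled in a worker process will not be visible in the parent process; with multiprocessing, results generally have to be returned from the worker coroutine instead.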