This article walks through sample code that implements a Baidu search launcher in Python with tkinter, shared here for reference. The details are as follows:
\"\"\" 百度搜索可视化 \"\"\" import tkinter import win32api from selenium.webdriver import Chrome entry = None def callback(): global entry keywords = entry.get() if not keywords: win32api.MessageBox(0, \'请输入搜索关键字\', \'提示\', 0) return chrome = Chrome() chrome.get(\'https://www.baidu.com/\') chrome.find_element_by_id(\'kw\').send_keys(keywords) chrome.find_element_by_id(\'su\').click() # bilibili关键字搜索 # chrome.get(\'https://www.bilibili.com/\') # chrome.find_element_by_xpath(\'//form[@id=\"nav_searchform\"]/input\').send_keys(keywords) # chrome.find_element_by_xpath(\'//div[@class=\"nav-search-btn\"]/button\').click() def main(): global entry tk = tkinter.Tk() # tk.resizable(width=False,height=False) # 固定窗体大小?无效 tk.title(\'百度搜索\') # 1.设置窗体居中 # screenwidth = tk.winfo_screenwidth() # 获取屏幕宽度 # screenheight = tk.winfo_screenheight() # 获取屏幕高度 # # 计算窗体大小,位置参数,width,height:窗体宽高 # width = 100 # height = 50 # size = \'%dx%d+%d+%d\' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2) # tk.geometry(size) # 设置窗体位置为屏幕居中 # 2.设置窗体右下角,无效 # screenwidth = tk.winfo_screenwidth() # 获取屏幕宽度 # screenheight = tk.winfo_screenheight() # 获取屏幕高度 # print(screenwidth,screenheight) # # 计算窗体大小,位置参数,width,height:窗体宽高 # width = 100 # height = 50 # size = \'%dx%d+%d+%d\' % (width, height, (screenwidth - width), (screenheight - height)) # tk.geometry(size) # 设置窗体位置为屏幕右下角 # 获取窗体x,y # tk.update() # print(tk.winfo_x()) # print(tk.winfo_y()) tk.geometry(\'+0+0\') # 固定屏幕左上角 # tk.geometry(\'+1440+770\') entry = tkinter.Entry(tk) entry.pack() button = tkinter.Button(tk, text=\'百度一下\', command=callback) button.pack() tk.mainloop() if __name__ == \'__main__\': main()
Supplement: simulating a Baidu search and resolving the result links with Python
# coding: utf-8
import os
import time
import requests
import urllib.parse
from bs4 import BeautifulSoup
from urllib.parse import urlparse
from fake_useragent import UserAgent
from multiprocessing.pool import ThreadPool

LOCATIONS = {}        # maps result domain -> resolved redirect URL
GLOBAL_THREAD = 500   # thread pool size
GLOBAL_TIMEOUT = 50   # per-request timeout in seconds

def get_links(keyword, generator, pages):
    """Build the Baidu result-page URLs for every keyword suffix and page."""
    links = []
    for page in range(int(pages.split("-")[0]), int(pages.split("-")[1]) + 1):
        for genera in range(int(generator.split("-")[0]), int(generator.split("-")[1]) + 1):
            links.append("http://www.baidu.com.cn/s?wd="
                         + urllib.parse.quote(keyword + str(genera))
                         + "&pn=" + str(page * 10))
    return links

def get_page(url):
    """Fetch one result page and record the real target of each result link."""
    headers = {"user-agent": UserAgent().chrome}
    req = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)
    req.encoding = "utf-8"
    soup = BeautifulSoup(req.text, "lxml")
    for link in soup.select("div.result > h3.t > a"):
        # Result hrefs are redirectors; read the Location header instead of following it
        req = requests.get(link.get("href"), headers=headers,
                           allow_redirects=False, timeout=GLOBAL_TIMEOUT)
        location = req.headers.get("location", "")
        if "=" in location:
            root = urlparse(location).netloc
            LOCATIONS[root] = location

def baidu_search():
    try:
        os.system("cls")
        print("-" * 56 + "\n")
        print("| BaiduSearch Engine By 美图博客[https://www.meitubk.com/] |\n")
        print("-" * 56 + "\n")
        keyword = input("Keyword: ")
        generator = input("Generator(1-10): ")
        pages = input("Pages(0-10): ")
        start = time.time()
        pool = ThreadPool(processes=GLOBAL_THREAD)
        pool.map(get_page, get_links(keyword, generator, pages))
        pool.close()
        pool.join()
        end = time.time()
        path = r"D:\Desktop\result.txt"
        save_result(path)
        print("\nSaved in %s" % path)
        print("Result count: %d" % len(LOCATIONS.values()))
        print("Running time: %ds" % (end - start))
    except ValueError:
        # Raised when the Generator/Pages input is not of the form "a-b"
        print("\nInput Error!")
        exit(0)

def save_result(path):
    with open(path, "w") as file:
        for url in LOCATIONS.values():
            file.write(url + "\n")

baidu_search()
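The pivotal detail in get_page() is that Baidu wraps each organic result in a redirector URL, so fetching a result href with allow_redirects=False exposes the real destination in the Location response header rather than downloading the target page. A standalone sketch of just that step; the resolve_baidu_link name and the example href are hypothetical:

import requests

def resolve_baidu_link(href, timeout=10):
    """Return the real target behind a Baidu result redirector, or None."""
    resp = requests.get(href, allow_redirects=False, timeout=timeout)
    # A redirect answers with a 3xx status and puts the destination in Location
    if 300 <= resp.status_code < 400:
        return resp.headers.get("location")
    return None

# Hypothetical href; real ones come from the parsed result page
# print(resolve_baidu_link("http://www.baidu.com/link?url=..."))

Skipping the redirect body also keeps each worker request cheap, which matters when the script fans the links out across a large thread pool as above.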