I want to extract links from multiple web pages. The link extraction itself works fine, but when I loop over several URLs the links from the first URL are written twice and the links from the last URL are missing. What is the reason for this?
import re
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import csv
from bs4 import BeautifulSoup
URLs = ["https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/1","https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/2",
"https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/3","https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/4","https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/5",
"https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/6","https://www.oddsportal1.com/soccer/turkey/super-lig-2019-2020/results/#/page/7"]
driver = webdriver.Chrome(ChromeDriverManager().install())
file = open('linkler.csv', 'w+', newline='')
writer = csv.writer(file)
writer.writerow(['linkler'])
for link in URLs:
    driver.get(link)
    html_source = driver.page_source
    soup = BeautifulSoup(html_source, "html.parser")
    # write every anchor that points into the 2019-2020 Super Lig results
    for a in soup.find_all('a', attrs={'href': re.compile("^/soccer/turkey/super-lig-2019-2020/")}):
        writer.writerow([a.get('href')])
driver.quit()
file.close()
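I wondered whether the problem is that the URLs only differ after the #, so driver.get() may not trigger a full page load and page_source is still showing the previous page. Would something like the sketch below be the right direction, forcing a refresh and waiting for the result links before reading the source? The CSS selector just mirrors the regex above and the 15-second timeout is my own guess, not something from the site:

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

for link in URLs:
    driver.get(link)
    # the URLs differ only after '#', so force a real reload of the page
    driver.refresh()
    # wait until at least one result link is present before reading the source
    WebDriverWait(driver, 15).until(
        EC.presence_of_element_located(
            (By.CSS_SELECTOR, "a[href^='/soccer/turkey/super-lig-2019-2020/']")
        )
    )
    soup = BeautifulSoup(driver.page_source, "html.parser")
    for a in soup.find_all('a', attrs={'href': re.compile("^/soccer/turkey/super-lig-2019-2020/")}):
        writer.writerow([a.get('href')])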