I would like to create a pandas dataframe that includes all rows fulfilling the condition(and I managed to do it )scraped from a multiple page website .But the final result is that I am getting the pandas dataframe that has only the rows which belong to the last page of range i declared in the loop . I would be extremely grateful if someone pointed out where the error is that instead of the result from all pages , only the last one i get.
import requests
import pandas
from bs4 import BeautifulSoup
headers= {'User-Agent': 'Mozilla/5.0'}
for num in range (1,3):
url =' https://biznes.interia.pl/gieldy/notowania-gpw/profil-akcji-grn,wId,7380,tab,przebieg-sesji,pack,{}'.format(num)
response = requests.get(url,headers=headers)
content = response.content
soup = BeautifulSoup(content,"html.parser")
notow = soup.find_all('table',class_ = 'business-table-trading-table')
#on a given page, select only the rows containing the word "Transakcja"
rows = notow[0].select('tr:has(td:contains("TRANSAKCJA"))')
data = []
for row in rows :
cols = row.find_all('td')
cols = [ele.text.strip() for ele in cols]
cols = data.append([ele for ele in cols if ele] )
#final dataframe which should have contained the result from all scraped pages
df = pandas.DataFrame(data,)
print(df)