Reputation: 95
I've created a script to parse the singers, their corresponding links, the actors, and their corresponding links out of different containers on a webpage. The script works fine. What I can't do is write the results to a csv file accordingly.
I've tried with:
import csv
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Site root used to resolve relative hrefs, and the movie page to scrape.
base_url = 'https://www.hindigeetmala.net'
link = 'https://www.hindigeetmala.net/movie/2_states.htm'

res = requests.get(link)
soup = BeautifulSoup(res.text, "lxml")

with open("hindigeetmala.csv", "w", newline="") as f:
    writer = csv.writer(f)
    # Bug fix: every data row below has three cells, so the header needs
    # three labels (the original listed only two).
    writer.writerow(['singer_records', 'actor_records', 'song_name'])
    # One <tr itemprop="track"> per song on the page.
    for item in soup.select("tr[itemprop='track']"):
        # select() always returns a (possibly empty) list, so the
        # comprehensions below cannot raise; the original try/except
        # wrappers were dead code and have been removed.
        singers = [i.get_text(strip=True)
                   for i in item.select("span[itemprop='byArtist']")
                   if i.get_text(strip=True)]
        singer_links = [urljoin(base_url, i.get("href"))
                        for i in item.select("a:has(> span[itemprop='byArtist'])")
                        if i.get("href")]
        # Pair each singer with their absolute link.
        singer_records = list(zip(singers, singer_links))

        actors = [i.get_text(strip=True)
                  for i in item.select("a[href^='/actor/']")
                  if i.get("href")]
        actor_links = [urljoin(base_url, i.get("href"))
                       for i in item.select("a[href^='/actor/']")
                       if i.get("href")]
        actor_records = list(zip(actors, actor_links))

        song_name = item.select_one("span[itemprop='name']").get_text(strip=True)
        writer.writerow([singer_records, actor_records, song_name])
        print(singer_records, actor_records, song_name)
If I execute the script as is, this is the output I get.
When I try writer.writerow([*singer_records, *actor_records, song_name]), I get this type of output — only the first tuple pair is written.
This is my expected output.
How can I write the result, as in name and their links in a csv file according to the third image?
PS All the images of the output represent the first column of csv files for brevity.
Upvotes: 0
Views: 78
Reputation: 229
With feedback from SIM, I think this is what you're looking for (I just added one function for formatting your lists):
import csv
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Root of the site (for resolving relative hrefs) and the page to scrape.
base_url = 'https://www.hindigeetmala.net'
link = 'https://www.hindigeetmala.net/movie/2_states.htm'

# Download the movie page and build a soup over it with the lxml backend.
res = requests.get(link)
soup = BeautifulSoup(res.text, "lxml")
def merge_results(inpt):
    """Flatten a list of (text, link) tuples into a one-element list
    containing a single comma-separated string of single-quoted values.

    e.g. [('a', 'x'), ('b', 'y')] -> ["'a','x','b','y'"]
    """
    quoted_fields = ("'" + field + "'" for pair in inpt for field in pair)
    return [','.join(quoted_fields)]
with open("hindigeetmala.csv", "w", newline="") as f:
    writer = csv.writer(f)
    # Bug fix: each row written below has three cells, so the header
    # needs three labels (the original listed only two).
    writer.writerow(['singer_records', 'actor_records', 'song_name'])
    # One <tr itemprop="track"> per song on the page.
    for item in soup.select("tr[itemprop='track']"):
        # select() always returns a (possibly empty) list, so the
        # comprehensions below cannot raise; the original try/except
        # wrappers were dead code and have been removed.
        singers = [i.get_text(strip=True)
                   for i in item.select("span[itemprop='byArtist']")
                   if i.get_text(strip=True)]
        singer_links = [urljoin(base_url, i.get("href"))
                        for i in item.select("a:has(> span[itemprop='byArtist'])")
                        if i.get("href")]
        # Pair each singer with their absolute link.
        singer_records = list(zip(singers, singer_links))

        actors = [i.get_text(strip=True)
                  for i in item.select("a[href^='/actor/']")
                  if i.get("href")]
        actor_links = [urljoin(base_url, i.get("href"))
                       for i in item.select("a[href^='/actor/']")
                       if i.get("href")]
        actor_records = list(zip(actors, actor_links))

        song_name = item.select_one("span[itemprop='name']").get_text(strip=True)
        # merge_results() flattens the (name, link) pairs into one
        # quoted, comma-joined string per cell.
        writer.writerow(merge_results(singer_records) +
                        merge_results(actor_records) + [song_name])
        print(singer_records, actor_records, song_name)
Upvotes: 1