import csv

import requests  # or urllib

# YouTube Data API key
API_KEY = ""  # insert your API key
# YouTube channel ID
channel_id = ""  # insert your YouTube channel ID
page_token = ""

# fetch every video of the channel, 50 results per page
videos = []
has_next = True
while has_next:
    url = ("https://www.googleapis.com/youtube/v3/search?key="
           "{}&channelId={}&part=snippet,id"
           "&order=date&maxResults=50&pageToken={}"
           ).format(API_KEY, channel_id, page_token)
    resp = requests.get(url)
    data = resp.json()
    for item in data['items']:
        videos.append(item)
    # iterate through the result pagination until there is no next page
    next_token = data.get('nextPageToken')
    if next_token:
        page_token = next_token
    else:
        has_next = False

# structuring the data: one row per video
rows = []
for item in videos:
    title = item['snippet'].get('title', "")
    description = item['snippet'].get('description', "")
    video_url = "https://www.youtube.com/watch?v={}".format(
        item['id'].get('videoId', ""))
    # add the special formula =image("url") so we can view the thumbnail in a Google Sheets cell
    thumb = '=image("{}")'.format(
        item['snippet']['thumbnails']['default'].get('url', ""))
    rows.append([title, description, video_url, thumb])

# the data is now ready to write to a CSV file
path = "videos.csv"
with open(path, "w", newline="", encoding="utf-8") as csv_file:
    writer = csv.writer(csv_file, delimiter=";")
    for row in rows:
        writer.writerow(row)

print("{} videos written to {}".format(len(rows), path))
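The import comment at the top mentions urllib as an alternative to requests. A minimal sketch of the same search call using only the standard library could look like the following; the API_KEY, channel_id and page_token placeholders are the same as in the script above.

import json
import urllib.parse
import urllib.request

API_KEY = ""     # insert your API key
channel_id = ""  # insert your YouTube channel ID
page_token = ""

# build the same search URL with urlencode instead of string formatting
params = urllib.parse.urlencode({
    "key": API_KEY,
    "channelId": channel_id,
    "part": "snippet,id",
    "order": "date",
    "maxResults": 50,
    "pageToken": page_token,
})
url = "https://www.googleapis.com/youtube/v3/search?" + params

# fetch one page of results and decode the JSON body
with urllib.request.urlopen(url) as resp:
    data = json.loads(resp.read().decode("utf-8"))
print(len(data.get("items", [])), "items on this page")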
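As a quick sanity check after the script has run, the file can be read back with the same delimiter to confirm that every row has the four expected columns. This is just a sketch and assumes videos.csv sits in the working directory.

import csv

# read videos.csv back and confirm each row has
# title, description, video URL and the =image(...) thumbnail formula
with open("videos.csv", newline="", encoding="utf-8") as csv_file:
    reader = csv.reader(csv_file, delimiter=";")
    for row in reader:
        assert len(row) == 4, row
        print(row[0], "->", row[2])  # title and video URL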