Skip to main content

Download all videos from a YouTube channel — final working code

# -*- coding: utf-8 -*-
"""
Created on Thu Apr 26 12:26:19 2018

@author: kishlay
"""

# NOTE(review): this script is Python 2 only -- reload(sys),
# sys.setdefaultencoding() and urllib.quote() do not exist in Python 3.
import requests  # or urllib
import csv
import sys
reload(sys)  # re-expose sys.setdefaultencoding(), which site.py deletes at interpreter startup
import urllib
from datetime import date, timedelta , datetime , time
import rfc3339  # third-party helper: formats datetimes as RFC 3339 strings for the API
sys.setdefaultencoding('utf-8')  # HACK: global default-encoding override; widely discouraged


# ---------------------------------------------------------------------------
# Configuration and search-window setup.
#
# The YouTube search endpoint caps how many results one query can page
# through, so the script walks backwards through time in fixed-size windows
# instead of issuing a single query for the whole channel history.
#
# Cleanup vs. original: removed unused variables (videosExtendTest,
# firstDate, finalFormat, formatted, period), the unused `next = True`
# (which shadowed the builtin), and leftover debug prints.
# ---------------------------------------------------------------------------

# get Youtube Data API Key
API_KEY = ""  # insert your API key
# youtube channel ID
channel_id = ""  # insert Youtube channel ID
page_token = ""

videos = []  # accumulates raw search-result items across all windows

# Window size: roughly five months (5 * 30 days).
delta = timedelta(5 * 30)

# Walk backwards from "now"; stop once a window start precedes this date.
publishedBefore = datetime.now()
finaltime = date(2008, 1, 1)

# Page backwards through time, one window per outer iteration.  Each window
# is fetched page-by-page (the API returns at most 50 results per page).
while True:
    publishedAfter = publishedBefore - delta
    print("\n*****outerloop**\n startdate = " + str(publishedAfter) + "\n before = " + str(publishedBefore))
    if publishedAfter.date() < finaltime:
        # Window start precedes the history cutoff: all windows are done.
        print('\n\n\n breaking' + str(publishedAfter))
        break

    publishedAfterFormatted = rfc3339.rfc3339(publishedAfter)
    publishedBeforeFormatted = rfc3339.rfc3339(publishedBefore)
    page_token = ''
    while True:
        # search.list: newest-first, 50 per page, restricted to the window.
        url = ("https://www.googleapis.com/youtube/v3/search?key="
               "{}&channelId={}&part=snippet,id"
               "&order=date&maxResults=50&pageToken={}&publishedAfter={}&publishedBefore={}"
               ).format(
                   API_KEY,
                   channel_id,
                   page_token,
                   urllib.quote(publishedAfterFormatted),   # Python 2 only; urllib.parse.quote in Py3
                   urllib.quote(publishedBeforeFormatted)
               )
        resp = requests.get(url)
        data = resp.json()

        # Error responses (quota exceeded, bad key, ...) carry no 'items'
        # key; .get() avoids a KeyError and simply yields an empty page.
        items = data.get('items', [])
        print("length of items in this page: " + str(len(items)))
        videos.extend(items)
        print("total videos collected: " + str(len(videos)))

        # Follow result pagination until the last page of this window.
        next_token = data.get('nextPageToken')
        if not next_token:
            break
        page_token = next_token

    # Slide the window one step further back in time.
    publishedBefore = publishedAfter

# ---------------------------------------------------------------------------
# Structure the collected items into semicolon-separated "title;watch-url"
# rows.  Items without a videoId (channels and playlists can also appear in
# search results) produce a URL with an empty v= parameter, as before.
#
# Cleanup vs. original: removed dead code -- `description` and the
# spreadsheet `thumb` formula were computed but never written to `rows`
# (and the thumb expression could raise KeyError on a missing 'default'
# thumbnail); also dropped the commented-out debug prints.
# ---------------------------------------------------------------------------
rows = []
count = 0
for item in videos:
    title = item['snippet'].get('title')
    videoId = "https://www.youtube.com/watch?v={}".format(
        item['id'].get('videoId', ""))
    count = count + 1  # total processed; printed after the CSV is written
    rows.append(";".join([title, videoId]))



# Dump the collected rows to a semicolon-delimited CSV file.  Each row was
# joined with ";" above, so it is split back into fields before writing.
path = "videos.csv"
with open(path, "w") as csv_file:
    writer = csv.writer(csv_file, delimiter=";")
    writer.writerows(r.split(";") for r in rows)

print(count)

Comments

Popular posts from this blog

Gui logging in node js and python

For node.js Use  frontail for logging https://www.npmjs.com/package/frontail For Python -- use Cutelog https://pypi.org/project/cutelog/ In NodeJs for using frontail we need to use log the logs in a file for logging logs to file , we will use winston Using winston https://www.npmjs.com/package/winston Eg. of using winstonconst { createLogger, format, transports } = require('winston'); const { combine, timestamp, label, prettyPrint } = format; const logger = createLogger({   level: 'info',   format: format.json(),   transports: [     //     // - Write to all logs with level `info` and below to `combined.log`      // - Write all logs error (and below) to `error.log`.     //     new transports.File({ filename: 'error.log', level: 'error' }),     new transports.File({ filename: 'combined.log' })   ] }); logger.log({   level: 'info',   message: 'What time is...

opening multiple ports tunnels ngrok in ubuntu

Location for the config yml file /home/example/.ngrok2/ngrok.yml content of config file authtoken: 4nq9771bPxe8ctg7LKr_2ClH7Y15Zqe4bWLWF9p tunnels: app-foo: addr: 80 proto: http host_header: app-foo.dev app-bar: addr: 80 proto: http host_header: app-bar.dev How to start ngrok using the config file: ngrok start --all

rename field in elastic Search

https://qiita.com/tkprof/items/e50368eb1473497a16d0 How to Rename an Elasticsearch field from columns: - {name: xxx, type: double} to columns: - {name: yyy, type: double} Pipeline API and reindex create a new Pipeline API : Rename Processor PUT _ingest/pipeline/pipeline_rename_xxx { "description" : "rename xxx", "processors" : [ { "rename": { "field": "xxx", "target_field": "yyy" } } ] } { "acknowledged": true } then reindex POST _reindex { "source": { "index": "source" }, "dest": { "index": "dest", "pipeline": "pipeline_rename_xxx" } }