
Working code for a Google Assistant smart card response in Dialogflow

'use strict';

const express = require('express');
const bodyParser = require('body-parser');
const NodeCache = require("node-cache");
const HashMap = require('hashmap');
//var courses = require('./index');
const chromeCast = require('./test.js');
const opn = require('opn');


process.env.DEBUG = 'actions-on-google:*';
const App = require('actions-on-google').DialogflowApp;
const {WebhookClient, Payload} = require('dialogflow-fulfillment');

const ytc = require('./testy2');
const search = require('./search');

const restService = express();
restService.use(bodyParser.json());
const { BasicCard } = require('actions-on-google'); // use v2 alpha

const DialogflowApp = require('actions-on-google').DialogflowApp; // Google Assistant helper library

const googleAssistantRequest = 'google'; // Constant to identify Google Assistant requests

restService.post('/look', function(request, response) {
    console.log('look request');
    start(request, response);
});

restService.listen((process.env.PORT || 4000), function() {
    console.log("Server listening on port " + (process.env.PORT || 4000));
});
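
// For reference, a minimal sketch of the Dialogflow v1 webhook request body that the
// /look handler (start() below) reads. Only the fields actually read by start() are
// shown; the constant name and all values are placeholders, not a real payload.
const sampleV1Request = {
    sessionId: 'example-session-id',             // read as request.body.sessionId
    originalRequest: { source: 'google' },       // used to detect the request source
    result: {
        action: 'question',                      // selects a handler in actionHandlers
        resolvedQuery: 'example user query',     // raw user query passed to the search module
        parameters: {},                          // entities extracted by Dialogflow
        metadata: { intentName: 'question' },
        contexts: []
    }
};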


// Module-level DialogflowApp instance; used only to access the rich response builders below
const app = new DialogflowApp();

function start(request, response) {
    // An action is a string used to identify what needs to be done in fulfillment
    let action = request.body.result.action; // https://dialogflow.com/docs/actions-and-parameters
    //  console.log(request.body);

    const agent = new WebhookClient({ request, response });
    // console.log('agent: %j', agent); // debug: inspect the WebhookClient if needed

    const userQuery = request.body.result.resolvedQuery;
    console.log(userQuery);
    // Parameters are any entities that Dialogflow has extracted from the request.
    const parameters = request.body.result.parameters; // https://dialogflow.com/docs/actions-and-parameters
    var params = parameters;
    var sessionID = request.body.sessionId;

    // Get the intent metadata from the request
    const metadata = request.body.result.metadata;
    const currentIntentName = metadata.intentName;

    // Contexts are objects used to track and store conversation state
    const inputContexts = request.body.result.contexts; // https://dialogflow.com/docs/contexts

    // Get the request source (Google Assistant, Slack, API, etc) and initialize DialogflowApp
    const requestSource = (request.body.originalRequest) ? request.body.originalRequest.source : undefined;
    const app = new DialogflowApp({
        request: request,
        response: response
    });



    // Create handlers for Dialogflow actions as well as a 'default' handler
    const actionHandlers = {
        // The default welcome intent has been matched, welcome the user (https://dialogflow.com/docs/events#default_welcome_intent)
        'input.welcome': () => {
            sendResponse('Hello, welcome to the Enterprise Tiger bot!'); // Send simple response to user
        },
        // The default fallback intent has been matched, try to recover (https://dialogflow.com/docs/intents#fallback_intents)
        'input.unknown': () => {
          let responseToUser = {
            googleRichResponse: googleRichResponse, // Optional rich response for Google Assistant surfaces
            //googleOutputContexts: ['weather', 2, { ['city']: 'rome' }], // Optional, uncomment to enable
            speech: 'This message is from Dialogflow\'s Cloud Functions for Firebase editor!', // spoken response
            text: 'This is from Dialogflow\'s Cloud Functions for Firebase editor! :-)' // displayed response
          };
          sendGoogleResponse(responseToUser);
        //  app.ask(richResponses);

        },
        // Default handler for unknown or undefined actions
        'default': () => {
            // Use the Actions on Google lib to respond to Google requests; for other requests use JSON
            let responseToUser = {
              googleRichResponse: googleRichResponse, // Optional rich response for Google Assistant surfaces
              //googleOutputContexts: ['weather', 2, { ['city']: 'rome' }], // Optional, uncomment to enable
              speech: 'This message is from Dialogflow\'s Cloud Functions for Firebase editor!', // spoken response
              text: 'This is from Dialogflow\'s Cloud Functions for Firebase editor! :-)' // displayed response
            };
            sendGoogleResponse(responseToUser);
            //  app.ask(richResponses);
        },
        'question': () => {
            // search.findVideoAndTime(userQuery, function(err, videoId, starttime) {
            //   console.log("videoId is " + videoId + " starttime is " + starttime);
            //   if (starttime - 5 > 0) {
            //     starttime = starttime - 5;
            //   }
            //   ytc.youtubecast(videoId, starttime);
            // });
            search.findVideoAndTime(userQuery, function(err, top3VideosDetails) {
                if (err) {
                    console.log('search error: ' + err);
                }
                const reply = videoResult(top3VideosDetails); // 'const' needed: an undeclared assignment would throw under 'use strict'
                app.tell(reply);
            });
        },
    };

    // If undefined or unknown action use the default handler
    if (!actionHandlers[action]) {
        action = 'default';
    }

    // Run the proper handler function to handle the request from Dialogflow
    actionHandlers[action]();

    // Function to send correctly formatted Google Assistant responses to Dialogflow which are then sent to the user
    function sendGoogleResponse (responseToUser) {
      if (typeof responseToUser === 'string') {
        app.ask(responseToUser); // Google Assistant response
      } else {
        // If speech or displayText is defined use it to respond
        let googleResponse = app.buildRichResponse().addSimpleResponse({
          speech: responseToUser.speech || responseToUser.displayText,
          displayText: responseToUser.displayText || responseToUser.speech
        });
        // Optional: Overwrite previous response with rich response
        if (responseToUser.googleRichResponse) {
          googleResponse = responseToUser.googleRichResponse;
        }
        // Optional: add contexts (https://dialogflow.com/docs/contexts)
        if (responseToUser.googleOutputContexts) {
          app.setContext(...responseToUser.googleOutputContexts);
        }
        console.log('Response to Dialogflow (AoG): ' + JSON.stringify(googleResponse));
        app.ask(googleResponse); // Send response to Dialogflow and Google Assistant
      }
    }

    // Function to send correctly formatted responses to Dialogflow which are then sent to the user
    function sendResponse(responseToUser) {
        // if the response is a string send it as a response to the user
        if (typeof responseToUser === 'string') {
          console.log("inside string if");
            let responseJson = {};
            responseJson.speech = responseToUser; // spoken response
            responseJson.displayText = responseToUser; // displayed response
            response.json(responseJson); // Send response to Dialogflow
        } else {
          console.log("inside non string else");
            // If the response to the user includes rich responses or contexts send them to Dialogflow
            let responseJson = {};

            // If speech or displayText is defined, use it to respond (if one isn't defined use the other's value)
            responseJson.speech = responseToUser.speech || responseToUser.displayText;
            responseJson.displayText = responseToUser.displayText || responseToUser.speech;
            responseJson.followupEvent = responseToUser.followupEvent;

            // Optional: add rich messages for integrations (https://dialogflow.com/docs/rich-messages)
            responseJson.data = responseToUser.richResponses;

            // Optional: add contexts (https://dialogflow.com/docs/contexts)
            responseJson.contextOut = responseToUser.outputContexts;
            console.log("outputjson : " + JSON.stringify(responseJson));
            response.json(responseJson); // Send response to Dialogflow

        }
    }
}
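
// For reference, a sketch of the Dialogflow v1 fulfillment JSON that sendResponse() above
// emits for a rich response object; the keys mirror the responseJson it builds and the
// values are placeholders:
// {
//   "speech": "spoken text",
//   "displayText": "displayed text",
//   "data": { ...platform rich messages passed as richResponses... },
//   "contextOut": [ ...output contexts... ],
//   "followupEvent": { ...optional follow-up event... }
// }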

function videoResult(top3VideosDetails) {
    console.log("\n\nTesting result: " + top3VideosDetails + "\n\n length = " + top3VideosDetails.length);
    var replyText = '';
    for (var i = 0; i < top3VideosDetails.length; i++) {
        var obj = JSON.parse(top3VideosDetails[i]);
        console.log("\n obj is " + obj);
        replyText = replyText + " \n  \n " + (i + 1) + ". Title : " + obj.title + " \n  \n Snippet: " + obj.text + " \n  \n ";
    }
    // Cast a hard-coded video (id 'MZZPF9rXzes', starting at 130 seconds) to the Chromecast
    ytc.youtubecast('MZZPF9rXzes', 130);
    return replyText;
}

const richResponses = {

  "speech": "This is a API.AI default speech response",
  "displayText": "This is a API.AI default display text response",
  "googleRichResponse":{
         "google":{
           "expectUserResponse": true,
           "expectedInputs": [
             {
               "inputPrompt": {
                 "richInitialPrompt": {
                   "items": [
                     {
                       "simpleResponse": {
                         "textToSpeech": "he ha"
                       }
                     },
                     {
                       "basicCard": {
                         "title": "hey man",
                         "subtitle": "fsdf",
                         "formattedText": "sdfsfdfsfdfd"
                       }
                     }
                   ]
                 }
               },
               "possibleIntents": [
                 {
                   "intent": "assistant.intent.action.TEXT"
                 }
               ]
             }
           ]
         }
     }
}

const richrespo2 = {
    "data": {
        "google": {
            "expect_user_response": true,
            "permissions_request": null
        },
    },
    "messages": [
        {
            "speech": "content to be read aloud", /* this is the message required by Api.AI's web interface */
            "type": 0
        },

        // Below are the Actions Rich Messages
        {
            "platform": "google",
            "type": "simple_response",
            "displayText": "top level text", /* basic top text used for screen devices */
            "textToSpeech": "voice speech to be read out loud"  /* only used for voice interface */
        },
        {
            "platform": "google",
            "type": "basic_card",
            "title": "title text",
            "subtitle": "subtitle text",
            "formattedText": "text with newlines and such",
            "image": {
                "url": "http://example.com/image.png",
                "accessibilityText": "image descrition for screen readers"  /* this property is now required */
            },
            "buttons": [
                {
                    "title": "Link title",
                    "openUrlAction": {
                        "url": "https://example.com/linkout.html"
                    }
                }
            ]
        },
        {
            "platform": "google",
            "type": "suggestion_chips",
            "suggestions": [
                {
                    "title": "Next"
                },
                {
                    "title": "Previous"
                },
                {
                    "title": "Return to Results"
                }
            ]
        }
    ]
}

// Construct a rich response for Google Assistant using the response builders
const googleRichResponse = app.buildRichResponse()
  .addSimpleResponse('This is the first simple response for Google Assistant')
  .addSuggestions(
    ['Suggestion Chip', 'Another Suggestion Chip'])
    // Create a basic card and add it to the rich response
  .addBasicCard(app.buildBasicCard(`This is a basic card.  Text in a
 basic card can include "quotes" and most other unicode characters
 including emoji 📱.  Basic cards also support some markdown
 formatting like *emphasis* or _italics_, **strong** or __bold__,
 and ***bold italic*** or ___strong emphasis___ as well as other things
 like line  \nbreaks`) // Note the two spaces before '\n' required for a
                        // line break to be rendered in the card
    .setSubtitle('This is a subtitle')
    .setTitle('Title: this is a title')
    .addButton('This is a button', 'https://assistant.google.com/')
    .setImage('https://developers.google.com/actions/images/badges/XPM_BADGING_GoogleAssistant_VER.png',
      'Image alternate text'))
  .addSimpleResponse({ speech: 'This is another simple response',
    displayText: 'This is another simple response 💁' });


const third = {
    "conversationToken": "",
    "expectUserResponse": true,
    "expectedInputs": [
        {
            "inputPrompt": {
                "initialPrompts": [
                    {
                        "ssml": "<speak>Here are <say-as interpret-as=\"characters\">SSML</say-as> samples. I can pause <break time=\"3\" />. I can play a sound <audio src=\"https://www.example.com/MY_WAVE_FILE.wav\">your wave file</audio>. I can speak in cardinals. Your position is <say-as interpret-as=\"cardinal\">10</say-as> in line. Or I can speak in ordinals. You are <say-as interpret-as=\"ordinal\">10</say-as> in line. Or I can even speak in digits. Your position in line is <say-as interpret-as=\"digits\">10</say-as>. I can also substitute phrases, like the <sub alias=\"World Wide Web Consortium\">W3C</sub>. Finally, I can speak a paragraph with two sentences. <p><s>This is sentence one.</s><s>This is sentence two.</s></p></speak>"
                    }
                ],
                "noInputPrompts": []
            },
            "possibleIntents": [
                {
                    "intent": "actions.intent.TEXT"
                }
            ]
        }
    ]
}


const fourth = {
  "conversationToken": "[]",
  "expectUserResponse": true,
  "expectedInputs": [
    {
      "inputPrompt": {
        "richInitialPrompt": {
          "items": [
            {
              "simpleResponse": {
                "textToSpeech": "he ha"
              }
            },
            {
              "basicCard": {
                "title": "hey man",
                "subtitle": "fsdf",
                "formattedText": "sdfsfdfsfdfd"
              }
            }
          ]
        }
      },
      "possibleIntents": [
        {
          "intent": "assistant.intent.action.TEXT"
        }
      ]
    }
  ]
}
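
// Note: `richResponses`, `richrespo2`, `third`, and `fourth` are not returned anywhere in
// this file; they are kept only as reference payload shapes. `third` and `fourth` are raw
// Actions on Google conversation webhook responses (an SSML prompt and a rich prompt with a
// basic card), while `richrespo2` shows the Dialogflow v1 fulfillment format with Google
// rich messages.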
