diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResult.java b/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResult.java index 87eb37d96ce1..182436f05562 100644 --- a/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResult.java +++ b/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResult.java @@ -764,7 +764,7 @@ public com.google.cloud.dialogflow.v2.ContextOrBuilder getOutputContextsOrBuilde *
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -778,7 +778,7 @@ public boolean hasIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -792,7 +792,7 @@ public com.google.cloud.dialogflow.v2.Intent getIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3401,7 +3401,7 @@ public com.google.cloud.dialogflow.v2.Context.Builder addOutputContextsBuilder(i
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3415,7 +3415,7 @@ public boolean hasIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3435,7 +3435,7 @@ public com.google.cloud.dialogflow.v2.Intent getIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3459,7 +3459,7 @@ public Builder setIntent(com.google.cloud.dialogflow.v2.Intent value) {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3480,7 +3480,7 @@ public Builder setIntent(com.google.cloud.dialogflow.v2.Intent.Builder builderFo
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3508,7 +3508,7 @@ public Builder mergeIntent(com.google.cloud.dialogflow.v2.Intent value) {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3530,7 +3530,7 @@ public Builder clearIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3546,7 +3546,7 @@ public com.google.cloud.dialogflow.v2.Intent.Builder getIntentBuilder() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -3566,7 +3566,7 @@ public com.google.cloud.dialogflow.v2.IntentOrBuilder getIntentOrBuilder() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResultOrBuilder.java b/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResultOrBuilder.java
index 036314075561..a07c1f911af9 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResultOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/java/com/google/cloud/dialogflow/v2/QueryResultOrBuilder.java
@@ -360,7 +360,7 @@ public interface QueryResultOrBuilder
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -372,7 +372,7 @@ public interface QueryResultOrBuilder
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
@@ -384,7 +384,7 @@ public interface QueryResultOrBuilder
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2.Intent intent = 11;
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/proto/google/cloud/dialogflow/v2/session.proto b/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/proto/google/cloud/dialogflow/v2/session.proto
index 815b4087865e..b87f313d4de1 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/proto/google/cloud/dialogflow/v2/session.proto
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2/src/main/proto/google/cloud/dialogflow/v2/session.proto
@@ -244,7 +244,7 @@ message QueryResult {
// The intent that matched the conversational query. Some, not
// all fields are filled in this message, including but not limited to:
- // `name`, `display_name` and `webhook_state`.
+ // `name`, `display_name`, `end_interaction` and `is_fallback`.
Intent intent = 11;
// The intent detection confidence. Values range from 0.0
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/AudioConfigProto.java b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/AudioConfigProto.java
index 1b06bb6cb40c..56aea4fe53f3 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/AudioConfigProto.java
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/AudioConfigProto.java
@@ -12,6 +12,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry);
}
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_cloud_dialogflow_v2beta1_SpeechWordInfo_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -44,53 +48,56 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"\n2google/cloud/dialogflow/v2beta1/audio_"
+ "config.proto\022\037google.cloud.dialogflow.v2"
+ "beta1\032\036google/protobuf/duration.proto\032\034g"
- + "oogle/api/annotations.proto\"\222\001\n\016SpeechWo"
- + "rdInfo\022\014\n\004word\030\003 \001(\t\022/\n\014start_offset\030\001 \001"
- + "(\0132\031.google.protobuf.Duration\022-\n\nend_off"
- + "set\030\002 \001(\0132\031.google.protobuf.Duration\022\022\n\n"
- + "confidence\030\004 \001(\002\"\261\002\n\020InputAudioConfig\022F\n"
- + "\016audio_encoding\030\001 \001(\0162..google.cloud.dia"
- + "logflow.v2beta1.AudioEncoding\022\031\n\021sample_"
- + "rate_hertz\030\002 \001(\005\022\025\n\rlanguage_code\030\003 \001(\t\022"
- + "\030\n\020enable_word_info\030\r \001(\010\022\024\n\014phrase_hint"
- + "s\030\004 \003(\t\022\r\n\005model\030\007 \001(\t\022J\n\rmodel_variant\030"
- + "\n \001(\01623.google.cloud.dialogflow.v2beta1."
- + "SpeechModelVariant\022\030\n\020single_utterance\030\010"
- + " \001(\010\"k\n\024VoiceSelectionParams\022\014\n\004name\030\001 \001"
- + "(\t\022E\n\013ssml_gender\030\002 \001(\01620.google.cloud.d"
- + "ialogflow.v2beta1.SsmlVoiceGender\"\270\001\n\026Sy"
- + "nthesizeSpeechConfig\022\025\n\rspeaking_rate\030\001 "
- + "\001(\001\022\r\n\005pitch\030\002 \001(\001\022\026\n\016volume_gain_db\030\003 \001"
- + "(\001\022\032\n\022effects_profile_id\030\005 \003(\t\022D\n\005voice\030"
- + "\004 \001(\01325.google.cloud.dialogflow.v2beta1."
- + "VoiceSelectionParams\"\327\001\n\021OutputAudioConf"
- + "ig\022L\n\016audio_encoding\030\001 \001(\01624.google.clou"
- + "d.dialogflow.v2beta1.OutputAudioEncoding"
- + "\022\031\n\021sample_rate_hertz\030\002 \001(\005\022Y\n\030synthesiz"
- + "e_speech_config\030\003 \001(\01327.google.cloud.dia"
- + "logflow.v2beta1.SynthesizeSpeechConfig*\373"
- + "\001\n\rAudioEncoding\022\036\n\032AUDIO_ENCODING_UNSPE"
- + "CIFIED\020\000\022\034\n\030AUDIO_ENCODING_LINEAR_16\020\001\022\027"
- + "\n\023AUDIO_ENCODING_FLAC\020\002\022\030\n\024AUDIO_ENCODIN"
- + "G_MULAW\020\003\022\026\n\022AUDIO_ENCODING_AMR\020\004\022\031\n\025AUD"
- + "IO_ENCODING_AMR_WB\020\005\022\033\n\027AUDIO_ENCODING_O"
- + "GG_OPUS\020\006\022)\n%AUDIO_ENCODING_SPEEX_WITH_H"
- + "EADER_BYTE\020\007*v\n\022SpeechModelVariant\022$\n SP"
- + "EECH_MODEL_VARIANT_UNSPECIFIED\020\000\022\026\n\022USE_"
- + "BEST_AVAILABLE\020\001\022\020\n\014USE_STANDARD\020\002\022\020\n\014US"
- + "E_ENHANCED\020\003*\215\001\n\017SsmlVoiceGender\022!\n\035SSML"
- + "_VOICE_GENDER_UNSPECIFIED\020\000\022\032\n\026SSML_VOIC"
- + "E_GENDER_MALE\020\001\022\034\n\030SSML_VOICE_GENDER_FEM"
- + "ALE\020\002\022\035\n\031SSML_VOICE_GENDER_NEUTRAL\020\003*\244\001\n"
- + "\023OutputAudioEncoding\022%\n!OUTPUT_AUDIO_ENC"
- + "ODING_UNSPECIFIED\020\000\022#\n\037OUTPUT_AUDIO_ENCO"
- + "DING_LINEAR_16\020\001\022\035\n\031OUTPUT_AUDIO_ENCODIN"
- + "G_MP3\020\002\022\"\n\036OUTPUT_AUDIO_ENCODING_OGG_OPU"
- + "S\020\003B\256\001\n#com.google.cloud.dialogflow.v2be"
- + "ta1B\020AudioConfigProtoP\001ZIgoogle.golang.o"
- + "rg/genproto/googleapis/cloud/dialogflow/"
- + "v2beta1;dialogflow\370\001\001\242\002\002DF\252\002\037Google.Clou"
- + "d.Dialogflow.V2beta1b\006proto3"
+ + "oogle/api/annotations.proto\"/\n\rSpeechCon"
+ + "text\022\017\n\007phrases\030\001 \003(\t\022\r\n\005boost\030\002 \001(\002\"\222\001\n"
+ + "\016SpeechWordInfo\022\014\n\004word\030\003 \001(\t\022/\n\014start_o"
+ + "ffset\030\001 \001(\0132\031.google.protobuf.Duration\022-"
+ + "\n\nend_offset\030\002 \001(\0132\031.google.protobuf.Dur"
+ + "ation\022\022\n\nconfidence\030\004 \001(\002\"\372\002\n\020InputAudio"
+ + "Config\022F\n\016audio_encoding\030\001 \001(\0162..google."
+ + "cloud.dialogflow.v2beta1.AudioEncoding\022\031"
+ + "\n\021sample_rate_hertz\030\002 \001(\005\022\025\n\rlanguage_co"
+ + "de\030\003 \001(\t\022\030\n\020enable_word_info\030\r \001(\010\022\024\n\014ph"
+ + "rase_hints\030\004 \003(\t\022G\n\017speech_contexts\030\013 \003("
+ + "\0132..google.cloud.dialogflow.v2beta1.Spee"
+ + "chContext\022\r\n\005model\030\007 \001(\t\022J\n\rmodel_varian"
+ + "t\030\n \001(\01623.google.cloud.dialogflow.v2beta"
+ + "1.SpeechModelVariant\022\030\n\020single_utterance"
+ + "\030\010 \001(\010\"k\n\024VoiceSelectionParams\022\014\n\004name\030\001"
+ + " \001(\t\022E\n\013ssml_gender\030\002 \001(\01620.google.cloud"
+ + ".dialogflow.v2beta1.SsmlVoiceGender\"\270\001\n\026"
+ + "SynthesizeSpeechConfig\022\025\n\rspeaking_rate\030"
+ + "\001 \001(\001\022\r\n\005pitch\030\002 \001(\001\022\026\n\016volume_gain_db\030\003"
+ + " \001(\001\022\032\n\022effects_profile_id\030\005 \003(\t\022D\n\005voic"
+ + "e\030\004 \001(\01325.google.cloud.dialogflow.v2beta"
+ + "1.VoiceSelectionParams\"\327\001\n\021OutputAudioCo"
+ + "nfig\022L\n\016audio_encoding\030\001 \001(\01624.google.cl"
+ + "oud.dialogflow.v2beta1.OutputAudioEncodi"
+ + "ng\022\031\n\021sample_rate_hertz\030\002 \001(\005\022Y\n\030synthes"
+ + "ize_speech_config\030\003 \001(\01327.google.cloud.d"
+ + "ialogflow.v2beta1.SynthesizeSpeechConfig"
+ + "*\373\001\n\rAudioEncoding\022\036\n\032AUDIO_ENCODING_UNS"
+ + "PECIFIED\020\000\022\034\n\030AUDIO_ENCODING_LINEAR_16\020\001"
+ + "\022\027\n\023AUDIO_ENCODING_FLAC\020\002\022\030\n\024AUDIO_ENCOD"
+ + "ING_MULAW\020\003\022\026\n\022AUDIO_ENCODING_AMR\020\004\022\031\n\025A"
+ + "UDIO_ENCODING_AMR_WB\020\005\022\033\n\027AUDIO_ENCODING"
+ + "_OGG_OPUS\020\006\022)\n%AUDIO_ENCODING_SPEEX_WITH"
+ + "_HEADER_BYTE\020\007*v\n\022SpeechModelVariant\022$\n "
+ + "SPEECH_MODEL_VARIANT_UNSPECIFIED\020\000\022\026\n\022US"
+ + "E_BEST_AVAILABLE\020\001\022\020\n\014USE_STANDARD\020\002\022\020\n\014"
+ + "USE_ENHANCED\020\003*\215\001\n\017SsmlVoiceGender\022!\n\035SS"
+ + "ML_VOICE_GENDER_UNSPECIFIED\020\000\022\032\n\026SSML_VO"
+ + "ICE_GENDER_MALE\020\001\022\034\n\030SSML_VOICE_GENDER_F"
+ + "EMALE\020\002\022\035\n\031SSML_VOICE_GENDER_NEUTRAL\020\003*\244"
+ + "\001\n\023OutputAudioEncoding\022%\n!OUTPUT_AUDIO_E"
+ + "NCODING_UNSPECIFIED\020\000\022#\n\037OUTPUT_AUDIO_EN"
+ + "CODING_LINEAR_16\020\001\022\035\n\031OUTPUT_AUDIO_ENCOD"
+ + "ING_MP3\020\002\022\"\n\036OUTPUT_AUDIO_ENCODING_OGG_O"
+ + "PUS\020\003B\256\001\n#com.google.cloud.dialogflow.v2"
+ + "beta1B\020AudioConfigProtoP\001ZIgoogle.golang"
+ + ".org/genproto/googleapis/cloud/dialogflo"
+ + "w/v2beta1;dialogflow\370\001\001\242\002\002DF\252\002\037Google.Cl"
+ + "oud.Dialogflow.V2beta1b\006proto3"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -107,8 +114,16 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.api.AnnotationsProto.getDescriptor(),
},
assigner);
- internal_static_google_cloud_dialogflow_v2beta1_SpeechWordInfo_descriptor =
+ internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_descriptor =
getDescriptor().getMessageTypes().get(0);
+ internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_descriptor,
+ new java.lang.String[] {
+ "Phrases", "Boost",
+ });
+ internal_static_google_cloud_dialogflow_v2beta1_SpeechWordInfo_descriptor =
+ getDescriptor().getMessageTypes().get(1);
internal_static_google_cloud_dialogflow_v2beta1_SpeechWordInfo_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dialogflow_v2beta1_SpeechWordInfo_descriptor,
@@ -116,7 +131,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Word", "StartOffset", "EndOffset", "Confidence",
});
internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_descriptor =
- getDescriptor().getMessageTypes().get(1);
+ getDescriptor().getMessageTypes().get(2);
internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dialogflow_v2beta1_InputAudioConfig_descriptor,
@@ -126,12 +141,13 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"LanguageCode",
"EnableWordInfo",
"PhraseHints",
+ "SpeechContexts",
"Model",
"ModelVariant",
"SingleUtterance",
});
internal_static_google_cloud_dialogflow_v2beta1_VoiceSelectionParams_descriptor =
- getDescriptor().getMessageTypes().get(2);
+ getDescriptor().getMessageTypes().get(3);
internal_static_google_cloud_dialogflow_v2beta1_VoiceSelectionParams_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dialogflow_v2beta1_VoiceSelectionParams_descriptor,
@@ -139,7 +155,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"Name", "SsmlGender",
});
internal_static_google_cloud_dialogflow_v2beta1_SynthesizeSpeechConfig_descriptor =
- getDescriptor().getMessageTypes().get(3);
+ getDescriptor().getMessageTypes().get(4);
internal_static_google_cloud_dialogflow_v2beta1_SynthesizeSpeechConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dialogflow_v2beta1_SynthesizeSpeechConfig_descriptor,
@@ -147,7 +163,7 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors(
"SpeakingRate", "Pitch", "VolumeGainDb", "EffectsProfileId", "Voice",
});
internal_static_google_cloud_dialogflow_v2beta1_OutputAudioConfig_descriptor =
- getDescriptor().getMessageTypes().get(4);
+ getDescriptor().getMessageTypes().get(5);
internal_static_google_cloud_dialogflow_v2beta1_OutputAudioConfig_fieldAccessorTable =
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_dialogflow_v2beta1_OutputAudioConfig_descriptor,
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/InputAudioConfig.java b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/InputAudioConfig.java
index 001d042fdcb5..94dae8d850b8 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/InputAudioConfig.java
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/InputAudioConfig.java
@@ -26,6 +26,7 @@ private InputAudioConfig() {
audioEncoding_ = 0;
languageCode_ = "";
phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ speechContexts_ = java.util.Collections.emptyList();
model_ = "";
modelVariant_ = 0;
}
@@ -102,6 +103,19 @@ private InputAudioConfig(
modelVariant_ = rawValue;
break;
}
+ case 90:
+ {
+ if (!((mutable_bitField0_ & 0x00000020) != 0)) {
+ speechContexts_ =
+ new java.util.ArrayList+ * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public java.util.List+ * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public java.util.List extends com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
+ getSpeechContextsOrBuilderList() {
+ return speechContexts_;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public int getSpeechContextsCount() {
+ return speechContexts_.size();
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContext getSpeechContexts(int index) {
+ return speechContexts_.get(index);
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
+ int index) {
+ return speechContexts_.get(index);
+ }
+
public static final int MODEL_FIELD_NUMBER = 7;
private volatile java.lang.Object model_;
/**
@@ -488,6 +585,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
.getNumber()) {
output.writeEnum(10, modelVariant_);
}
+ for (int i = 0; i < speechContexts_.size(); i++) {
+ output.writeMessage(11, speechContexts_.get(i));
+ }
if (enableWordInfo_ != false) {
output.writeBool(13, enableWordInfo_);
}
@@ -530,6 +630,9 @@ public int getSerializedSize() {
.getNumber()) {
size += com.google.protobuf.CodedOutputStream.computeEnumSize(10, modelVariant_);
}
+ for (int i = 0; i < speechContexts_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, speechContexts_.get(i));
+ }
if (enableWordInfo_ != false) {
size += com.google.protobuf.CodedOutputStream.computeBoolSize(13, enableWordInfo_);
}
@@ -554,6 +657,7 @@ public boolean equals(final java.lang.Object obj) {
if (!getLanguageCode().equals(other.getLanguageCode())) return false;
if (getEnableWordInfo() != other.getEnableWordInfo()) return false;
if (!getPhraseHintsList().equals(other.getPhraseHintsList())) return false;
+ if (!getSpeechContextsList().equals(other.getSpeechContextsList())) return false;
if (!getModel().equals(other.getModel())) return false;
if (modelVariant_ != other.modelVariant_) return false;
if (getSingleUtterance() != other.getSingleUtterance()) return false;
@@ -580,6 +684,10 @@ public int hashCode() {
hash = (37 * hash) + PHRASE_HINTS_FIELD_NUMBER;
hash = (53 * hash) + getPhraseHintsList().hashCode();
}
+ if (getSpeechContextsCount() > 0) {
+ hash = (37 * hash) + SPEECH_CONTEXTS_FIELD_NUMBER;
+ hash = (53 * hash) + getSpeechContextsList().hashCode();
+ }
hash = (37 * hash) + MODEL_FIELD_NUMBER;
hash = (53 * hash) + getModel().hashCode();
hash = (37 * hash) + MODEL_VARIANT_FIELD_NUMBER;
@@ -725,7 +833,9 @@ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
}
private void maybeForceBuilderInitialization() {
- if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {}
+ if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
+ getSpeechContextsFieldBuilder();
+ }
}
@java.lang.Override
@@ -741,6 +851,12 @@ public Builder clear() {
phraseHints_ = com.google.protobuf.LazyStringArrayList.EMPTY;
bitField0_ = (bitField0_ & ~0x00000010);
+ if (speechContextsBuilder_ == null) {
+ speechContexts_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000020);
+ } else {
+ speechContextsBuilder_.clear();
+ }
model_ = "";
modelVariant_ = 0;
@@ -785,6 +901,15 @@ public com.google.cloud.dialogflow.v2beta1.InputAudioConfig buildPartial() {
bitField0_ = (bitField0_ & ~0x00000010);
}
result.phraseHints_ = phraseHints_;
+ if (speechContextsBuilder_ == null) {
+ if (((bitField0_ & 0x00000020) != 0)) {
+ speechContexts_ = java.util.Collections.unmodifiableList(speechContexts_);
+ bitField0_ = (bitField0_ & ~0x00000020);
+ }
+ result.speechContexts_ = speechContexts_;
+ } else {
+ result.speechContexts_ = speechContextsBuilder_.build();
+ }
result.model_ = model_;
result.modelVariant_ = modelVariant_;
result.singleUtterance_ = singleUtterance_;
@@ -862,6 +987,33 @@ public Builder mergeFrom(com.google.cloud.dialogflow.v2beta1.InputAudioConfig ot
}
onChanged();
}
+ if (speechContextsBuilder_ == null) {
+ if (!other.speechContexts_.isEmpty()) {
+ if (speechContexts_.isEmpty()) {
+ speechContexts_ = other.speechContexts_;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ } else {
+ ensureSpeechContextsIsMutable();
+ speechContexts_.addAll(other.speechContexts_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.speechContexts_.isEmpty()) {
+ if (speechContextsBuilder_.isEmpty()) {
+ speechContextsBuilder_.dispose();
+ speechContextsBuilder_ = null;
+ speechContexts_ = other.speechContexts_;
+ bitField0_ = (bitField0_ & ~0x00000020);
+ speechContextsBuilder_ =
+ com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+ ? getSpeechContextsFieldBuilder()
+ : null;
+ } else {
+ speechContextsBuilder_.addAllMessages(other.speechContexts_);
+ }
+ }
+ }
if (!other.getModel().isEmpty()) {
model_ = other.model_;
onChanged();
@@ -1384,6 +1536,422 @@ public Builder addPhraseHintsBytes(com.google.protobuf.ByteString value) {
return this;
}
+ private java.util.List+ * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public java.util.List+ * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public int getSpeechContextsCount() {
+ if (speechContextsBuilder_ == null) {
+ return speechContexts_.size();
+ } else {
+ return speechContextsBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContext getSpeechContexts(int index) {
+ if (speechContextsBuilder_ == null) {
+ return speechContexts_.get(index);
+ } else {
+ return speechContextsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder setSpeechContexts(
+ int index, com.google.cloud.dialogflow.v2beta1.SpeechContext value) {
+ if (speechContextsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSpeechContextsIsMutable();
+ speechContexts_.set(index, value);
+ onChanged();
+ } else {
+ speechContextsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder setSpeechContexts(
+ int index, com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder builderForValue) {
+ if (speechContextsBuilder_ == null) {
+ ensureSpeechContextsIsMutable();
+ speechContexts_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ speechContextsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder addSpeechContexts(com.google.cloud.dialogflow.v2beta1.SpeechContext value) {
+ if (speechContextsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSpeechContextsIsMutable();
+ speechContexts_.add(value);
+ onChanged();
+ } else {
+ speechContextsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder addSpeechContexts(
+ int index, com.google.cloud.dialogflow.v2beta1.SpeechContext value) {
+ if (speechContextsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureSpeechContextsIsMutable();
+ speechContexts_.add(index, value);
+ onChanged();
+ } else {
+ speechContextsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder addSpeechContexts(
+ com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder builderForValue) {
+ if (speechContextsBuilder_ == null) {
+ ensureSpeechContextsIsMutable();
+ speechContexts_.add(builderForValue.build());
+ onChanged();
+ } else {
+ speechContextsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder addSpeechContexts(
+ int index, com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder builderForValue) {
+ if (speechContextsBuilder_ == null) {
+ ensureSpeechContextsIsMutable();
+ speechContexts_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ speechContextsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder addAllSpeechContexts(
+ java.lang.Iterable extends com.google.cloud.dialogflow.v2beta1.SpeechContext> values) {
+ if (speechContextsBuilder_ == null) {
+ ensureSpeechContextsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, speechContexts_);
+ onChanged();
+ } else {
+ speechContextsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder clearSpeechContexts() {
+ if (speechContextsBuilder_ == null) {
+ speechContexts_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000020);
+ onChanged();
+ } else {
+ speechContextsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public Builder removeSpeechContexts(int index) {
+ if (speechContextsBuilder_ == null) {
+ ensureSpeechContextsIsMutable();
+ speechContexts_.remove(index);
+ onChanged();
+ } else {
+ speechContextsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder getSpeechContextsBuilder(
+ int index) {
+ return getSpeechContextsFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(
+ int index) {
+ if (speechContextsBuilder_ == null) {
+ return speechContexts_.get(index);
+ } else {
+ return speechContextsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public java.util.List<? extends com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
+ getSpeechContextsOrBuilderList() {
+ if (speechContextsBuilder_ != null) {
+ return speechContextsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(speechContexts_);
+ }
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder addSpeechContextsBuilder() {
+ return getSpeechContextsFieldBuilder()
+ .addBuilder(com.google.cloud.dialogflow.v2beta1.SpeechContext.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder addSpeechContextsBuilder(
+ int index) {
+ return getSpeechContextsFieldBuilder()
+ .addBuilder(
+ index, com.google.cloud.dialogflow.v2beta1.SpeechContext.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ public java.util.List+ * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ java.util.List<com.google.cloud.dialogflow.v2beta1.SpeechContext> getSpeechContextsList();
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ com.google.cloud.dialogflow.v2beta1.SpeechContext getSpeechContexts(int index);
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ int getSpeechContextsCount();
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ java.util.List<? extends com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder>
+ getSpeechContextsOrBuilderList();
+ /**
+ *
+ *
+ * + * Optional. Context information to assist speech recognition. + * See [the Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints) + * for more details. + *+ * + *
repeated .google.cloud.dialogflow.v2beta1.SpeechContext speech_contexts = 11;
+ */
+ com.google.cloud.dialogflow.v2beta1.SpeechContextOrBuilder getSpeechContextsOrBuilder(int index);
+
/**
*
*
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResult.java b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResult.java
index 6ecaff97ea12..66c521aa2478 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResult.java
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResult.java
@@ -784,7 +784,7 @@ public com.google.cloud.dialogflow.v2beta1.ContextOrBuilder getOutputContextsOrB
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -798,7 +798,7 @@ public boolean hasIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -814,7 +814,7 @@ public com.google.cloud.dialogflow.v2beta1.Intent getIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3519,7 +3519,7 @@ public com.google.cloud.dialogflow.v2beta1.Context.Builder addOutputContextsBuil
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3533,7 +3533,7 @@ public boolean hasIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3553,7 +3553,7 @@ public com.google.cloud.dialogflow.v2beta1.Intent getIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3577,7 +3577,7 @@ public Builder setIntent(com.google.cloud.dialogflow.v2beta1.Intent value) {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3598,7 +3598,7 @@ public Builder setIntent(com.google.cloud.dialogflow.v2beta1.Intent.Builder buil
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3626,7 +3626,7 @@ public Builder mergeIntent(com.google.cloud.dialogflow.v2beta1.Intent value) {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3648,7 +3648,7 @@ public Builder clearIntent() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3664,7 +3664,7 @@ public com.google.cloud.dialogflow.v2beta1.Intent.Builder getIntentBuilder() {
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -3684,7 +3684,7 @@ public com.google.cloud.dialogflow.v2beta1.IntentOrBuilder getIntentOrBuilder()
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResultOrBuilder.java b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResultOrBuilder.java
index 89f61ed1de01..a95878cd0c7a 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResultOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/QueryResultOrBuilder.java
@@ -361,7 +361,7 @@ com.google.cloud.dialogflow.v2beta1.Intent.MessageOrBuilder getFulfillmentMessag
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -373,7 +373,7 @@ com.google.cloud.dialogflow.v2beta1.Intent.MessageOrBuilder getFulfillmentMessag
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
@@ -385,7 +385,7 @@ com.google.cloud.dialogflow.v2beta1.Intent.MessageOrBuilder getFulfillmentMessag
*
* The intent that matched the conversational query. Some, not
* all fields are filled in this message, including but not limited to:
- * `name`, `display_name` and `webhook_state`.
+ * `name`, `display_name`, `end_interaction` and `is_fallback`.
*
*
* .google.cloud.dialogflow.v2beta1.Intent intent = 11;
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/SpeechContext.java b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/SpeechContext.java
new file mode 100644
index 000000000000..125505d12bc6
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/java/com/google/cloud/dialogflow/v2beta1/SpeechContext.java
@@ -0,0 +1,907 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dialogflow/v2beta1/audio_config.proto
+
+package com.google.cloud.dialogflow.v2beta1;
+
+/**
+ *
+ *
+ * + * Hints for the speech recognizer to help with recognition in a specific + * conversation state. + *+ * + * Protobuf type {@code google.cloud.dialogflow.v2beta1.SpeechContext} + */ +public final class SpeechContext extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.cloud.dialogflow.v2beta1.SpeechContext) + SpeechContextOrBuilder { + private static final long serialVersionUID = 0L; + // Use SpeechContext.newBuilder() to construct. + private SpeechContext(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + + private SpeechContext() { + phrases_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet getUnknownFields() { + return this.unknownFields; + } + + private SpeechContext( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + java.lang.String s = input.readStringRequireUtf8(); + if (!((mutable_bitField0_ & 0x00000001) != 0)) { + phrases_ = new com.google.protobuf.LazyStringArrayList(); + mutable_bitField0_ |= 0x00000001; + } + phrases_.add(s); + break; + } + case 21: + { + boost_ = input.readFloat(); + break; + } + default: + { + if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { + done = true; + } + break; + } + } + } + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(this); + } catch (java.io.IOException e) { + throw new 
com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); + } finally { + if (((mutable_bitField0_ & 0x00000001) != 0)) { + phrases_ = phrases_.getUnmodifiableView(); + } + this.unknownFields = unknownFields.build(); + makeExtensionsImmutable(); + } + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.cloud.dialogflow.v2beta1.AudioConfigProto + .internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.cloud.dialogflow.v2beta1.AudioConfigProto + .internal_static_google_cloud_dialogflow_v2beta1_SpeechContext_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.cloud.dialogflow.v2beta1.SpeechContext.class, + com.google.cloud.dialogflow.v2beta1.SpeechContext.Builder.class); + } + + private int bitField0_; + public static final int PHRASES_FIELD_NUMBER = 1; + private com.google.protobuf.LazyStringList phrases_; + /** + * + * + *
+ * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public com.google.protobuf.ProtocolStringList getPhrasesList() {
+ return phrases_;
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public int getPhrasesCount() {
+ return phrases_.size();
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public java.lang.String getPhrases(int index) {
+ return phrases_.get(index);
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public com.google.protobuf.ByteString getPhrasesBytes(int index) {
+ return phrases_.getByteString(index);
+ }
+
+ public static final int BOOST_FIELD_NUMBER = 2;
+ private float boost_;
+ /**
+ *
+ *
+ * + * Optional. Boost for this context compared to other contexts: + * * If the boost is positive, Dialogflow will increase the probability that + * the phrases in this context are recognized over similar sounding phrases. + * * If the boost is unspecified or non-positive, Dialogflow will not apply + * any boost. + * Dialogflow recommends that you use boosts in the range (0, 20] and that you + * find a value that fits your use case with binary search. + *+ * + *
float boost = 2;
+ */
+ public float getBoost() {
+ return boost_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ for (int i = 0; i < phrases_.size(); i++) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, phrases_.getRaw(i));
+ }
+ if (boost_ != 0F) {
+ output.writeFloat(2, boost_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ {
+ int dataSize = 0;
+ for (int i = 0; i < phrases_.size(); i++) {
+ dataSize += computeStringSizeNoTag(phrases_.getRaw(i));
+ }
+ size += dataSize;
+ size += 1 * getPhrasesList().size();
+ }
+ if (boost_ != 0F) {
+ size += com.google.protobuf.CodedOutputStream.computeFloatSize(2, boost_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dialogflow.v2beta1.SpeechContext)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dialogflow.v2beta1.SpeechContext other =
+ (com.google.cloud.dialogflow.v2beta1.SpeechContext) obj;
+
+ if (!getPhrasesList().equals(other.getPhrasesList())) return false;
+ if (java.lang.Float.floatToIntBits(getBoost())
+ != java.lang.Float.floatToIntBits(other.getBoost())) return false;
+ if (!unknownFields.equals(other.unknownFields)) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (getPhrasesCount() > 0) {
+ hash = (37 * hash) + PHRASES_FIELD_NUMBER;
+ hash = (53 * hash) + getPhrasesList().hashCode();
+ }
+ hash = (37 * hash) + BOOST_FIELD_NUMBER;
+ hash = (53 * hash) + java.lang.Float.floatToIntBits(getBoost());
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(com.google.cloud.dialogflow.v2beta1.SpeechContext prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ * + * Hints for the speech recognizer to help with recognition in a specific + * conversation state. + *+ * + * Protobuf type {@code google.cloud.dialogflow.v2beta1.SpeechContext} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public com.google.protobuf.ProtocolStringList getPhrasesList() {
+ return phrases_.getUnmodifiableView();
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public int getPhrasesCount() {
+ return phrases_.size();
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public java.lang.String getPhrases(int index) {
+ return phrases_.get(index);
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public com.google.protobuf.ByteString getPhrasesBytes(int index) {
+ return phrases_.getByteString(index);
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public Builder setPhrases(int index, java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensurePhrasesIsMutable();
+ phrases_.set(index, value);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public Builder addPhrases(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensurePhrasesIsMutable();
+ phrases_.add(value);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public Builder addAllPhrases(java.lang.Iterable<java.lang.String> values) {
+ ensurePhrasesIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, phrases_);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * Optional. A list of strings containing words and phrases that the speech
+ * recognizer should recognize with higher likelihood.
+ * This list can be used to:
+ * * improve accuracy for words and phrases you expect the user to say,
+ * e.g. typical commands for your Dialogflow agent
+ * * add additional words to the speech recognizer vocabulary
+ * * ...
+ * See the [Cloud Speech
+ * documentation](https://cloud.google.com/speech-to-text/quotas) for usage
+ * limits.
+ * </pre>
+ *
+ * <code>
repeated string phrases = 1;
+ */
+ public Builder clearPhrases() {
+ phrases_ = com.google.protobuf.LazyStringArrayList.EMPTY;
+ bitField0_ = (bitField0_ & ~0x00000001);
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ public Builder addPhrasesBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+ ensurePhrasesIsMutable();
+ phrases_.add(value);
+ onChanged();
+ return this;
+ }
+
+ private float boost_;
+ /**
+ *
+ *
+ * + * Optional. Boost for this context compared to other contexts: + * * If the boost is positive, Dialogflow will increase the probability that + * the phrases in this context are recognized over similar sounding phrases. + * * If the boost is unspecified or non-positive, Dialogflow will not apply + * any boost. + * Dialogflow recommends that you use boosts in the range (0, 20] and that you + * find a value that fits your use case with binary search. + *+ * + *
float boost = 2;
+ */
+ public float getBoost() {
+ return boost_;
+ }
+ /**
+ *
+ *
+ * + * Optional. Boost for this context compared to other contexts: + * * If the boost is positive, Dialogflow will increase the probability that + * the phrases in this context are recognized over similar sounding phrases. + * * If the boost is unspecified or non-positive, Dialogflow will not apply + * any boost. + * Dialogflow recommends that you use boosts in the range (0, 20] and that you + * find a value that fits your use case with binary search. + *+ * + *
float boost = 2;
+ */
+ public Builder setBoost(float value) {
+
+ boost_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * + * Optional. Boost for this context compared to other contexts: + * * If the boost is positive, Dialogflow will increase the probability that + * the phrases in this context are recognized over similar sounding phrases. + * * If the boost is unspecified or non-positive, Dialogflow will not apply + * any boost. + * Dialogflow recommends that you use boosts in the range (0, 20] and that you + * find a value that fits your use case with binary search. + *+ * + *
float boost = 2;
+ */
+ public Builder clearBoost() {
+
+ boost_ = 0F;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dialogflow.v2beta1.SpeechContext)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dialogflow.v2beta1.SpeechContext)
+ private static final com.google.cloud.dialogflow.v2beta1.SpeechContext DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dialogflow.v2beta1.SpeechContext();
+ }
+
+ public static com.google.cloud.dialogflow.v2beta1.SpeechContext getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ java.util.List+ * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ int getPhrasesCount();
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ java.lang.String getPhrases(int index);
+ /**
+ *
+ *
+ * + * Optional. A list of strings containing words and phrases that the speech + * recognizer should recognize with higher likelihood. + * This list can be used to: + * * improve accuracy for words and phrases you expect the user to say, + * e.g. typical commands for your Dialogflow agent + * * add additional words to the speech recognizer vocabulary + * * ... + * See the [Cloud Speech + * documentation](https://cloud.google.com/speech-to-text/quotas) for usage + * limits. + *+ * + *
repeated string phrases = 1;
+ */
+ com.google.protobuf.ByteString getPhrasesBytes(int index);
+
+ /**
+ *
+ *
+ * + * Optional. Boost for this context compared to other contexts: + * * If the boost is positive, Dialogflow will increase the probability that + * the phrases in this context are recognized over similar sounding phrases. + * * If the boost is unspecified or non-positive, Dialogflow will not apply + * any boost. + * Dialogflow recommends that you use boosts in the range (0, 20] and that you + * find a value that fits your use case with binary search. + *+ * + *
float boost = 2;
+ */
+ float getBoost();
+}
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/audio_config.proto b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/audio_config.proto
index 3de27b94161d..8b62caa025d9 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/audio_config.proto
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/audio_config.proto
@@ -77,29 +77,32 @@ enum AudioEncoding {
AUDIO_ENCODING_SPEEX_WITH_HEADER_BYTE = 7;
}
-// Information for a word recognized by the speech recognizer.
-message SpeechWordInfo {
- // The word this info is for.
- string word = 3;
-
- // Time offset relative to the beginning of the audio that corresponds to the
- // start of the spoken word. This is an experimental feature and the accuracy
- // of the time offset can vary.
- google.protobuf.Duration start_offset = 1;
-
- // Time offset relative to the beginning of the audio that corresponds to the
- // end of the spoken word. This is an experimental feature and the accuracy of
- // the time offset can vary.
- google.protobuf.Duration end_offset = 2;
-
- // The Speech confidence between 0.0 and 1.0 for this word. A higher number
- // indicates an estimated greater likelihood that the recognized word is
- // correct. The default of 0.0 is a sentinel value indicating that confidence
- // was not set.
+// Hints for the speech recognizer to help with recognition in a specific
+// conversation state.
+message SpeechContext {
+ // Optional. A list of strings containing words and phrases that the speech
+ // recognizer should recognize with higher likelihood.
//
- // This field is not guaranteed to be fully stable over time for the same
- // audio input. Users should also not rely on it to always be provided.
- float confidence = 4;
+ // This list can be used to:
+ // * improve accuracy for words and phrases you expect the user to say,
+ // e.g. typical commands for your Dialogflow agent
+ // * add additional words to the speech recognizer vocabulary
+ // * ...
+ //
+ // See the [Cloud Speech
+ // documentation](https://cloud.google.com/speech-to-text/quotas) for usage
+ // limits.
+ repeated string phrases = 1;
+
+ // Optional. Boost for this context compared to other contexts:
+ // * If the boost is positive, Dialogflow will increase the probability that
+ // the phrases in this context are recognized over similar sounding phrases.
+ // * If the boost is unspecified or non-positive, Dialogflow will not apply
+ // any boost.
+ //
+ // Dialogflow recommends that you use boosts in the range (0, 20] and that you
+ // find a value that fits your use case with binary search.
+ float boost = 2;
}
// Variant of the specified [Speech model][google.cloud.dialogflow.v2beta1.InputAudioConfig.model] to use.
@@ -145,6 +148,31 @@ enum SpeechModelVariant {
USE_ENHANCED = 3;
}
+// Information for a word recognized by the speech recognizer.
+message SpeechWordInfo {
+ // The word this info is for.
+ string word = 3;
+
+ // Time offset relative to the beginning of the audio that corresponds to the
+ // start of the spoken word. This is an experimental feature and the accuracy
+ // of the time offset can vary.
+ google.protobuf.Duration start_offset = 1;
+
+ // Time offset relative to the beginning of the audio that corresponds to the
+ // end of the spoken word. This is an experimental feature and the accuracy of
+ // the time offset can vary.
+ google.protobuf.Duration end_offset = 2;
+
+ // The Speech confidence between 0.0 and 1.0 for this word. A higher number
+ // indicates an estimated greater likelihood that the recognized word is
+ // correct. The default of 0.0 is a sentinel value indicating that confidence
+ // was not set.
+ //
+ // This field is not guaranteed to be fully stable over time for the same
+ // audio input. Users should also not rely on it to always be provided.
+ float confidence = 4;
+}
+
// Instructs the speech recognizer on how to process the audio content.
message InputAudioConfig {
// Required. Audio encoding of the audio content to process.
@@ -178,6 +206,13 @@ message InputAudioConfig {
// for more details.
repeated string phrase_hints = 4;
+ // Optional. Context information to assist speech recognition.
+ //
+ // See [the Cloud Speech
+ // documentation](https://cloud.google.com/speech-to-text/docs/basics#phrase-hints)
+ // for more details.
+ repeated SpeechContext speech_contexts = 11;
+
// Optional. Which Speech model to select for the given request. Select the
// model best suited to your domain to get best results. If a model is not
// explicitly specified, then we auto-select a model based on the parameters
@@ -207,20 +242,6 @@ message InputAudioConfig {
bool single_utterance = 8;
}
-// Description of which voice to use for speech synthesis.
-message VoiceSelectionParams {
- // Optional. The name of the voice. If not set, the service will choose a
- // voice based on the other parameters such as language_code and gender.
- string name = 1;
-
- // Optional. The preferred gender of the voice. If not set, the service will
- // choose a voice based on the other parameters such as language_code and
- // name. Note that this is only a preference, not requirement. If a
- // voice of the appropriate gender is not available, the synthesizer should
- // substitute a voice with a different gender rather than failing the request.
- SsmlVoiceGender ssml_gender = 2;
-}
-
// Gender of the voice as described in
// [SSML voice element](https://www.w3.org/TR/speech-synthesis11/#edef_voice).
enum SsmlVoiceGender {
@@ -238,6 +259,20 @@ enum SsmlVoiceGender {
SSML_VOICE_GENDER_NEUTRAL = 3;
}
+// Description of which voice to use for speech synthesis.
+message VoiceSelectionParams {
+ // Optional. The name of the voice. If not set, the service will choose a
+ // voice based on the other parameters such as language_code and gender.
+ string name = 1;
+
+ // Optional. The preferred gender of the voice. If not set, the service will
+ // choose a voice based on the other parameters such as language_code and
+ // name. Note that this is only a preference, not a requirement. If a
+ // voice of the appropriate gender is not available, the synthesizer should
+ // substitute a voice with a different gender rather than failing the request.
+ SsmlVoiceGender ssml_gender = 2;
+}
+
// Configuration of how speech should be synthesized.
message SynthesizeSpeechConfig {
// Optional. Speaking rate/speed, in the range [0.25, 4.0]. 1.0 is the normal
@@ -270,22 +305,6 @@ message SynthesizeSpeechConfig {
VoiceSelectionParams voice = 4;
}
-// Instructs the speech synthesizer how to generate the output audio content.
-message OutputAudioConfig {
- // Required. Audio encoding of the synthesized audio content.
- OutputAudioEncoding audio_encoding = 1;
-
- // Optional. The synthesis sample rate (in hertz) for this audio. If not
- // provided, then the synthesizer will use the default sample rate based on
- // the audio encoding. If this is different from the voice's natural sample
- // rate, then the synthesizer will honor this request by converting to the
- // desired sample rate (which might result in worse audio quality).
- int32 sample_rate_hertz = 2;
-
- // Optional. Configuration of how speech should be synthesized.
- SynthesizeSpeechConfig synthesize_speech_config = 3;
-}
-
// Audio encoding of the output audio format in Text-To-Speech.
enum OutputAudioEncoding {
// Not specified.
@@ -304,3 +323,19 @@ enum OutputAudioEncoding {
// than MP3 while using approximately the same bitrate.
OUTPUT_AUDIO_ENCODING_OGG_OPUS = 3;
}
+
+// Instructs the speech synthesizer how to generate the output audio content.
+message OutputAudioConfig {
+ // Required. Audio encoding of the synthesized audio content.
+ OutputAudioEncoding audio_encoding = 1;
+
+ // Optional. The synthesis sample rate (in hertz) for this audio. If not
+ // provided, then the synthesizer will use the default sample rate based on
+ // the audio encoding. If this is different from the voice's natural sample
+ // rate, then the synthesizer will honor this request by converting to the
+ // desired sample rate (which might result in worse audio quality).
+ int32 sample_rate_hertz = 2;
+
+ // Optional. Configuration of how speech should be synthesized.
+ SynthesizeSpeechConfig synthesize_speech_config = 3;
+}
diff --git a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/session.proto b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/session.proto
index 8d0f91f4597a..d911d8477361 100644
--- a/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/session.proto
+++ b/google-api-grpc/proto-google-cloud-dialogflow-v2beta1/src/main/proto/google/cloud/dialogflow/v2beta1/session.proto
@@ -270,7 +270,7 @@ message QueryResult {
// The intent that matched the conversational query. Some, not
// all fields are filled in this message, including but not limited to:
- // `name`, `display_name` and `webhook_state`.
+ // `name`, `display_name`, `end_interaction` and `is_fallback`.
Intent intent = 11;
// The intent detection confidence. Values range from 0.0
diff --git a/google-cloud-clients/google-cloud-dialogflow/synth.metadata b/google-cloud-clients/google-cloud-dialogflow/synth.metadata
index 2fef1e907a78..64781bbc106f 100644
--- a/google-cloud-clients/google-cloud-dialogflow/synth.metadata
+++ b/google-cloud-clients/google-cloud-dialogflow/synth.metadata
@@ -1,5 +1,5 @@
{
- "updateTime": "2019-07-25T07:44:19.651403Z",
+ "updateTime": "2019-07-26T07:43:37.903627Z",
"sources": [
{
"generator": {
@@ -12,8 +12,8 @@
"git": {
"name": "googleapis",
"remote": "https://github.com/googleapis/googleapis.git",
- "sha": "4b12afe72950f36bef6f196a05f4430e4421a873",
- "internalRef": "259790363"
+ "sha": "0906a9e74f4db789aee7fc5016ab828d3dd24f03",
+ "internalRef": "260061471"
}
}
],