diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java index 878cac6c3a8f..9415c1b447bb 100644 --- a/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java +++ b/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobControllerGrpc.java @@ -419,7 +419,8 @@ public void updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ @@ -578,7 +579,8 @@ public void updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ @@ -686,7 +688,8 @@ public com.google.cloud.dataproc.v1.Job updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ @@ -791,7 +794,8 @@ protected JobControllerFutureStub build( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
      * 
*/ diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java index e264e35c35a2..acdde52b32a3 100644 --- a/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java +++ b/google-api-grpc/grpc-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/WorkflowTemplateServiceGrpc.java @@ -558,7 +558,8 @@ public void instantiateWorkflowTemplate( *
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -774,7 +775,8 @@ public void instantiateWorkflowTemplate(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -938,7 +940,8 @@ public com.google.longrunning.Operation instantiateWorkflowTemplate(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -1094,7 +1097,8 @@ protected WorkflowTemplateServiceFutureStub build(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java
index 8a110a8f9ce7..88b2dd93fb70 100644
--- a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java
+++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobControllerGrpc.java
@@ -429,7 +429,8 @@ public void updateJob(
      * 
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
      * 
*/ @@ -589,7 +590,8 @@ public void updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
      * 
*/ @@ -697,7 +699,8 @@ public com.google.cloud.dataproc.v1beta2.Job updateJob( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
      * 
*/ @@ -802,7 +805,8 @@ protected JobControllerFutureStub build( *
      * Starts a job cancellation request. To access the job resource
      * after cancellation, call
-     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+     * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+     * or
      * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
      * 
*/ diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java index 7f36cf78e37a..ec0d02ff76ce 100644 --- a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java +++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/WorkflowTemplateServiceGrpc.java @@ -566,7 +566,8 @@ public void instantiateWorkflowTemplate( *
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -782,7 +783,8 @@ public void instantiateWorkflowTemplate(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -946,7 +948,8 @@ public com.google.longrunning.Operation instantiateWorkflowTemplate(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
@@ -1103,7 +1106,8 @@ protected WorkflowTemplateServiceFutureStub build(
      * 
      * Instantiates a template and begins execution.
      * This method is equivalent to executing the sequence
-     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+     * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate],
+     * [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
      * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
      * The returned Operation can be used to track execution of
      * workflow by polling
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfig.java
index 5579c309b128..57f0be33c68a 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfig.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfig.java
@@ -106,13 +106,15 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    * 
    * Full URL, partial URI, or short name of the accelerator type resource to
    * expose to this instance. See
-   * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+   * [Compute Engine
+   * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
    * Examples:
    * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `nvidia-tesla-k80`
    * **Auto Zone Exception**: If you are using the Cloud Dataproc
-   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * [Auto Zone
+   * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
    * feature, you must use the short name of the accelerator type
    * resource, for example, `nvidia-tesla-k80`.
    * 
@@ -136,13 +138,15 @@ public java.lang.String getAcceleratorTypeUri() { *
    * Full URL, partial URI, or short name of the accelerator type resource to
    * expose to this instance. See
-   * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+   * [Compute Engine
+   * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
    * Examples:
    * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `nvidia-tesla-k80`
    * **Auto Zone Exception**: If you are using the Cloud Dataproc
-   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * [Auto Zone
+   * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
    * feature, you must use the short name of the accelerator type
    * resource, for example, `nvidia-tesla-k80`.
    * 
@@ -516,13 +520,15 @@ public Builder mergeFrom( *
      * Full URL, partial URI, or short name of the accelerator type resource to
      * expose to this instance. See
-     * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+     * [Compute Engine
+     * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
      * Examples:
      * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `nvidia-tesla-k80`
      * **Auto Zone Exception**: If you are using the Cloud Dataproc
-     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * [Auto Zone
+     * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
      * feature, you must use the short name of the accelerator type
      * resource, for example, `nvidia-tesla-k80`.
      * 
@@ -546,13 +552,15 @@ public java.lang.String getAcceleratorTypeUri() { *
      * Full URL, partial URI, or short name of the accelerator type resource to
      * expose to this instance. See
-     * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+     * [Compute Engine
+     * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
      * Examples:
      * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `nvidia-tesla-k80`
      * **Auto Zone Exception**: If you are using the Cloud Dataproc
-     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * [Auto Zone
+     * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
      * feature, you must use the short name of the accelerator type
      * resource, for example, `nvidia-tesla-k80`.
      * 
@@ -576,13 +584,15 @@ public com.google.protobuf.ByteString getAcceleratorTypeUriBytes() { *
      * Full URL, partial URI, or short name of the accelerator type resource to
      * expose to this instance. See
-     * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+     * [Compute Engine
+     * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
      * Examples:
      * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `nvidia-tesla-k80`
      * **Auto Zone Exception**: If you are using the Cloud Dataproc
-     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * [Auto Zone
+     * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
      * feature, you must use the short name of the accelerator type
      * resource, for example, `nvidia-tesla-k80`.
      * 
@@ -604,13 +614,15 @@ public Builder setAcceleratorTypeUri(java.lang.String value) { *
      * Full URL, partial URI, or short name of the accelerator type resource to
      * expose to this instance. See
-     * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+     * [Compute Engine
+     * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
      * Examples:
      * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `nvidia-tesla-k80`
      * **Auto Zone Exception**: If you are using the Cloud Dataproc
-     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * [Auto Zone
+     * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
      * feature, you must use the short name of the accelerator type
      * resource, for example, `nvidia-tesla-k80`.
      * 
@@ -629,13 +641,15 @@ public Builder clearAcceleratorTypeUri() { *
      * Full URL, partial URI, or short name of the accelerator type resource to
      * expose to this instance. See
-     * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+     * [Compute Engine
+     * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
      * Examples:
      * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
      * * `nvidia-tesla-k80`
      * **Auto Zone Exception**: If you are using the Cloud Dataproc
-     * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+     * [Auto Zone
+     * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
      * feature, you must use the short name of the accelerator type
      * resource, for example, `nvidia-tesla-k80`.
      * 
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfigOrBuilder.java index aaf5cfabf1e6..80c0e2c49f8b 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/AcceleratorConfigOrBuilder.java @@ -14,13 +14,15 @@ public interface AcceleratorConfigOrBuilder *
    * Full URL, partial URI, or short name of the accelerator type resource to
    * expose to this instance. See
-   * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+   * [Compute Engine
+   * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
    * Examples:
    * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `nvidia-tesla-k80`
    * **Auto Zone Exception**: If you are using the Cloud Dataproc
-   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * [Auto Zone
+   * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
    * feature, you must use the short name of the accelerator type
    * resource, for example, `nvidia-tesla-k80`.
    * 
@@ -34,13 +36,15 @@ public interface AcceleratorConfigOrBuilder *
    * Full URL, partial URI, or short name of the accelerator type resource to
    * expose to this instance. See
-   * [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+   * [Compute Engine
+   * AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
    * Examples:
    * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `nvidia-tesla-k80`
    * **Auto Zone Exception**: If you are using the Cloud Dataproc
-   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * [Auto Zone
+   * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
    * feature, you must use the short name of the accelerator type
    * resource, for example, `nvidia-tesla-k80`.
    * 
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Cluster.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Cluster.java index 80a6248e7c02..311193dde42d 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Cluster.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Cluster.java @@ -362,8 +362,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -387,8 +388,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -404,8 +406,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -425,8 +428,9 @@ public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.Strin * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -599,8 +603,8 @@ public com.google.protobuf.ByteString getClusterUuidBytes() { * *
    * Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -613,8 +617,8 @@ public boolean hasMetrics() { * *
    * Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -629,8 +633,8 @@ public com.google.cloud.dataproc.v1.ClusterMetrics getMetrics() { * *
    * Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -1587,8 +1591,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1612,8 +1617,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1629,8 +1635,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1651,8 +1658,9 @@ public java.lang.String getLabelsOrDefault( * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1680,8 +1688,9 @@ public Builder clearLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -1706,8 +1715,9 @@ public java.util.Map getMutableLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -1730,8 +1740,9 @@ public Builder putLabels(java.lang.String key, java.lang.String value) { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -2387,8 +2398,8 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2401,8 +2412,8 @@ public boolean hasMetrics() { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2421,8 +2432,8 @@ public com.google.cloud.dataproc.v1.ClusterMetrics getMetrics() { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2445,8 +2456,8 @@ public Builder setMetrics(com.google.cloud.dataproc.v1.ClusterMetrics value) { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2466,8 +2477,8 @@ public Builder setMetrics(com.google.cloud.dataproc.v1.ClusterMetrics.Builder bu * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2494,8 +2505,8 @@ public Builder mergeMetrics(com.google.cloud.dataproc.v1.ClusterMetrics value) { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2516,8 +2527,8 @@ public Builder clearMetrics() { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2532,8 +2543,8 @@ public com.google.cloud.dataproc.v1.ClusterMetrics.Builder getMetricsBuilder() { * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -2552,8 +2563,8 @@ public com.google.cloud.dataproc.v1.ClusterMetricsOrBuilder getMetricsOrBuilder( * *
      * Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java index c251c40bd113..3c5100476e6c 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfig.java @@ -487,8 +487,10 @@ public com.google.cloud.dataproc.v1.SoftwareConfigOrBuilder getSoftwareConfigOrB * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -510,8 +512,10 @@ public com.google.cloud.dataproc.v1.SoftwareConfigOrBuilder getSoftwareConfigOrB * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -533,8 +537,10 @@ public com.google.cloud.dataproc.v1.SoftwareConfigOrBuilder getSoftwareConfigOrB * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -555,8 +561,10 @@ public int getInitializationActionsCount() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -577,8 +585,10 @@ public com.google.cloud.dataproc.v1.NodeInitializationAction getInitializationAc * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2277,8 +2287,10 @@ private void ensureInitializationActionsIsMutable() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2305,8 +2317,10 @@ private void ensureInitializationActionsIsMutable() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2332,8 +2346,10 @@ public int getInitializationActionsCount() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2360,8 +2376,10 @@ public com.google.cloud.dataproc.v1.NodeInitializationAction getInitializationAc * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2394,8 +2412,10 @@ public Builder setInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2425,8 +2445,10 @@ public Builder setInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2459,8 +2481,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2493,8 +2517,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2524,8 +2550,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2555,8 +2583,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2587,8 +2617,10 @@ public Builder addAllInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2617,8 +2649,10 @@ public Builder clearInitializationActions() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2647,8 +2681,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2671,8 +2707,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2699,8 +2737,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2727,8 +2767,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2752,8 +2794,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2778,8 +2822,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java index 498a1ca281ee..ceb6a9678d98 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterConfigOrBuilder.java @@ -213,8 +213,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. 
By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -234,8 +236,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -254,8 +258,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -274,8 +280,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -295,8 +303,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's `role` metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... 
master specific actions ... * else diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java index 694910f3c03e..e1d1173ac4a0 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/ClusterOrBuilder.java @@ -94,8 +94,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -109,8 +110,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -127,8 +129,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
* Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -142,8 +145,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -157,8 +161,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -276,8 +281,8 @@ public interface ClusterOrBuilder * *
    * Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -288,8 +293,8 @@ public interface ClusterOrBuilder * *
    * Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; @@ -300,8 +305,8 @@ public interface ClusterOrBuilder * *
    * Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1.ClusterMetrics metrics = 9; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java index 5d4363e29f26..12c9ffeab496 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequest.java @@ -257,10 +257,11 @@ public com.google.cloud.dataproc.v1.ClusterOrBuilder getClusterOrBuilder() { * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -285,10 +286,11 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1068,10 +1070,11 @@ public com.google.cloud.dataproc.v1.ClusterOrBuilder getClusterOrBuilder() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1096,10 +1099,11 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1124,10 +1128,11 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1150,10 +1155,11 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1173,10 +1179,11 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java
index b184fa2abe11..9f3c6fea3d09 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/CreateClusterRequestOrBuilder.java
@@ -88,10 +88,11 @@ public interface CreateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -106,10 +107,11 @@ public interface CreateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java
index ef42fc05022e..3c916cff3428 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequest.java
@@ -306,10 +306,11 @@ public com.google.protobuf.ByteString getClusterUuidBytes() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -334,10 +335,11 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1133,10 +1135,11 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1161,10 +1164,11 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1189,10 +1193,11 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1215,10 +1220,11 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1238,10 +1244,11 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java
index aa1568386b61..fd3e5387be8c 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/DeleteClusterRequestOrBuilder.java
@@ -101,10 +101,11 @@ public interface DeleteClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -119,10 +120,11 @@ public interface DeleteClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java
index 7b6f21ee7cbb..158050f4de16 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfig.java
@@ -355,11 +355,11 @@ public com.google.protobuf.ByteString getSubnetworkUriBytes() {
    *
    * 
    * Optional. If true, all instances in the cluster will only have internal IP
-   * addresses. By default, clusters are not restricted to internal IP addresses,
-   * and will have ephemeral external IP addresses assigned to each instance.
-   * This `internal_ip_only` restriction can only be enabled for subnetwork
-   * enabled networks, and all off-cluster dependencies must be configured to be
-   * accessible without external IP addresses.
+   * addresses. By default, clusters are not restricted to internal IP
+   * addresses, and will have ephemeral external IP addresses assigned to each
+   * instance. This `internal_ip_only` restriction can only be enabled for
+   * subnetwork enabled networks, and all off-cluster dependencies must be
+   * configured to be accessible without external IP addresses.
    * 
* * bool internal_ip_only = 7; @@ -379,7 +379,8 @@ public boolean getInternalIpOnly() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -406,7 +407,8 @@ public java.lang.String getServiceAccount() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -601,7 +603,8 @@ public int getMetadataCount() { * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -622,7 +625,8 @@ public java.util.Map getMetadata() { * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -635,7 +639,8 @@ public java.util.Map getMetadataMap() { * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -653,7 +658,8 @@ public java.lang.String getMetadataOrDefault( * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -1556,11 +1562,11 @@ public Builder setSubnetworkUriBytes(com.google.protobuf.ByteString value) { * *
      * Optional. If true, all instances in the cluster will only have internal IP
-     * addresses. By default, clusters are not restricted to internal IP addresses,
-     * and will have ephemeral external IP addresses assigned to each instance.
-     * This `internal_ip_only` restriction can only be enabled for subnetwork
-     * enabled networks, and all off-cluster dependencies must be configured to be
-     * accessible without external IP addresses.
+     * addresses. By default, clusters are not restricted to internal IP
+     * addresses, and will have ephemeral external IP addresses assigned to each
+     * instance. This `internal_ip_only` restriction can only be enabled for
+     * subnetwork enabled networks, and all off-cluster dependencies must be
+     * configured to be accessible without external IP addresses.
      * 
* * bool internal_ip_only = 7; @@ -1573,11 +1579,11 @@ public boolean getInternalIpOnly() { * *
      * Optional. If true, all instances in the cluster will only have internal IP
-     * addresses. By default, clusters are not restricted to internal IP addresses,
-     * and will have ephemeral external IP addresses assigned to each instance.
-     * This `internal_ip_only` restriction can only be enabled for subnetwork
-     * enabled networks, and all off-cluster dependencies must be configured to be
-     * accessible without external IP addresses.
+     * addresses. By default, clusters are not restricted to internal IP
+     * addresses, and will have ephemeral external IP addresses assigned to each
+     * instance. This `internal_ip_only` restriction can only be enabled for
+     * subnetwork enabled networks, and all off-cluster dependencies must be
+     * configured to be accessible without external IP addresses.
      * 
* * bool internal_ip_only = 7; @@ -1593,11 +1599,11 @@ public Builder setInternalIpOnly(boolean value) { * *
      * Optional. If true, all instances in the cluster will only have internal IP
-     * addresses. By default, clusters are not restricted to internal IP addresses,
-     * and will have ephemeral external IP addresses assigned to each instance.
-     * This `internal_ip_only` restriction can only be enabled for subnetwork
-     * enabled networks, and all off-cluster dependencies must be configured to be
-     * accessible without external IP addresses.
+     * addresses. By default, clusters are not restricted to internal IP
+     * addresses, and will have ephemeral external IP addresses assigned to each
+     * instance. This `internal_ip_only` restriction can only be enabled for
+     * subnetwork enabled networks, and all off-cluster dependencies must be
+     * configured to be accessible without external IP addresses.
      * 
* * bool internal_ip_only = 7; @@ -1619,7 +1625,8 @@ public Builder clearInternalIpOnly() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1646,7 +1653,8 @@ public java.lang.String getServiceAccount() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1673,7 +1681,8 @@ public com.google.protobuf.ByteString getServiceAccountBytes() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1698,7 +1707,8 @@ public Builder setServiceAccount(java.lang.String value) { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1720,7 +1730,8 @@ public Builder clearServiceAccount() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -2154,7 +2165,8 @@ public int getMetadataCount() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2175,7 +2187,8 @@ public java.util.Map getMetadata() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2188,7 +2201,8 @@ public java.util.Map getMetadataMap() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2206,7 +2220,8 @@ public java.lang.String getMetadataOrDefault( * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2231,7 +2246,8 @@ public Builder clearMetadata() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2253,7 +2269,8 @@ public java.util.Map getMutableMetadata() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2273,7 +2290,8 @@ public Builder putMetadata(java.lang.String key, java.lang.String value) { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java index a253f4df1e08..0d73bf7c3a55 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/GceClusterConfigOrBuilder.java @@ -118,11 +118,11 @@ public interface GceClusterConfigOrBuilder * *
    * Optional. If true, all instances in the cluster will only have internal IP
-   * addresses. By default, clusters are not restricted to internal IP addresses,
-   * and will have ephemeral external IP addresses assigned to each instance.
-   * This `internal_ip_only` restriction can only be enabled for subnetwork
-   * enabled networks, and all off-cluster dependencies must be configured to be
-   * accessible without external IP addresses.
+   * addresses. By default, clusters are not restricted to internal IP
+   * addresses, and will have ephemeral external IP addresses assigned to each
+   * instance. This `internal_ip_only` restriction can only be enabled for
+   * subnetwork enabled networks, and all off-cluster dependencies must be
+   * configured to be accessible without external IP addresses.
    * 
* * bool internal_ip_only = 7; @@ -138,7 +138,8 @@ public interface GceClusterConfigOrBuilder * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -155,7 +156,8 @@ public interface GceClusterConfigOrBuilder * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -295,7 +297,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -306,7 +309,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -320,7 +324,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -331,7 +336,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -342,7 +348,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJob.java index ae75537794d2..82c7557c1e11 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJob.java @@ -8,8 +8,10 @@ * *
  * A Cloud Dataproc job for running
- * [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
- * jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+ * [Apache Hadoop
+ * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+ * jobs on [Apache Hadoop
+ * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
  * 
* * Protobuf type {@code google.cloud.dataproc.v1.HadoopJob} @@ -359,8 +361,8 @@ public com.google.protobuf.ByteString getMainClassBytes() { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -374,8 +376,8 @@ public com.google.protobuf.ProtocolStringList getArgsList() { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -389,8 +391,8 @@ public int getArgsCount() { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -404,8 +406,8 @@ public java.lang.String getArgs(int index) { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -1021,8 +1023,10 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * *
    * A Cloud Dataproc job for running
-   * [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
-   * jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+   * [Apache Hadoop
+   * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+   * jobs on [Apache Hadoop
+   * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
    * 
* * Protobuf type {@code google.cloud.dataproc.v1.HadoopJob} @@ -1571,8 +1575,8 @@ private void ensureArgsIsMutable() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1586,8 +1590,8 @@ public com.google.protobuf.ProtocolStringList getArgsList() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1601,8 +1605,8 @@ public int getArgsCount() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1616,8 +1620,8 @@ public java.lang.String getArgs(int index) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1631,8 +1635,8 @@ public com.google.protobuf.ByteString getArgsBytes(int index) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1652,8 +1656,8 @@ public Builder setArgs(int index, java.lang.String value) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1673,8 +1677,8 @@ public Builder addArgs(java.lang.String value) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1691,8 +1695,8 @@ public Builder addAllArgs(java.lang.Iterable values) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1709,8 +1713,8 @@ public Builder clearArgs() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJobOrBuilder.java index ae5a7b54c607..d6733db880c5 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HadoopJobOrBuilder.java @@ -65,8 +65,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -78,8 +78,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -91,8 +91,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -104,8 +104,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJob.java index 02ab12d7de9a..561b77d39df7 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJob.java @@ -316,8 +316,8 @@ public com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder() { * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; @@ -1404,8 +1404,8 @@ public com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder() { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1418,8 +1418,8 @@ public boolean getContinueOnFailure() { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1435,8 +1435,8 @@ public Builder setContinueOnFailure(boolean value) { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJobOrBuilder.java index cdccc15fe47c..12900aae5786 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/HiveJobOrBuilder.java @@ -65,8 +65,8 @@ public interface HiveJobOrBuilder * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java index a3adc1aec6ab..2f76a3393baf 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfig.java @@ -305,7 +305,8 @@ public com.google.protobuf.ByteString getImageUriBytes() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -333,7 +334,8 @@ public java.lang.String getMachineTypeUri() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -399,7 +401,8 @@ public com.google.cloud.dataproc.v1.DiskConfigOrBuilder getDiskConfigOrBuilder() * * *
-   * Optional. Specifies that this instance group contains preemptible instances.
+   * Optional. Specifies that this instance group contains preemptible
+   * instances.
    * 
* * bool is_preemptible = 6; @@ -1363,7 +1366,8 @@ public Builder setImageUriBytes(com.google.protobuf.ByteString value) { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1391,7 +1395,8 @@ public java.lang.String getMachineTypeUri() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1419,7 +1424,8 @@ public com.google.protobuf.ByteString getMachineTypeUriBytes() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1445,7 +1451,8 @@ public Builder setMachineTypeUri(java.lang.String value) { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1468,7 +1475,8 @@ public Builder clearMachineTypeUri() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1672,7 +1680,8 @@ public com.google.cloud.dataproc.v1.DiskConfigOrBuilder getDiskConfigOrBuilder() * * *
-     * Optional. Specifies that this instance group contains preemptible instances.
+     * Optional. Specifies that this instance group contains preemptible
+     * instances.
      * 
* * bool is_preemptible = 6; @@ -1684,7 +1693,8 @@ public boolean getIsPreemptible() { * * *
-     * Optional. Specifies that this instance group contains preemptible instances.
+     * Optional. Specifies that this instance group contains preemptible
+     * instances.
      * 
* * bool is_preemptible = 6; @@ -1699,7 +1709,8 @@ public Builder setIsPreemptible(boolean value) { * * *
-     * Optional. Specifies that this instance group contains preemptible instances.
+     * Optional. Specifies that this instance group contains preemptible
+     * instances.
      * 
* * bool is_preemptible = 6; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java index c1dfeb2e3b13..6062ed24e540 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/InstanceGroupConfigOrBuilder.java @@ -100,7 +100,8 @@ public interface InstanceGroupConfigOrBuilder * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -118,7 +119,8 @@ public interface InstanceGroupConfigOrBuilder * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -162,7 +164,8 @@ public interface InstanceGroupConfigOrBuilder * * *
-   * Optional. Specifies that this instance group contains preemptible instances.
+   * Optional. Specifies that this instance group contains preemptible
+   * instances.
    * 
* * bool is_preemptible = 6; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java index cd777b8630ae..9887fe8a7eaa 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/Job.java @@ -848,8 +848,8 @@ public com.google.cloud.dataproc.v1.JobStatusOrBuilder getStatusHistoryOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -862,8 +862,8 @@ public java.util.List getYarnAppli * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -877,8 +877,8 @@ public java.util.List getYarnAppli * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -891,8 +891,8 @@ public int getYarnApplicationsCount() { * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -905,8 +905,8 @@ public com.google.cloud.dataproc.v1.YarnApplication getYarnApplications(int inde * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -1041,8 +1041,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -1066,8 +1067,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -1083,8 +1085,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -1104,8 +1107,9 @@ public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.Strin * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -4195,8 +4199,8 @@ private void ensureYarnApplicationsIsMutable() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4213,8 +4217,8 @@ public java.util.List getYarnAppli * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4231,8 +4235,8 @@ public int getYarnApplicationsCount() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4249,8 +4253,8 @@ public com.google.cloud.dataproc.v1.YarnApplication getYarnApplications(int inde * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4274,8 +4278,8 @@ public Builder setYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4296,8 +4300,8 @@ public Builder setYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4320,8 +4324,8 @@ public Builder addYarnApplications(com.google.cloud.dataproc.v1.YarnApplication * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4345,8 +4349,8 @@ public Builder addYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4367,8 +4371,8 @@ public Builder addYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4389,8 +4393,8 @@ public Builder addYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4411,8 +4415,8 @@ public Builder addAllYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4432,8 +4436,8 @@ public Builder clearYarnApplications() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4453,8 +4457,8 @@ public Builder removeYarnApplications(int index) { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4468,8 +4472,8 @@ public com.google.cloud.dataproc.v1.YarnApplication.Builder getYarnApplicationsB * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4487,8 +4491,8 @@ public com.google.cloud.dataproc.v1.YarnApplicationOrBuilder getYarnApplications * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4506,8 +4510,8 @@ public com.google.cloud.dataproc.v1.YarnApplicationOrBuilder getYarnApplications * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4521,8 +4525,8 @@ public com.google.cloud.dataproc.v1.YarnApplication.Builder addYarnApplicationsB * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4537,8 +4541,8 @@ public com.google.cloud.dataproc.v1.YarnApplication.Builder addYarnApplicationsB * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -4804,8 +4808,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -4829,8 +4834,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -4846,8 +4852,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -4868,8 +4875,9 @@ public java.lang.String getLabelsOrDefault( * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -4897,8 +4905,9 @@ public Builder clearLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -4923,8 +4932,9 @@ public java.util.Map getMutableLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -4947,8 +4957,9 @@ public Builder putLabels(java.lang.String key, java.lang.String value) { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java index a5f7b1a51f91..b08e93413fc7 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/JobOrBuilder.java @@ -362,8 +362,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -374,8 +374,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -386,8 +386,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -398,8 +398,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -411,8 +411,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1.YarnApplication yarn_applications = 9; @@ -475,8 +475,9 @@ public interface JobOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -490,8 +491,9 @@ public interface JobOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -508,8 +510,9 @@ public interface JobOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -523,8 +526,9 @@ public interface JobOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
* Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -538,8 +542,9 @@ public interface JobOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java index c946a12d42db..dc1368a65eed 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJob.java @@ -304,8 +304,8 @@ public JobTypeCase getJobTypeCase() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). 
Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -332,8 +332,8 @@ public java.lang.String getStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1458,8 +1458,8 @@ public Builder clearJobType() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1486,8 +1486,8 @@ public java.lang.String getStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1514,8 +1514,8 @@ public com.google.protobuf.ByteString getStepIdBytes() { * within the template. 
* The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1540,8 +1540,8 @@ public Builder setStepId(java.lang.String value) { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1563,8 +1563,8 @@ public Builder clearStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. 
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java index 9f2b14af2c8d..58df39ba4431 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/OrderedJobOrBuilder.java @@ -16,8 +16,8 @@ public interface OrderedJobOrBuilder * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -34,8 +34,8 @@ public interface OrderedJobOrBuilder * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. 
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJob.java index 54684b202027..65b6d58f20fc 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJob.java @@ -332,8 +332,8 @@ public com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder() { * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; @@ -1485,8 +1485,8 @@ public com.google.cloud.dataproc.v1.QueryListOrBuilder getQueryListOrBuilder() { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1499,8 +1499,8 @@ public boolean getContinueOnFailure() { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1516,8 +1516,8 @@ public Builder setContinueOnFailure(boolean value) { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJobOrBuilder.java index 864b99850954..08e3ff1f1dc6 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PigJobOrBuilder.java @@ -65,8 +65,8 @@ public interface PigJobOrBuilder * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java index c2ef2913f5eb..4df00cba7ac6 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/PySparkJob.java @@ -8,7 +8,8 @@ * *
  * A Cloud Dataproc job for running
- * [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+ * [Apache
+ * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
  * applications on YARN.
  * 
* @@ -952,7 +953,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * *
    * A Cloud Dataproc job for running
-   * [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+   * [Apache
+   * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
    * applications on YARN.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java index 41042ec89535..e87fc8b1faba 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfig.java @@ -124,10 +124,12 @@ protected com.google.protobuf.MapField internalGetMapField(int number) { * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* @@ -148,10 +150,12 @@ public java.lang.String getImageVersion() { * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* @@ -687,10 +691,12 @@ public Builder mergeFrom( * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -711,10 +717,12 @@ public java.lang.String getImageVersion() { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -735,10 +743,12 @@ public com.google.protobuf.ByteString getImageVersionBytes() { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -757,10 +767,12 @@ public Builder setImageVersion(java.lang.String value) { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -776,10 +788,12 @@ public Builder clearImageVersion() { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java index 77860621e066..b0102ebaebf4 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SoftwareConfigOrBuilder.java @@ -12,10 +12,12 @@ public interface SoftwareConfigOrBuilder * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* @@ -26,10 +28,12 @@ public interface SoftwareConfigOrBuilder * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJob.java index 315fa1fefb5d..621415ccbd77 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SparkSqlJob.java @@ -7,8 +7,8 @@ * * *
- * A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
- * queries.
+ * A Cloud Dataproc job for running [Apache Spark
+ * SQL](http://spark.apache.org/sql/) queries.
  * 
* * Protobuf type {@code google.cloud.dataproc.v1.SparkSqlJob} @@ -869,8 +869,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
-   * queries.
+   * A Cloud Dataproc job for running [Apache Spark
+   * SQL](http://spark.apache.org/sql/) queries.
    * 
* * Protobuf type {@code google.cloud.dataproc.v1.SparkSqlJob} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java index a2032f50603c..55f4f7c8adef 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequest.java @@ -256,8 +256,8 @@ public com.google.cloud.dataproc.v1.JobOrBuilder getJobOrBuilder() { * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+   * requests  with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -284,8 +284,8 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+   * requests  with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -1058,8 +1058,8 @@ public com.google.cloud.dataproc.v1.JobOrBuilder getJobOrBuilder() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+     * requests  with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1086,8 +1086,8 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+     * requests  with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1114,8 +1114,8 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+     * requests  with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1140,8 +1140,8 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+     * requests  with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
@@ -1163,8 +1163,8 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
+     * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+     * requests  with the same id, then the second request will be ignored and the
      * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java
index 234746e31f43..353a6085d6dd 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/SubmitJobRequestOrBuilder.java
@@ -88,8 +88,8 @@ public interface SubmitJobRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+   * requests  with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
@@ -106,8 +106,8 @@ public interface SubmitJobRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
+   * receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+   * requests  with the same id, then the second request will be ignored and the
    * first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java
index ce00b86adb01..170ffb3bc042 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameter.java
@@ -202,10 +202,10 @@ public com.google.protobuf.ByteString getNameBytes() {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -249,10 +249,10 @@ public com.google.protobuf.ProtocolStringList getFieldsList() {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -296,10 +296,10 @@ public int getFieldsCount() {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -343,10 +343,10 @@ public java.lang.String getFields(int index) {
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -1017,10 +1017,10 @@ private void ensureFieldsIsMutable() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1064,10 +1064,10 @@ public com.google.protobuf.ProtocolStringList getFieldsList() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1111,10 +1111,10 @@ public int getFieldsCount() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1158,10 +1158,10 @@ public java.lang.String getFields(int index) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1205,10 +1205,10 @@ public com.google.protobuf.ByteString getFieldsBytes(int index) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1258,10 +1258,10 @@ public Builder setFields(int index, java.lang.String value) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1311,10 +1311,10 @@ public Builder addFields(java.lang.String value) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1361,10 +1361,10 @@ public Builder addAllFields(java.lang.Iterable values) {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1411,10 +1411,10 @@ public Builder clearFields() {
      * Required. Paths to all fields that the parameter replaces.
      * A field is allowed to appear in at most one parameter's list of field
      * paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java
index 219e0a738457..c5e7b4cd898b 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/TemplateParameterOrBuilder.java
@@ -48,10 +48,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -93,10 +93,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -138,10 +138,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -183,10 +183,10 @@ public interface TemplateParameterOrBuilder
    * Required. Paths to all fields that the parameter replaces.
    * A field is allowed to appear in at most one parameter's list of field
    * paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java
index 9f10659a769c..bca91e4228e0 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequest.java
@@ -565,10 +565,11 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -593,10 +594,11 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2347,10 +2349,11 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2375,10 +2378,11 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2403,10 +2407,11 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2429,10 +2434,11 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2452,10 +2458,11 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java
index 2a21ab180674..10401fcd7839 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/java/com/google/cloud/dataproc/v1/UpdateClusterRequestOrBuilder.java
@@ -318,10 +318,11 @@ public interface UpdateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -336,10 +337,11 @@ public interface UpdateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto
index cc7db4645528..31873aad1745 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/clusters.proto
@@ -29,12 +29,12 @@ option java_multiple_files = true;
 option java_outer_classname = "ClustersProto";
 option java_package = "com.google.cloud.dataproc.v1";
 
-
 // The ClusterControllerService provides methods to manage clusters
 // of Compute Engine instances.
 service ClusterController {
   // Creates a cluster in a project.
-  rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
+  rpc CreateCluster(CreateClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1/projects/{project_id}/regions/{region}/clusters"
       body: "cluster"
@@ -42,7 +42,8 @@ service ClusterController {
   }
 
   // Updates a cluster in a project.
-  rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) {
+  rpc UpdateCluster(UpdateClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       patch: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
       body: "cluster"
@@ -50,7 +51,8 @@ service ClusterController {
   }
 
   // Deletes a cluster in a project.
-  rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) {
+  rpc DeleteCluster(DeleteClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       delete: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
     };
@@ -73,7 +75,8 @@ service ClusterController {
   // Gets cluster diagnostic information.
   // After the operation completes, the Operation.response field
   // contains `DiagnoseClusterOutputLocation`.
-  rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) {
+  rpc DiagnoseCluster(DiagnoseClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose"
       body: "*"
@@ -99,8 +102,9 @@ message Cluster {
   // Label **keys** must contain 1 to 63 characters, and must conform to
   // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
   // Label **values** may be empty, but, if present, must contain 1 to 63
-  // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
-  // No more than 32 labels can be associated with a cluster.
+  // characters, and must conform to [RFC
+  // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+  // associated with a cluster.
   map labels = 8;
 
   // Output only. Cluster status.
@@ -115,8 +119,8 @@ message Cluster {
 
   // Contains cluster daemon metrics such as HDFS and YARN stats.
   //
-  // **Beta Feature**: This report is available for testing purposes only. It may
-  // be changed before final release.
+  // **Beta Feature**: This report is available for testing purposes only. It
+  // may be changed before final release.
   ClusterMetrics metrics = 9;
 }
 
@@ -152,9 +156,11 @@ message ClusterConfig {
   // Optional. Commands to execute on each node after config is
   // completed. By default, executables are run on master and all worker nodes.
   // You can test a node's `role` metadata to run an executable on
-  // a master or worker node, as shown below using `curl` (you can also use `wget`):
+  // a master or worker node, as shown below using `curl` (you can also use
+  // `wget`):
   //
-  //     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
+  //     ROLE=$(curl -H Metadata-Flavor:Google
+  //     http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
   //     if [[ "${ROLE}" == 'Master' ]]; then
   //       ... master specific actions ...
   //     else
@@ -213,11 +219,11 @@ message GceClusterConfig {
   string subnetwork_uri = 6;
 
   // Optional. If true, all instances in the cluster will only have internal IP
-  // addresses. By default, clusters are not restricted to internal IP addresses,
-  // and will have ephemeral external IP addresses assigned to each instance.
-  // This `internal_ip_only` restriction can only be enabled for subnetwork
-  // enabled networks, and all off-cluster dependencies must be configured to be
-  // accessible without external IP addresses.
+  // addresses. By default, clusters are not restricted to internal IP
+  // addresses, and will have ephemeral external IP addresses assigned to each
+  // instance. This `internal_ip_only` restriction can only be enabled for
+  // subnetwork enabled networks, and all off-cluster dependencies must be
+  // configured to be accessible without external IP addresses.
   bool internal_ip_only = 7;
 
   // Optional. The service account of the instances. Defaults to the default
@@ -227,7 +233,8 @@ message GceClusterConfig {
   // * roles/logging.logWriter
   // * roles/storage.objectAdmin
   //
-  // (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+  // (see
+  // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
   // for more information).
   // Example: `[account_id]@[project_id].iam.gserviceaccount.com`
   string service_account = 8;
@@ -253,7 +260,8 @@ message GceClusterConfig {
   repeated string tags = 4;
 
   // The Compute Engine metadata entries to add to all instances (see
-  // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+  // [Project and instance
+  // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
   map metadata = 5;
 }
 
@@ -282,7 +290,8 @@ message InstanceGroupConfig {
   // * `n1-standard-2`
   //
   // **Auto Zone Exception**: If you are using the Cloud Dataproc
-  // [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+  // [Auto Zone
+  // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
   // feature, you must use the short name of the machine type
   // resource, for example, `n1-standard-2`.
   string machine_type_uri = 4;
@@ -290,7 +299,8 @@ message InstanceGroupConfig {
   // Optional. Disk option config settings.
   DiskConfig disk_config = 5;
 
-  // Optional. Specifies that this instance group contains preemptible instances.
+  // Optional. Specifies that this instance group contains preemptible
+  // instances.
   bool is_preemptible = 6;
 
   // Output only. The config for Compute Engine Instance Group
@@ -321,7 +331,8 @@ message ManagedGroupConfig {
 message AcceleratorConfig {
   // Full URL, partial URI, or short name of the accelerator type resource to
   // expose to this instance. See
-  // [Compute Engine AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
+  // [Compute Engine
+  // AcceleratorTypes](/compute/docs/reference/beta/acceleratorTypes).
   //
   // Examples:
   //
@@ -330,7 +341,8 @@ message AcceleratorConfig {
   // * `nvidia-tesla-k80`
   //
   // **Auto Zone Exception**: If you are using the Cloud Dataproc
-  // [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+  // [Auto Zone
+  // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
   // feature, you must use the short name of the accelerator type
   // resource, for example, `nvidia-tesla-k80`.
   string accelerator_type_uri = 1;
@@ -429,10 +441,12 @@ message ClusterStatus {
 
 // Specifies the selection and config of software inside the cluster.
 message SoftwareConfig {
-  // Optional. The version of software inside the cluster. It must be one of the supported
-  // [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+  // Optional. The version of software inside the cluster. It must be one of the
+  // supported [Cloud Dataproc
+  // Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
   // such as "1.2" (including a subminor version, such as "1.2.29"), or the
-  // ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+  // ["preview"
+  // version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
   // If unspecified, it defaults to the latest version.
   string image_version = 1;
 
@@ -482,10 +496,11 @@ message CreateClusterRequest {
   Cluster cluster = 2;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-  // is returned.
+  // receives two
+  // [CreateClusterRequest][google.cloud.dataproc.v1.CreateClusterRequest]
+  // requests  with the same id, then the second request will be ignored and the
+  // first [google.longrunning.Operation][google.longrunning.Operation] created
+  // and stored in the backend is returned.
   //
   // It is recommended to always set this value to a
   // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -570,10 +585,11 @@ message UpdateClusterRequest {
   google.protobuf.FieldMask update_mask = 4;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-  // backend is returned.
+  // receives two
+  // [UpdateClusterRequest][google.cloud.dataproc.v1.UpdateClusterRequest]
+  // requests  with the same id, then the second request will be ignored and the
+  // first [google.longrunning.Operation][google.longrunning.Operation] created
+  // and stored in the backend is returned.
   //
   // It is recommended to always set this value to a
   // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -600,10 +616,11 @@ message DeleteClusterRequest {
   string cluster_uuid = 4;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-  // backend is returned.
+  // receives two
+  // [DeleteClusterRequest][google.cloud.dataproc.v1.DeleteClusterRequest]
+  // requests  with the same id, then the second request will be ignored and the
+  // first [google.longrunning.Operation][google.longrunning.Operation] created
+  // and stored in the backend is returned.
   //
   // It is recommended to always set this value to a
   // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto
index 5bf067e0648f..c331ee236d64 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/jobs.proto
@@ -27,7 +27,6 @@ option java_multiple_files = true;
 option java_outer_classname = "JobsProto";
 option java_package = "com.google.cloud.dataproc.v1";
 
-
 // The JobController provides methods to manage jobs.
 service JobController {
   // Submits a job to a cluster.
@@ -62,7 +61,8 @@ service JobController {
 
   // Starts a job cancellation request. To access the job resource
   // after cancellation, call
-  // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
+  // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
+  // or
   // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
   rpc CancelJob(CancelJobRequest) returns (Job) {
     option (google.api.http) = {
@@ -122,8 +122,10 @@ message LoggingConfig {
 }
 
 // A Cloud Dataproc job for running
-// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
-// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+// [Apache Hadoop
+// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+// jobs on [Apache Hadoop
+// YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
 message HadoopJob {
   // Required. Indicates the location of the driver's main class. Specify
   // either the jar file that contains the main class or the main class name.
@@ -143,8 +145,8 @@ message HadoopJob {
   }
 
   // Optional. The arguments to pass to the driver. Do not
-  // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-  // properties, since a collision may occur that causes an incorrect job
+  // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+  // job properties, since a collision may occur that causes an incorrect job
   // submission.
   repeated string args = 3;
 
@@ -178,7 +180,8 @@ message SparkJob {
   // Required. The specification of the main method to call to drive the job.
   // Specify either the jar file that contains the main class or the main class
   // name. To pass both a main jar and a main class in that jar, add the jar to
-  // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
+  // `CommonJob.jar_file_uris`, and then specify the main class name in
+  // `main_class`.
   oneof driver {
     // The HCFS URI of the jar file that contains the main class.
     string main_jar_file_uri = 1;
@@ -217,7 +220,8 @@ message SparkJob {
 }
 
 // A Cloud Dataproc job for running
-// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+// [Apache
+// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
 // applications on YARN.
 message PySparkJob {
   // Required. The HCFS URI of the main Python file to use as the driver. Must
@@ -288,8 +292,8 @@ message HiveJob {
   }
 
   // Optional. Whether to continue executing queries if a query fails.
-  // The default value is `false`. Setting to `true` can be useful when executing
-  // independent parallel queries.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
   bool continue_on_failure = 3;
 
   // Optional. Mapping of query variable names to values (equivalent to the
@@ -308,8 +312,8 @@ message HiveJob {
   repeated string jar_file_uris = 6;
 }
 
-// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
-// queries.
+// A Cloud Dataproc job for running [Apache Spark
+// SQL](http://spark.apache.org/sql/) queries.
 message SparkSqlJob {
   // Required. The sequence of Spark SQL queries to execute, specified as
   // either an HCFS file URI or as a list of queries.
@@ -351,8 +355,8 @@ message PigJob {
   }
 
   // Optional. Whether to continue executing queries if a query fails.
-  // The default value is `false`. Setting to `true` can be useful when executing
-  // independent parallel queries.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
   bool continue_on_failure = 3;
 
   // Optional. Mapping of query variable names to values (equivalent to the Pig
@@ -573,8 +577,8 @@ message Job {
 
   // Output only. The collection of YARN applications spun up by this job.
   //
-  // **Beta** Feature: This report is available for testing purposes only. It may
-  // be changed before final release.
+  // **Beta** Feature: This report is available for testing purposes only. It
+  // may be changed before final release.
   repeated YarnApplication yarn_applications = 9;
 
   // Output only. A URI pointing to the location of the stdout of the job's
@@ -590,8 +594,9 @@ message Job {
   // Label **keys** must contain 1 to 63 characters, and must conform to
   // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
   // Label **values** may be empty, but, if present, must contain 1 to 63
-  // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
-  // No more than 32 labels can be associated with a job.
+  // characters, and must conform to [RFC
+  // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+  // associated with a job.
   map labels = 18;
 
   // Optional. Job scheduling configuration.
@@ -629,8 +634,8 @@ message SubmitJobRequest {
   Job job = 2;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest] requests  with the same
-  // id, then the second request will be ignored and the
+  // receives two [SubmitJobRequest][google.cloud.dataproc.v1.SubmitJobRequest]
+  // requests  with the same id, then the second request will be ignored and the
   // first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
   // is returned.
   //
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto
index ba3ab3be056f..7c05876625dc 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/operations.proto
@@ -25,7 +25,6 @@ option java_multiple_files = true;
 option java_outer_classname = "OperationsProto";
 option java_package = "com.google.cloud.dataproc.v1";
 
-
 // The status of the operation.
 message ClusterOperationStatus {
   // The operation state.
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto
index 45ef8a22ea91..a536698b742b 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1/src/main/proto/google/cloud/dataproc/v1/workflow_templates.proto
@@ -29,12 +29,12 @@ option java_multiple_files = true;
 option java_outer_classname = "WorkflowTemplatesProto";
 option java_package = "com.google.cloud.dataproc.v1";
 
-
 // The API interface for managing Workflow Templates in the
 // Cloud Dataproc API.
 service WorkflowTemplateService {
   // Creates new workflow template.
-  rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) {
+  rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest)
+      returns (WorkflowTemplate) {
     option (google.api.http) = {
       post: "/v1/{parent=projects/*/locations/*}/workflowTemplates"
       body: "template"
@@ -49,7 +49,8 @@ service WorkflowTemplateService {
   //
   // Can retrieve previously instantiated template by specifying optional
   // version parameter.
-  rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) {
+  rpc GetWorkflowTemplate(GetWorkflowTemplateRequest)
+      returns (WorkflowTemplate) {
     option (google.api.http) = {
       get: "/v1/{name=projects/*/locations/*/workflowTemplates/*}"
       additional_bindings {
@@ -76,7 +77,8 @@ service WorkflowTemplateService {
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
   // [Empty][google.protobuf.Empty].
-  rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) {
+  rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1/{name=projects/*/locations/*/workflowTemplates/*}:instantiate"
       body: "*"
@@ -90,7 +92,8 @@ service WorkflowTemplateService {
   // Instantiates a template and begins execution.
   //
   // This method is equivalent to executing the sequence
-  // [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
+  // [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
+  // [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
   // [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
   //
   // The returned Operation can be used to track execution of
@@ -109,7 +112,9 @@ service WorkflowTemplateService {
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
   // [Empty][google.protobuf.Empty].
-  rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) {
+  rpc InstantiateInlineWorkflowTemplate(
+      InstantiateInlineWorkflowTemplateRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline"
       body: "template"
@@ -122,7 +127,8 @@ service WorkflowTemplateService {
 
   // Updates (replaces) workflow template. The updated template
   // must contain version that matches the current server version.
-  rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) {
+  rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest)
+      returns (WorkflowTemplate) {
     option (google.api.http) = {
       put: "/v1/{template.name=projects/*/locations/*/workflowTemplates/*}"
       body: "template"
@@ -134,7 +140,8 @@ service WorkflowTemplateService {
   }
 
   // Lists workflows that match the specified filter in the request.
-  rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) {
+  rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest)
+      returns (ListWorkflowTemplatesResponse) {
     option (google.api.http) = {
       get: "/v1/{parent=projects/*/locations/*}/workflowTemplates"
       additional_bindings {
@@ -144,7 +151,8 @@ service WorkflowTemplateService {
   }
 
   // Deletes a workflow template. It does not cancel in-progress workflows.
-  rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) {
+  rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest)
+      returns (google.protobuf.Empty) {
     option (google.api.http) = {
       delete: "/v1/{name=projects/*/locations/*/workflowTemplates/*}"
       additional_bindings {
@@ -276,8 +284,8 @@ message OrderedJob {
   //
   // The step id is used as prefix for job id, as job
   // `goog-dataproc-workflow-step-id` label, and in
-  // [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids] field from other
-  // steps.
+  // [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids]
+  // field from other steps.
   //
   // The id must contain only letters (a-z, A-Z), numbers (0-9),
   // underscores (_), and hyphens (-). Cannot begin or end with underscore
@@ -348,10 +356,10 @@ message TemplateParameter {
   // A field is allowed to appear in at most one parameter's list of field
   // paths.
   //
-  // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-  // For example, a field path that references the zone field of a workflow
-  // template's cluster selector would be specified as
-  // `placement.clusterSelector.zone`.
+  // A field path is similar in syntax to a
+  // [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+  // field path that references the zone field of a workflow template's cluster
+  // selector would be specified as `placement.clusterSelector.zone`.
   //
   // Also, field paths can reference fields using the following syntax:
   //
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java
index 66e105723793..ed81f056fd8b 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfig.java
@@ -112,7 +112,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
    * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
    * * `nvidia-tesla-k80`
    * **Auto Zone Exception**: If you are using the Cloud Dataproc
-   * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+   * [Auto Zone
+   * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
    * feature, you must use the short name of the accelerator type
    * resource, for example, `nvidia-tesla-k80`.
    * 
@@ -142,7 +143,8 @@ public java.lang.String getAcceleratorTypeUri() { * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
@@ -524,7 +526,8 @@ public Builder mergeFrom( * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
@@ -554,7 +557,8 @@ public java.lang.String getAcceleratorTypeUri() { * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
@@ -584,7 +588,8 @@ public com.google.protobuf.ByteString getAcceleratorTypeUriBytes() { * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
@@ -612,7 +617,8 @@ public Builder setAcceleratorTypeUri(java.lang.String value) { * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
@@ -637,7 +643,8 @@ public Builder clearAcceleratorTypeUri() { * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java index db70c0ce297a..ee42fea60fdf 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/AcceleratorConfigOrBuilder.java @@ -20,7 +20,8 @@ public interface AcceleratorConfigOrBuilder * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
@@ -40,7 +41,8 @@ public interface AcceleratorConfigOrBuilder * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * * `nvidia-tesla-k80` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the accelerator type * resource, for example, `nvidia-tesla-k80`. *
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java index dec447c98319..a73c1b8b17ee 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java @@ -362,8 +362,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -387,8 +388,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -404,8 +406,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -425,8 +428,9 @@ public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.Strin * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -600,8 +604,8 @@ public com.google.protobuf.ByteString getClusterUuidBytes() { * *
    * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -614,8 +618,8 @@ public boolean hasMetrics() { * *
    * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -630,8 +634,8 @@ public com.google.cloud.dataproc.v1beta2.ClusterMetrics getMetrics() { * *
    * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -1592,8 +1596,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1617,8 +1622,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1634,8 +1640,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1656,8 +1663,9 @@ public java.lang.String getLabelsOrDefault( * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1685,8 +1693,9 @@ public Builder clearLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. *
* * map<string, string> labels = 8; @@ -1711,8 +1720,9 @@ public java.util.Map getMutableLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -1735,8 +1745,9 @@ public Builder putLabels(java.lang.String key, java.lang.String value) { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -2398,8 +2409,8 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) { * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2412,8 +2423,8 @@ public boolean hasMetrics() { * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2432,8 +2443,8 @@ public com.google.cloud.dataproc.v1beta2.ClusterMetrics getMetrics() { * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2456,8 +2467,8 @@ public Builder setMetrics(com.google.cloud.dataproc.v1beta2.ClusterMetrics value * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2478,8 +2489,8 @@ public Builder setMetrics( * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2506,8 +2517,8 @@ public Builder mergeMetrics(com.google.cloud.dataproc.v1beta2.ClusterMetrics val * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2528,8 +2539,8 @@ public Builder clearMetrics() { * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2544,8 +2555,8 @@ public com.google.cloud.dataproc.v1beta2.ClusterMetrics.Builder getMetricsBuilde * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -2564,8 +2575,8 @@ public com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder getMetricsOrBui * *
      * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-     * **Beta Feature**: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta Feature**: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java index aabe5ec402ef..c1bb08654002 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfig.java @@ -551,8 +551,10 @@ public com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder getLifecycleCo * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -575,8 +577,10 @@ public com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder getLifecycleCo * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -600,8 +604,10 @@ public com.google.cloud.dataproc.v1beta2.LifecycleConfigOrBuilder getLifecycleCo * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -623,8 +629,10 @@ public int getInitializationActionsCount() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -647,8 +655,10 @@ public com.google.cloud.dataproc.v1beta2.NodeInitializationAction getInitializat * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2565,8 +2575,10 @@ private void ensureInitializationActionsIsMutable() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2593,8 +2605,10 @@ private void ensureInitializationActionsIsMutable() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2620,8 +2634,10 @@ public int getInitializationActionsCount() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2648,8 +2664,10 @@ public com.google.cloud.dataproc.v1beta2.NodeInitializationAction getInitializat * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2682,8 +2700,10 @@ public Builder setInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2714,8 +2734,10 @@ public Builder setInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2748,8 +2770,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2782,8 +2806,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2813,8 +2839,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2845,8 +2873,10 @@ public Builder addInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2877,8 +2907,10 @@ public Builder addAllInitializationActions( * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2907,8 +2939,10 @@ public Builder clearInitializationActions() { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2937,8 +2971,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2961,8 +2997,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -2989,8 +3027,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -3018,8 +3058,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -3044,8 +3086,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -3071,8 +3115,10 @@ public Builder removeInitializationActions(int index) { * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java index 5bb67e46d70e..e020cef03fcd 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterConfigOrBuilder.java @@ -245,8 +245,10 @@ public interface ClusterConfigOrBuilder * Optional. 
Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -267,8 +269,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -288,8 +292,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -309,8 +315,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. * You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else @@ -331,8 +339,10 @@ public interface ClusterConfigOrBuilder * Optional. Commands to execute on each node after config is * completed. By default, executables are run on master and all worker nodes. 
* You can test a node's <code>role</code> metadata to run an executable on - * a master or worker node, as shown below using `curl` (you can also use `wget`): - * ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + * a master or worker node, as shown below using `curl` (you can also use + * `wget`): + * ROLE=$(curl -H Metadata-Flavor:Google + * http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) * if [[ "${ROLE}" == 'Master' ]]; then * ... master specific actions ... * else diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java index 603ece251dbb..bd8f0c141584 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClusterOrBuilder.java @@ -94,8 +94,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -109,8 +110,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
* Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -127,8 +129,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -142,8 +145,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -157,8 +161,9 @@ public interface ClusterOrBuilder * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a cluster. 
+ * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a cluster. * * * map<string, string> labels = 8; @@ -276,8 +281,8 @@ public interface ClusterOrBuilder * *
    * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -288,8 +293,8 @@ public interface ClusterOrBuilder * *
    * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; @@ -300,8 +305,8 @@ public interface ClusterOrBuilder * *
    * Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
-   * **Beta Feature**: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta Feature**: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * .google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java index ab9923a0449d..2152b7b31e33 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ClustersProto.java @@ -127,9 +127,9 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { java.lang.String[] descriptorData = { "\n,google/cloud/dataproc/v1beta2/clusters" + ".proto\022\035google.cloud.dataproc.v1beta2\032\034g" - + "oogle/api/annotations.proto\032*google/clou" - + "d/dataproc/v1beta2/shared.proto\032.google/" - + "cloud/dataproc/v1beta2/operations.proto\032" + + "oogle/api/annotations.proto\032.google/clou" + + "d/dataproc/v1beta2/operations.proto\032*goo" + + "gle/cloud/dataproc/v1beta2/shared.proto\032" + "#google/longrunning/operations.proto\032\036go" + "ogle/protobuf/duration.proto\032 google/pro" + "tobuf/field_mask.proto\032\037google/protobuf/" @@ -285,8 +285,8 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { com.google.api.AnnotationsProto.getDescriptor(), - com.google.cloud.dataproc.v1beta2.SharedProto.getDescriptor(), com.google.cloud.dataproc.v1beta2.OperationsProto.getDescriptor(), + com.google.cloud.dataproc.v1beta2.SharedProto.getDescriptor(), com.google.longrunning.OperationsProto.getDescriptor(), com.google.protobuf.DurationProto.getDescriptor(), com.google.protobuf.FieldMaskProto.getDescriptor(), @@ -551,8 +551,8 @@ public com.google.protobuf.ExtensionRegistry assignDescriptors( 
com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( descriptor, registry); com.google.api.AnnotationsProto.getDescriptor(); - com.google.cloud.dataproc.v1beta2.SharedProto.getDescriptor(); com.google.cloud.dataproc.v1beta2.OperationsProto.getDescriptor(); + com.google.cloud.dataproc.v1beta2.SharedProto.getDescriptor(); com.google.longrunning.OperationsProto.getDescriptor(); com.google.protobuf.DurationProto.getDescriptor(); com.google.protobuf.FieldMaskProto.getDescriptor(); diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java index 923a8754cb87..b7e16cb93329 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequest.java @@ -259,10 +259,11 @@ public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder() * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -287,10 +288,11 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1071,10 +1073,11 @@ public com.google.cloud.dataproc.v1beta2.ClusterOrBuilder getClusterOrBuilder()
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1099,10 +1102,11 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1127,10 +1131,11 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1153,10 +1158,11 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1176,10 +1182,11 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-     * is returned.
+     * receives two
+     * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java
index c5ad4ae2d307..1ccf73137f69 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CreateClusterRequestOrBuilder.java
@@ -88,10 +88,11 @@ public interface CreateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+   * requests with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -106,10 +107,11 @@ public interface CreateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-   * is returned.
+   * receives two
+   * [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+   * requests with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java
index 262b21c80bb6..6bce264b343b 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequest.java
@@ -306,10 +306,11 @@ public com.google.protobuf.ByteString getClusterUuidBytes() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+   * requests with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -334,10 +335,11 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+   * requests with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1134,10 +1136,11 @@ public Builder setClusterUuidBytes(com.google.protobuf.ByteString value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1162,10 +1165,11 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1190,10 +1194,11 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1216,10 +1221,11 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1239,10 +1245,11 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+     * requests with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java
index 37010cd48470..a191e76fead8 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/DeleteClusterRequestOrBuilder.java
@@ -101,10 +101,11 @@ public interface DeleteClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+   * requests with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -119,10 +120,11 @@ public interface DeleteClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+   * requests with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java
index 6bc03e403601..b77177fd4524 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfig.java
@@ -355,11 +355,11 @@ public com.google.protobuf.ByteString getSubnetworkUriBytes() {
    *
    * 
    * Optional. If true, all instances in the cluster will only have internal IP
-   * addresses. By default, clusters are not restricted to internal IP addresses,
-   * and will have ephemeral external IP addresses assigned to each instance.
-   * This `internal_ip_only` restriction can only be enabled for subnetwork
-   * enabled networks, and all off-cluster dependencies must be configured to be
-   * accessible without external IP addresses.
+   * addresses. By default, clusters are not restricted to internal IP
+   * addresses, and will have ephemeral external IP addresses assigned to each
+   * instance. This `internal_ip_only` restriction can only be enabled for
+   * subnetwork enabled networks, and all off-cluster dependencies must be
+   * configured to be accessible without external IP addresses.
    * 
* * bool internal_ip_only = 7; @@ -379,7 +379,8 @@ public boolean getInternalIpOnly() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -406,7 +407,8 @@ public java.lang.String getServiceAccount() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -601,7 +603,8 @@ public int getMetadataCount() { * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -622,7 +625,8 @@ public java.util.Map getMetadata() { * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -635,7 +639,8 @@ public java.util.Map getMetadataMap() { * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -653,7 +658,8 @@ public java.lang.String getMetadataOrDefault( * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -1558,11 +1564,11 @@ public Builder setSubnetworkUriBytes(com.google.protobuf.ByteString value) { * *
      * Optional. If true, all instances in the cluster will only have internal IP
-     * addresses. By default, clusters are not restricted to internal IP addresses,
-     * and will have ephemeral external IP addresses assigned to each instance.
-     * This `internal_ip_only` restriction can only be enabled for subnetwork
-     * enabled networks, and all off-cluster dependencies must be configured to be
-     * accessible without external IP addresses.
+     * addresses. By default, clusters are not restricted to internal IP
+     * addresses, and will have ephemeral external IP addresses assigned to each
+     * instance. This `internal_ip_only` restriction can only be enabled for
+     * subnetwork enabled networks, and all off-cluster dependencies must be
+     * configured to be accessible without external IP addresses.
      * 
* * bool internal_ip_only = 7; @@ -1575,11 +1581,11 @@ public boolean getInternalIpOnly() { * *
      * Optional. If true, all instances in the cluster will only have internal IP
-     * addresses. By default, clusters are not restricted to internal IP addresses,
-     * and will have ephemeral external IP addresses assigned to each instance.
-     * This `internal_ip_only` restriction can only be enabled for subnetwork
-     * enabled networks, and all off-cluster dependencies must be configured to be
-     * accessible without external IP addresses.
+     * addresses. By default, clusters are not restricted to internal IP
+     * addresses, and will have ephemeral external IP addresses assigned to each
+     * instance. This `internal_ip_only` restriction can only be enabled for
+     * subnetwork enabled networks, and all off-cluster dependencies must be
+     * configured to be accessible without external IP addresses.
      * 
* * bool internal_ip_only = 7; @@ -1595,11 +1601,11 @@ public Builder setInternalIpOnly(boolean value) { * *
      * Optional. If true, all instances in the cluster will only have internal IP
-     * addresses. By default, clusters are not restricted to internal IP addresses,
-     * and will have ephemeral external IP addresses assigned to each instance.
-     * This `internal_ip_only` restriction can only be enabled for subnetwork
-     * enabled networks, and all off-cluster dependencies must be configured to be
-     * accessible without external IP addresses.
+     * addresses. By default, clusters are not restricted to internal IP
+     * addresses, and will have ephemeral external IP addresses assigned to each
+     * instance. This `internal_ip_only` restriction can only be enabled for
+     * subnetwork enabled networks, and all off-cluster dependencies must be
+     * configured to be accessible without external IP addresses.
      * 
* * bool internal_ip_only = 7; @@ -1621,7 +1627,8 @@ public Builder clearInternalIpOnly() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1648,7 +1655,8 @@ public java.lang.String getServiceAccount() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1675,7 +1683,8 @@ public com.google.protobuf.ByteString getServiceAccountBytes() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1700,7 +1709,8 @@ public Builder setServiceAccount(java.lang.String value) { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -1722,7 +1732,8 @@ public Builder clearServiceAccount() { * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -2156,7 +2167,8 @@ public int getMetadataCount() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2177,7 +2189,8 @@ public java.util.Map getMetadata() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2190,7 +2203,8 @@ public java.util.Map getMetadataMap() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2208,7 +2222,8 @@ public java.lang.String getMetadataOrDefault( * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2233,7 +2248,8 @@ public Builder clearMetadata() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2255,7 +2271,8 @@ public java.util.Map getMutableMetadata() { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; @@ -2275,7 +2292,8 @@ public Builder putMetadata(java.lang.String key, java.lang.String value) { * *
      * The Compute Engine metadata entries to add to all instances (see
-     * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+     * [Project and instance
+     * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
      * 
* * map<string, string> metadata = 5; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java index 8c61114a9144..11943a34cfa5 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/GceClusterConfigOrBuilder.java @@ -118,11 +118,11 @@ public interface GceClusterConfigOrBuilder * *
    * Optional. If true, all instances in the cluster will only have internal IP
-   * addresses. By default, clusters are not restricted to internal IP addresses,
-   * and will have ephemeral external IP addresses assigned to each instance.
-   * This `internal_ip_only` restriction can only be enabled for subnetwork
-   * enabled networks, and all off-cluster dependencies must be configured to be
-   * accessible without external IP addresses.
+   * addresses. By default, clusters are not restricted to internal IP
+   * addresses, and will have ephemeral external IP addresses assigned to each
+   * instance. This `internal_ip_only` restriction can only be enabled for
+   * subnetwork enabled networks, and all off-cluster dependencies must be
+   * configured to be accessible without external IP addresses.
    * 
* * bool internal_ip_only = 7; @@ -138,7 +138,8 @@ public interface GceClusterConfigOrBuilder * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -155,7 +156,8 @@ public interface GceClusterConfigOrBuilder * permissions equivalent to the following IAM roles: * * roles/logging.logWriter * * roles/storage.objectAdmin - * (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + * (see + * https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts * for more information). * Example: `[account_id]@[project_id].iam.gserviceaccount.com` *
@@ -295,7 +297,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -306,7 +309,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -320,7 +324,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -331,7 +336,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; @@ -342,7 +348,8 @@ public interface GceClusterConfigOrBuilder * *
    * The Compute Engine metadata entries to add to all instances (see
-   * [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+   * [Project and instance
+   * metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
    * 
* * map<string, string> metadata = 5; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java index 45d6f1dffac7..b47d9a1ef86d 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJob.java @@ -8,8 +8,10 @@ * *
  * A Cloud Dataproc job for running
- * [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
- * jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+ * [Apache Hadoop
+ * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+ * jobs on [Apache Hadoop
+ * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
  * 
* * Protobuf type {@code google.cloud.dataproc.v1beta2.HadoopJob} @@ -359,8 +361,8 @@ public com.google.protobuf.ByteString getMainClassBytes() { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -374,8 +376,8 @@ public com.google.protobuf.ProtocolStringList getArgsList() { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -389,8 +391,8 @@ public int getArgsCount() { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -404,8 +406,8 @@ public java.lang.String getArgs(int index) { * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -1022,8 +1024,10 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * *
    * A Cloud Dataproc job for running
-   * [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
-   * jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+   * [Apache Hadoop
+   * MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+   * jobs on [Apache Hadoop
+   * YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
    * 
* * Protobuf type {@code google.cloud.dataproc.v1beta2.HadoopJob} @@ -1572,8 +1576,8 @@ private void ensureArgsIsMutable() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1587,8 +1591,8 @@ public com.google.protobuf.ProtocolStringList getArgsList() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1602,8 +1606,8 @@ public int getArgsCount() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1617,8 +1621,8 @@ public java.lang.String getArgs(int index) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1632,8 +1636,8 @@ public com.google.protobuf.ByteString getArgsBytes(int index) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1653,8 +1657,8 @@ public Builder setArgs(int index, java.lang.String value) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1674,8 +1678,8 @@ public Builder addArgs(java.lang.String value) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1692,8 +1696,8 @@ public Builder addAllArgs(java.lang.Iterable values) { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* @@ -1710,8 +1714,8 @@ public Builder clearArgs() { * *
      * Optional. The arguments to pass to the driver. Do not
-     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-     * properties, since a collision may occur that causes an incorrect job
+     * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+     * job properties, since a collision may occur that causes an incorrect job
      * submission.
      * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java index 74c8a690de47..295681c1f45a 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HadoopJobOrBuilder.java @@ -65,8 +65,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -78,8 +78,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -91,8 +91,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* @@ -104,8 +104,8 @@ public interface HadoopJobOrBuilder * *
    * Optional. The arguments to pass to the driver. Do not
-   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-   * properties, since a collision may occur that causes an incorrect job
+   * include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+   * job properties, since a collision may occur that causes an incorrect job
    * submission.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java index c8a89a806184..50a9746f7acc 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJob.java @@ -316,8 +316,8 @@ public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilde * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; @@ -1408,8 +1408,8 @@ public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilde * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1422,8 +1422,8 @@ public boolean getContinueOnFailure() { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1439,8 +1439,8 @@ public Builder setContinueOnFailure(boolean value) { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java index 742b6b065145..7df56d02caa6 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/HiveJobOrBuilder.java @@ -65,8 +65,8 @@ public interface HiveJobOrBuilder * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java index be16d9f6e2e5..60ad8e717b52 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfig.java @@ -315,7 +315,8 @@ public com.google.protobuf.ByteString getImageUriBytes() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -343,7 +344,8 @@ public java.lang.String getMachineTypeUri() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -409,7 +411,8 @@ public com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder getDiskConfigOrBuil * * *
-   * Optional. Specifies that this instance group contains preemptible instances.
+   * Optional. Specifies that this instance group contains preemptible
+   * instances.
    * 
* * bool is_preemptible = 6; @@ -1439,7 +1442,8 @@ public Builder setImageUriBytes(com.google.protobuf.ByteString value) { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1467,7 +1471,8 @@ public java.lang.String getMachineTypeUri() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1495,7 +1500,8 @@ public com.google.protobuf.ByteString getMachineTypeUriBytes() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1521,7 +1527,8 @@ public Builder setMachineTypeUri(java.lang.String value) { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1544,7 +1551,8 @@ public Builder clearMachineTypeUri() { * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -1749,7 +1757,8 @@ public com.google.cloud.dataproc.v1beta2.DiskConfigOrBuilder getDiskConfigOrBuil * * *
-     * Optional. Specifies that this instance group contains preemptible instances.
+     * Optional. Specifies that this instance group contains preemptible
+     * instances.
      * 
* * bool is_preemptible = 6; @@ -1761,7 +1770,8 @@ public boolean getIsPreemptible() { * * *
-     * Optional. Specifies that this instance group contains preemptible instances.
+     * Optional. Specifies that this instance group contains preemptible
+     * instances.
      * 
* * bool is_preemptible = 6; @@ -1776,7 +1786,8 @@ public Builder setIsPreemptible(boolean value) { * * *
-     * Optional. Specifies that this instance group contains preemptible instances.
+     * Optional. Specifies that this instance group contains preemptible
+     * instances.
      * 
* * bool is_preemptible = 6; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java index 447ac56d51d6..9728780aa097 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/InstanceGroupConfigOrBuilder.java @@ -100,7 +100,8 @@ public interface InstanceGroupConfigOrBuilder * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -118,7 +119,8 @@ public interface InstanceGroupConfigOrBuilder * * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * * `n1-standard-2` * **Auto Zone Exception**: If you are using the Cloud Dataproc - * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) + * [Auto Zone + * Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) * feature, you must use the short name of the machine type * resource, for example, `n1-standard-2`. *
@@ -162,7 +164,8 @@ public interface InstanceGroupConfigOrBuilder * * *
-   * Optional. Specifies that this instance group contains preemptible instances.
+   * Optional. Specifies that this instance group contains preemptible
+   * instances.
    * 
* * bool is_preemptible = 6; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java index 653724114979..1ba3b73701bc 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Job.java @@ -861,8 +861,8 @@ public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusHistoryOrBu * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -876,8 +876,8 @@ public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusHistoryOrBu * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -891,8 +891,8 @@ public com.google.cloud.dataproc.v1beta2.JobStatusOrBuilder getStatusHistoryOrBu * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -905,8 +905,8 @@ public int getYarnApplicationsCount() { * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -919,8 +919,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplication getYarnApplications(int * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -1100,8 +1100,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -1125,8 +1126,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -1142,8 +1144,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -1163,8 +1166,9 @@ public java.lang.String getLabelsOrDefault(java.lang.String key, java.lang.Strin * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -4280,8 +4284,8 @@ private void ensureYarnApplicationsIsMutable() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4299,8 +4303,8 @@ private void ensureYarnApplicationsIsMutable() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4317,8 +4321,8 @@ public int getYarnApplicationsCount() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4335,8 +4339,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplication getYarnApplications(int * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4360,8 +4364,8 @@ public Builder setYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4382,8 +4386,8 @@ public Builder setYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4406,8 +4410,8 @@ public Builder addYarnApplications(com.google.cloud.dataproc.v1beta2.YarnApplica * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4431,8 +4435,8 @@ public Builder addYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4453,8 +4457,8 @@ public Builder addYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4475,8 +4479,8 @@ public Builder addYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4497,8 +4501,8 @@ public Builder addAllYarnApplications( * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4518,8 +4522,8 @@ public Builder clearYarnApplications() { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4539,8 +4543,8 @@ public Builder removeYarnApplications(int index) { * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4554,8 +4558,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplication.Builder getYarnApplicat * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4573,8 +4577,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplica * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4592,8 +4596,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplica * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4607,8 +4611,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplication.Builder addYarnApplicat * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4624,8 +4628,8 @@ public com.google.cloud.dataproc.v1beta2.YarnApplication.Builder addYarnApplicat * *
      * Output only. The collection of YARN applications spun up by this job.
-     * **Beta** Feature: This report is available for testing purposes only. It may
-     * be changed before final release.
+     * **Beta** Feature: This report is available for testing purposes only. It
+     * may be changed before final release.
      * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -4990,8 +4994,9 @@ public int getLabelsCount() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -5015,8 +5020,9 @@ public java.util.Map getLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -5032,8 +5038,9 @@ public java.util.Map getLabelsMap() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -5054,8 +5061,9 @@ public java.lang.String getLabelsOrDefault( * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -5083,8 +5091,9 @@ public Builder clearLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -5109,8 +5118,9 @@ public java.util.Map getMutableLabels() { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -5133,8 +5143,9 @@ public Builder putLabels(java.lang.String key, java.lang.String value) { * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java index 0afbf0ce736b..ad3d1ea390ff 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/JobOrBuilder.java @@ -362,8 +362,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -374,8 +374,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -386,8 +386,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -398,8 +398,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -411,8 +411,8 @@ public interface JobOrBuilder * *
    * Output only. The collection of YARN applications spun up by this job.
-   * **Beta** Feature: This report is available for testing purposes only. It may
-   * be changed before final release.
+   * **Beta** Feature: This report is available for testing purposes only. It
+   * may be changed before final release.
    * 
* * repeated .google.cloud.dataproc.v1beta2.YarnApplication yarn_applications = 9; @@ -499,8 +499,9 @@ com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOr * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -514,8 +515,9 @@ com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOr * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -532,8 +534,9 @@ com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOr * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. 
* * * map<string, string> labels = 18; @@ -547,8 +550,9 @@ com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOr * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; @@ -562,8 +566,9 @@ com.google.cloud.dataproc.v1beta2.YarnApplicationOrBuilder getYarnApplicationsOr * Label **keys** must contain 1 to 63 characters, and must conform to * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). * Label **values** may be empty, but, if present, must contain 1 to 63 - * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). - * No more than 32 labels can be associated with a job. + * characters, and must conform to [RFC + * 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be + * associated with a job. * * * map<string, string> labels = 18; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java index e1f21a05c58d..a9300d400847 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponse.java @@ -180,8 +180,8 @@ public com.google.cloud.dataproc.v1beta2.WorkflowTemplateOrBuilder getTemplatesO * * *
-   * Output only. This token is included in the response if there are more results
-   * to fetch. To fetch additional results, provide this value as the
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
    * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
    * 
* @@ -202,8 +202,8 @@ public java.lang.String getNextPageToken() { * * *
-   * Output only. This token is included in the response if there are more results
-   * to fetch. To fetch additional results, provide this value as the
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
    * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
    * 
* @@ -970,8 +970,8 @@ public com.google.cloud.dataproc.v1beta2.WorkflowTemplate.Builder addTemplatesBu * * *
-     * Output only. This token is included in the response if there are more results
-     * to fetch. To fetch additional results, provide this value as the
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
      * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
      * 
* @@ -992,8 +992,8 @@ public java.lang.String getNextPageToken() { * * *
-     * Output only. This token is included in the response if there are more results
-     * to fetch. To fetch additional results, provide this value as the
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
      * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
      * 
* @@ -1014,8 +1014,8 @@ public com.google.protobuf.ByteString getNextPageTokenBytes() { * * *
-     * Output only. This token is included in the response if there are more results
-     * to fetch. To fetch additional results, provide this value as the
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
      * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
      * 
* @@ -1034,8 +1034,8 @@ public Builder setNextPageToken(java.lang.String value) { * * *
-     * Output only. This token is included in the response if there are more results
-     * to fetch. To fetch additional results, provide this value as the
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
      * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
      * 
* @@ -1051,8 +1051,8 @@ public Builder clearNextPageToken() { * * *
-     * Output only. This token is included in the response if there are more results
-     * to fetch. To fetch additional results, provide this value as the
+     * Output only. This token is included in the response if there are more
+     * results to fetch. To fetch additional results, provide this value as the
      * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
      * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java index 9c18833bc262..8f3b7ab8d72e 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/ListWorkflowTemplatesResponseOrBuilder.java @@ -64,8 +64,8 @@ public interface ListWorkflowTemplatesResponseOrBuilder * * *
-   * Output only. This token is included in the response if there are more results
-   * to fetch. To fetch additional results, provide this value as the
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
    * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
    * 
* @@ -76,8 +76,8 @@ public interface ListWorkflowTemplatesResponseOrBuilder * * *
-   * Output only. This token is included in the response if there are more results
-   * to fetch. To fetch additional results, provide this value as the
+   * Output only. This token is included in the response if there are more
+   * results to fetch. To fetch additional results, provide this value as the
    * page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java index a81190c2b411..2009baf5f7df 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJob.java @@ -304,8 +304,8 @@ public JobTypeCase getJobTypeCase() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -332,8 +332,8 @@ public java.lang.String getStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1459,8 +1459,8 @@ public Builder clearJobType() { * within the template. 
* The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1487,8 +1487,8 @@ public java.lang.String getStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1515,8 +1515,8 @@ public com.google.protobuf.ByteString getStepIdBytes() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1541,8 +1541,8 @@ public Builder setStepId(java.lang.String value) { * within the template. 
* The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -1564,8 +1564,8 @@ public Builder clearStepId() { * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java index 084575b7888d..e5c6e1932f39 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/OrderedJobOrBuilder.java @@ -16,8 +16,8 @@ public interface OrderedJobOrBuilder * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. 
+ * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. @@ -34,8 +34,8 @@ public interface OrderedJobOrBuilder * within the template. * The step id is used as prefix for job id, as job * `goog-dataproc-workflow-step-id` label, and in - * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other - * steps. + * [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] + * field from other steps. * The id must contain only letters (a-z, A-Z), numbers (0-9), * underscores (_), and hyphens (-). Cannot begin or end with underscore * or hyphen. Must consist of between 3 and 50 characters. diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java index 6cdca2ac9a7d..a377f674a69d 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJob.java @@ -332,8 +332,8 @@ public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilde * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; @@ -1488,8 +1488,8 @@ public com.google.cloud.dataproc.v1beta2.QueryListOrBuilder getQueryListOrBuilde * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1502,8 +1502,8 @@ public boolean getContinueOnFailure() { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; @@ -1519,8 +1519,8 @@ public Builder setContinueOnFailure(boolean value) { * *
      * Optional. Whether to continue executing queries if a query fails.
-     * The default value is `false`. Setting to `true` can be useful when executing
-     * independent parallel queries.
+     * The default value is `false`. Setting to `true` can be useful when
+     * executing independent parallel queries.
      * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java index 92667dbac9ef..5011f10395ea 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PigJobOrBuilder.java @@ -65,8 +65,8 @@ public interface PigJobOrBuilder * *
    * Optional. Whether to continue executing queries if a query fails.
-   * The default value is `false`. Setting to `true` can be useful when executing
-   * independent parallel queries.
+   * The default value is `false`. Setting to `true` can be useful when
+   * executing independent parallel queries.
    * 
* * bool continue_on_failure = 3; diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java index 6ff3138ae1c0..6b6b85807452 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/PySparkJob.java @@ -8,7 +8,8 @@ * *
  * A Cloud Dataproc job for running
- * [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+ * [Apache
+ * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
  * applications on YARN.
  * 
* @@ -953,7 +954,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * *
    * A Cloud Dataproc job for running
-   * [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+   * [Apache
+   * PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
    * applications on YARN.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java index 2f65fd296d7e..fec581f989aa 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfig.java @@ -124,10 +124,12 @@ protected com.google.protobuf.MapField internalGetMapField(int number) { * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* @@ -148,10 +150,12 @@ public java.lang.String getImageVersion() { * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* @@ -688,10 +692,12 @@ public Builder mergeFrom( * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -712,10 +718,12 @@ public java.lang.String getImageVersion() { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -736,10 +744,12 @@ public com.google.protobuf.ByteString getImageVersionBytes() { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -758,10 +768,12 @@ public Builder setImageVersion(java.lang.String value) { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* @@ -777,10 +789,12 @@ public Builder clearImageVersion() { * * *
-     * Optional. The version of software inside the cluster. It must be one of the supported
-     * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+     * Optional. The version of software inside the cluster. It must be one of the
+     * supported [Cloud Dataproc
+     * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
      * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-     * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+     * ["preview"
+     * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
      * If unspecified, it defaults to the latest version.
      * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java index 63ac912d99ae..c4203a4aa010 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SoftwareConfigOrBuilder.java @@ -12,10 +12,12 @@ public interface SoftwareConfigOrBuilder * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* @@ -26,10 +28,12 @@ public interface SoftwareConfigOrBuilder * * *
-   * Optional. The version of software inside the cluster. It must be one of the supported
-   * [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+   * Optional. The version of software inside the cluster. It must be one of the
+   * supported [Cloud Dataproc
+   * Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
    * such as "1.2" (including a subminor version, such as "1.2.29"), or the
-   * ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+   * ["preview"
+   * version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
    * If unspecified, it defaults to the latest version.
    * 
* diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java index d20a7cb595a6..1deef51072b2 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SparkSqlJob.java @@ -7,8 +7,8 @@ * * *
- * A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
- * queries.
+ * A Cloud Dataproc job for running [Apache Spark
+ * SQL](http://spark.apache.org/sql/) queries.
  * 
* * Protobuf type {@code google.cloud.dataproc.v1beta2.SparkSqlJob} @@ -870,8 +870,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
-   * queries.
+   * A Cloud Dataproc job for running [Apache Spark
+   * SQL](http://spark.apache.org/sql/) queries.
    * 
* * Protobuf type {@code google.cloud.dataproc.v1beta2.SparkSqlJob} diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java index ee76f92dc518..5d58cd63b97b 100644 --- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java +++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequest.java @@ -257,9 +257,10 @@ public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder() { * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * receives two
+   * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+   * with the same id, then the second request will be ignored and the first
+   * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -285,9 +286,10 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * receives two
+   * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+   * with the same id, then the second request will be ignored and the first
+   * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -1064,9 +1066,10 @@ public com.google.cloud.dataproc.v1beta2.JobOrBuilder getJobOrBuilder() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * receives two
+     * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+     * with the same id, then the second request will be ignored and the first
+     * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -1092,9 +1095,10 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * receives two
+     * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+     * with the same id, then the second request will be ignored and the first
+     * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -1120,9 +1124,10 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * receives two
+     * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+     * with the same id, then the second request will be ignored and the first
+     * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -1146,9 +1151,10 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * receives two
+     * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+     * with the same id, then the second request will be ignored and the first
+     * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -1169,9 +1175,10 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+     * receives two
+     * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+     * with the same id, then the second request will be ignored and the first
+     * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
      * is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java
index f214ee434bbe..f20b98e53fcd 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/SubmitJobRequestOrBuilder.java
@@ -88,9 +88,10 @@ public interface SubmitJobRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * receives two
+   * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+   * with the same id, then the second request will be ignored and the first
+   * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -106,9 +107,10 @@ public interface SubmitJobRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+   * receives two
+   * [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+   * with the same id, then the second request will be ignored and the first
+   * [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
    * is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameter.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameter.java
index f347dbbdb491..d6b7e8532a3d 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameter.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameter.java
@@ -201,11 +201,12 @@ public com.google.protobuf.ByteString getNameBytes() {
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -247,11 +248,12 @@ public com.google.protobuf.ProtocolStringList getFieldsList() {
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -293,11 +295,12 @@ public int getFieldsCount() {
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -339,11 +342,12 @@ public java.lang.String getFields(int index) {
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -1014,11 +1018,12 @@ private void ensureFieldsIsMutable() {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1060,11 +1065,12 @@ public com.google.protobuf.ProtocolStringList getFieldsList() {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1106,11 +1112,12 @@ public int getFieldsCount() {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1152,11 +1159,12 @@ public java.lang.String getFields(int index) {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1198,11 +1206,12 @@ public com.google.protobuf.ByteString getFieldsBytes(int index) {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1250,11 +1259,12 @@ public Builder setFields(int index, java.lang.String value) {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1302,11 +1312,12 @@ public Builder addFields(java.lang.String value) {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1351,11 +1362,12 @@ public Builder addAllFields(java.lang.Iterable values) {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
@@ -1400,11 +1412,12 @@ public Builder clearFields() {
      *
      * 
      * Required. Paths to all fields that the parameter replaces.
-     * A field is allowed to appear in at most one parameter's list of field paths.
-     * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-     * For example, a field path that references the zone field of a workflow
-     * template's cluster selector would be specified as
-     * `placement.clusterSelector.zone`.
+     * A field is allowed to appear in at most one parameter's list of field
+     * paths.
+     * A field path is similar in syntax to a
+     * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+     * field path that references the zone field of a workflow template's cluster
+     * selector would be specified as `placement.clusterSelector.zone`.
      * Also, field paths can reference fields using the following syntax:
      * * Values in maps can be referenced by key:
      *     * labels['key']
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameterOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameterOrBuilder.java
index eeaeeb7280e7..7d6d18ddcdd6 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameterOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/TemplateParameterOrBuilder.java
@@ -46,11 +46,12 @@ public interface TemplateParameterOrBuilder
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -90,11 +91,12 @@ public interface TemplateParameterOrBuilder
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -134,11 +136,12 @@ public interface TemplateParameterOrBuilder
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
@@ -178,11 +181,12 @@ public interface TemplateParameterOrBuilder
    *
    * 
    * Required. Paths to all fields that the parameter replaces.
-   * A field is allowed to appear in at most one parameter's list of field paths.
-   * A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-   * For example, a field path that references the zone field of a workflow
-   * template's cluster selector would be specified as
-   * `placement.clusterSelector.zone`.
+   * A field is allowed to appear in at most one parameter's list of field
+   * paths.
+   * A field path is similar in syntax to a
+   * [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+   * field path that references the zone field of a workflow template's cluster
+   * selector would be specified as `placement.clusterSelector.zone`.
    * Also, field paths can reference fields using the following syntax:
    * * Values in maps can be referenced by key:
    *     * labels['key']
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java
index 65f288a3cf54..f819e98e1479 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequest.java
@@ -410,9 +410,10 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui
    *         }
    *       }
    *     }
-   * Similarly, to change the number of preemptible workers in a cluster to 5, the
-   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
-   * and the `PATCH` request body would be set as follows:
+   * Similarly, to change the number of preemptible workers in a cluster to 5,
+   * the `update_mask` parameter would be
+   * `config.secondary_worker_config.num_instances`, and the `PATCH` request
+   * body would be set as follows:
    *     {
    *       "config":{
    *         "secondaryWorkerConfig":{
@@ -429,19 +430,24 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui
    * <td>labels</td><td>Updates labels</td>
    * </tr>
    * <tr>
-   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker
+   * group</td>
    * </tr>
    * <tr>
-   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary
+   * worker group</td>
    * </tr>
    * <tr>
-   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
+   * duration</td>
    * </tr>
    * <tr>
-   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
+   * deletion timestamp</td>
    * </tr>
    * <tr>
-   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
+   * duration</td>
    * </tr>
    * </table>
    * 
@@ -467,9 +473,10 @@ public boolean hasUpdateMask() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -486,19 +493,24 @@ public boolean hasUpdateMask() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -524,9 +536,10 @@ public com.google.protobuf.FieldMask getUpdateMask() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -543,19 +556,24 @@ public com.google.protobuf.FieldMask getUpdateMask() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -573,10 +591,11 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -601,10 +620,11 @@ public java.lang.String getRequestId() {
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -1806,9 +1826,10 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui
      *         }
      *       }
      *     }
-     * Similarly, to change the number of preemptible workers in a cluster to 5, the
-     * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
-     * and the `PATCH` request body would be set as follows:
+     * Similarly, to change the number of preemptible workers in a cluster to 5,
+     * the `update_mask` parameter would be
+     * `config.secondary_worker_config.num_instances`, and the `PATCH` request
+     * body would be set as follows:
      *     {
      *       "config":{
      *         "secondaryWorkerConfig":{
@@ -1825,19 +1846,24 @@ public com.google.protobuf.DurationOrBuilder getGracefulDecommissionTimeoutOrBui
      * <td>labels</td><td>Updates labels</td>
      * </tr>
      * <tr>
-     * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+     * <td>config.worker_config.num_instances</td><td>Resize primary worker
+     * group</td>
      * </tr>
      * <tr>
-     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+     * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary
+     * worker group</td>
      * </tr>
      * <tr>
-     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+     * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
+     * duration</td>
      * </tr>
      * <tr>
-     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+     * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
+     * deletion timestamp</td>
      * </tr>
      * <tr>
-     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+     * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
+     * duration</td>
      * </tr>
      * </table>
      * 
@@ -1863,9 +1889,10 @@ public boolean hasUpdateMask() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -1882,19 +1909,24 @@ public boolean hasUpdateMask() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -1926,9 +1958,10 @@ public com.google.protobuf.FieldMask getUpdateMask() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -1945,19 +1978,24 @@ public com.google.protobuf.FieldMask getUpdateMask() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -1993,9 +2031,10 @@ public Builder setUpdateMask(com.google.protobuf.FieldMask value) { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -2012,19 +2051,24 @@ public Builder setUpdateMask(com.google.protobuf.FieldMask value) { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -2057,9 +2101,10 @@ public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForVal * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -2076,19 +2121,24 @@ public Builder setUpdateMask(com.google.protobuf.FieldMask.Builder builderForVal * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -2126,9 +2176,10 @@ public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -2145,19 +2196,24 @@ public Builder mergeUpdateMask(com.google.protobuf.FieldMask value) { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -2191,9 +2247,10 @@ public Builder clearUpdateMask() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -2210,19 +2267,24 @@ public Builder clearUpdateMask() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -2250,9 +2312,10 @@ public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -2269,19 +2332,24 @@ public com.google.protobuf.FieldMask.Builder getUpdateMaskBuilder() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -2313,9 +2381,10 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -2332,19 +2401,24 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -2374,10 +2448,11 @@ public com.google.protobuf.FieldMaskOrBuilder getUpdateMaskOrBuilder() { * *
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2402,10 +2477,11 @@ public java.lang.String getRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2430,10 +2506,11 @@ public com.google.protobuf.ByteString getRequestIdBytes() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2456,10 +2533,11 @@ public Builder setRequestId(java.lang.String value) {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -2479,10 +2557,11 @@ public Builder clearRequestId() {
      *
      * 
      * Optional. A unique id used to identify the request. If the server
-     * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-     * id, then the second request will be ignored and the
-     * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-     * backend is returned.
+     * receives two
+     * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+     * requests  with the same id, then the second request will be ignored and the
+     * first [google.longrunning.Operation][google.longrunning.Operation] created
+     * and stored in the backend is returned.
      * It is recommended to always set this value to a
      * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
      * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java
index e9a16ee9eef2..61d34bbd8056 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/UpdateClusterRequestOrBuilder.java
@@ -169,9 +169,10 @@ public interface UpdateClusterRequestOrBuilder
    *         }
    *       }
    *     }
-   * Similarly, to change the number of preemptible workers in a cluster to 5, the
-   * `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
-   * and the `PATCH` request body would be set as follows:
+   * Similarly, to change the number of preemptible workers in a cluster to 5,
+   * the `update_mask` parameter would be
+   * `config.secondary_worker_config.num_instances`, and the `PATCH` request
+   * body would be set as follows:
    *     {
    *       "config":{
    *         "secondaryWorkerConfig":{
@@ -188,19 +189,24 @@ public interface UpdateClusterRequestOrBuilder
    * <td>labels</td><td>Updates labels</td>
    * </tr>
    * <tr>
-   * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
+   * <td>config.worker_config.num_instances</td><td>Resize primary worker
+   * group</td>
    * </tr>
    * <tr>
-   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
+   * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary
+   * worker group</td>
    * </tr>
    * <tr>
-   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
+   * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
+   * duration</td>
    * </tr>
    * <tr>
-   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
+   * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
+   * deletion timestamp</td>
    * </tr>
    * <tr>
-   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
+   * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
+   * duration</td>
    * </tr>
    * </table>
    * 
@@ -224,9 +230,10 @@ public interface UpdateClusterRequestOrBuilder * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -243,19 +250,24 @@ public interface UpdateClusterRequestOrBuilder * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -279,9 +291,10 @@ public interface UpdateClusterRequestOrBuilder * } * } * } - * Similarly, to change the number of preemptible workers in a cluster to 5, the - * `update_mask` parameter would be `config.secondary_worker_config.num_instances`, - * and the `PATCH` request body would be set as follows: + * Similarly, to change the number of preemptible workers in a cluster to 5, + * the `update_mask` parameter would be + * `config.secondary_worker_config.num_instances`, and the `PATCH` request + * body would be set as follows: * { * "config":{ * "secondaryWorkerConfig":{ @@ -298,19 +311,24 @@ public interface UpdateClusterRequestOrBuilder * <td>labels</td><td>Updates labels</td> * </tr> * <tr> - * <td>config.worker_config.num_instances</td><td>Resize primary worker group</td> + * <td>config.worker_config.num_instances</td><td>Resize primary worker + * group</td> * </tr> * <tr> - * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td> + * <td>config.secondary_worker_config.num_instances</td><td>Resize secondary + * worker group</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td> + * <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL + * duration</td> * </tr> * <tr> - * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td> + * <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL + * deletion timestamp</td> * </tr> * <tr> - * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td> + * <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL + * duration</td> * </tr> * </table> *
@@ -324,10 +342,11 @@ public interface UpdateClusterRequestOrBuilder * *
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
@@ -342,10 +361,11 @@ public interface UpdateClusterRequestOrBuilder
    *
    * 
    * Optional. A unique id used to identify the request. If the server
-   * receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-   * id, then the second request will be ignored and the
-   * first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-   * backend is returned.
+   * receives two
+   * [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+   * requests  with the same id, then the second request will be ignored and the
+   * first [google.longrunning.Operation][google.longrunning.Operation] created
+   * and stored in the backend is returned.
    * It is recommended to always set this value to a
    * [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
    * The id must contain only letters (a-z, A-Z), numbers (0-9),
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto
index fd3c86a0ffb8..fd89fbffed0e 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/clusters.proto
@@ -18,8 +18,8 @@ syntax = "proto3";
 package google.cloud.dataproc.v1beta2;
 
 import "google/api/annotations.proto";
-import "google/cloud/dataproc/v1beta2/shared.proto";
 import "google/cloud/dataproc/v1beta2/operations.proto";
+import "google/cloud/dataproc/v1beta2/shared.proto";
 import "google/longrunning/operations.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/field_mask.proto";
@@ -30,12 +30,12 @@ option java_multiple_files = true;
 option java_outer_classname = "ClustersProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
 
-
 // The ClusterControllerService provides methods to manage clusters
 // of Compute Engine instances.
 service ClusterController {
   // Creates a cluster in a project.
-  rpc CreateCluster(CreateClusterRequest) returns (google.longrunning.Operation) {
+  rpc CreateCluster(CreateClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1beta2/projects/{project_id}/regions/{region}/clusters"
       body: "cluster"
@@ -43,7 +43,8 @@ service ClusterController {
   }
 
   // Updates a cluster in a project.
-  rpc UpdateCluster(UpdateClusterRequest) returns (google.longrunning.Operation) {
+  rpc UpdateCluster(UpdateClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       patch: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
       body: "cluster"
@@ -51,7 +52,8 @@ service ClusterController {
   }
 
   // Deletes a cluster in a project.
-  rpc DeleteCluster(DeleteClusterRequest) returns (google.longrunning.Operation) {
+  rpc DeleteCluster(DeleteClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       delete: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}"
     };
@@ -74,7 +76,8 @@ service ClusterController {
   // Gets cluster diagnostic information.
   // After the operation completes, the Operation.response field
   // contains `DiagnoseClusterOutputLocation`.
-  rpc DiagnoseCluster(DiagnoseClusterRequest) returns (google.longrunning.Operation) {
+  rpc DiagnoseCluster(DiagnoseClusterRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1beta2/projects/{project_id}/regions/{region}/clusters/{cluster_name}:diagnose"
       body: "*"
@@ -100,8 +103,9 @@ message Cluster {
   // Label **keys** must contain 1 to 63 characters, and must conform to
   // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
   // Label **values** may be empty, but, if present, must contain 1 to 63
-  // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
-  // No more than 32 labels can be associated with a cluster.
+  // characters, and must conform to [RFC
+  // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+  // associated with a cluster.
   map labels = 8;
 
   // Output only. Cluster status.
@@ -116,8 +120,8 @@ message Cluster {
 
   // Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
   //
-  // **Beta Feature**: This report is available for testing purposes only. It may
-  // be changed before final release.
+  // **Beta Feature**: This report is available for testing purposes only. It
+  // may be changed before final release.
   ClusterMetrics metrics = 9;
 }
 
@@ -156,9 +160,11 @@ message ClusterConfig {
   // Optional. Commands to execute on each node after config is
   // completed. By default, executables are run on master and all worker nodes.
   // You can test a node's role metadata to run an executable on
-  // a master or worker node, as shown below using `curl` (you can also use `wget`):
+  // a master or worker node, as shown below using `curl` (you can also use
+  // `wget`):
   //
-  //     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
+  //     ROLE=$(curl -H Metadata-Flavor:Google
+  //     http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
   //     if [[ "${ROLE}" == 'Master' ]]; then
   //       ... master specific actions ...
   //     else
@@ -217,11 +223,11 @@ message GceClusterConfig {
   string subnetwork_uri = 6;
 
   // Optional. If true, all instances in the cluster will only have internal IP
-  // addresses. By default, clusters are not restricted to internal IP addresses,
-  // and will have ephemeral external IP addresses assigned to each instance.
-  // This `internal_ip_only` restriction can only be enabled for subnetwork
-  // enabled networks, and all off-cluster dependencies must be configured to be
-  // accessible without external IP addresses.
+  // addresses. By default, clusters are not restricted to internal IP
+  // addresses, and will have ephemeral external IP addresses assigned to each
+  // instance. This `internal_ip_only` restriction can only be enabled for
+  // subnetwork enabled networks, and all off-cluster dependencies must be
+  // configured to be accessible without external IP addresses.
   bool internal_ip_only = 7;
 
   // Optional. The service account of the instances. Defaults to the default
@@ -231,7 +237,8 @@ message GceClusterConfig {
   // * roles/logging.logWriter
   // * roles/storage.objectAdmin
   //
-  // (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
+  // (see
+  // https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
   // for more information).
   // Example: `[account_id]@[project_id].iam.gserviceaccount.com`
   string service_account = 8;
@@ -257,7 +264,8 @@ message GceClusterConfig {
   repeated string tags = 4;
 
   // The Compute Engine metadata entries to add to all instances (see
-  // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
+  // [Project and instance
+  // metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
   map metadata = 5;
 }
 
@@ -286,7 +294,8 @@ message InstanceGroupConfig {
   // * `n1-standard-2`
   //
   // **Auto Zone Exception**: If you are using the Cloud Dataproc
-  // [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+  // [Auto Zone
+  // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
   // feature, you must use the short name of the machine type
   // resource, for example, `n1-standard-2`.
   string machine_type_uri = 4;
@@ -294,7 +303,8 @@ message InstanceGroupConfig {
   // Optional. Disk option config settings.
   DiskConfig disk_config = 5;
 
-  // Optional. Specifies that this instance group contains preemptible instances.
+  // Optional. Specifies that this instance group contains preemptible
+  // instances.
   bool is_preemptible = 6;
 
   // Output only. The config for Compute Engine Instance Group
@@ -338,7 +348,8 @@ message AcceleratorConfig {
   // * `nvidia-tesla-k80`
   //
   // **Auto Zone Exception**: If you are using the Cloud Dataproc
-  // [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+  // [Auto Zone
+  // Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
   // feature, you must use the short name of the accelerator type
   // resource, for example, `nvidia-tesla-k80`.
   string accelerator_type_uri = 1;
@@ -461,10 +472,12 @@ message ClusterStatus {
 
 // Specifies the selection and config of software inside the cluster.
 message SoftwareConfig {
-  // Optional. The version of software inside the cluster. It must be one of the supported
-  // [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
+  // Optional. The version of software inside the cluster. It must be one of the
+  // supported [Cloud Dataproc
+  // Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
   // such as "1.2" (including a subminor version, such as "1.2.29"), or the
-  // ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
+  // ["preview"
+  // version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
   // If unspecified, it defaults to the latest version.
   string image_version = 1;
 
@@ -514,10 +527,11 @@ message CreateClusterRequest {
   Cluster cluster = 2;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
-  // is returned.
+  // receives two
+  // [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest]
+  // requests  with the same id, then the second request will be ignored and the
+  // first [google.longrunning.Operation][google.longrunning.Operation] created
+  // and stored in the backend is returned.
   //
   // It is recommended to always set this value to a
   // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -566,9 +580,10 @@ message UpdateClusterRequest {
   //       }
   //     }
   //
-  // Similarly, to change the number of preemptible workers in a cluster to 5, the
-  // `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
-  // and the `PATCH` request body would be set as follows:
+  // Similarly, to change the number of preemptible workers in a cluster to 5,
+  // the `update_mask` parameter would be
+  // `config.secondary_worker_config.num_instances`, and the `PATCH` request
+  // body would be set as follows:
   //
   //     {
   //       "config":{
@@ -587,28 +602,34 @@ message UpdateClusterRequest {
   // labelsUpdates labels
   // 
   // 
-  // config.worker_config.num_instancesResize primary worker group
+  // config.worker_config.num_instancesResize primary worker
+  // group
   // 
   // 
-  // config.secondary_worker_config.num_instancesResize secondary worker group
+  // config.secondary_worker_config.num_instancesResize secondary
+  // worker group
   // 
   // 
-  // config.lifecycle_config.auto_delete_ttlReset MAX TTL duration
+  // config.lifecycle_config.auto_delete_ttlReset MAX TTL
+  // duration
   // 
   // 
-  // config.lifecycle_config.auto_delete_timeUpdate MAX TTL deletion timestamp
+  // config.lifecycle_config.auto_delete_timeUpdate MAX TTL
+  // deletion timestamp
   // 
   // 
-  // config.lifecycle_config.idle_delete_ttlUpdate Idle TTL duration
+  // config.lifecycle_config.idle_delete_ttlUpdate Idle TTL
+  // duration
   // 
   // 
   google.protobuf.FieldMask update_mask = 4;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-  // backend is returned.
+  // receives two
+  // [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest]
+  // requests  with the same id, then the second request will be ignored and the
+  // first [google.longrunning.Operation][google.longrunning.Operation] created
+  // and stored in the backend is returned.
   //
   // It is recommended to always set this value to a
   // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
@@ -635,10 +656,11 @@ message DeleteClusterRequest {
   string cluster_uuid = 4;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
-  // backend is returned.
+  // receives two
+  // [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest]
+  // requests  with the same id, then the second request will be ignored and the
+  // first [google.longrunning.Operation][google.longrunning.Operation] created
+  // and stored in the backend is returned.
   //
   // It is recommended to always set this value to a
   // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto
index 7aff5f462ad1..4f949ba9452f 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/jobs.proto
@@ -27,7 +27,6 @@ option java_multiple_files = true;
 option java_outer_classname = "JobsProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
 
-
 // The JobController provides methods to manage jobs.
 service JobController {
   // Submits a job to a cluster.
@@ -62,7 +61,8 @@ service JobController {
 
   // Starts a job cancellation request. To access the job resource
   // after cancellation, call
-  // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+  // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
+  // or
   // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
   rpc CancelJob(CancelJobRequest) returns (Job) {
     option (google.api.http) = {
@@ -122,8 +122,10 @@ message LoggingConfig {
 }
 
 // A Cloud Dataproc job for running
-// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
-// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
+// [Apache Hadoop
+// MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html)
+// jobs on [Apache Hadoop
+// YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).
 message HadoopJob {
   // Required. Indicates the location of the driver's main class. Specify
   // either the jar file that contains the main class or the main class name.
@@ -143,8 +145,8 @@ message HadoopJob {
   }
 
   // Optional. The arguments to pass to the driver. Do not
-  // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job
-  // properties, since a collision may occur that causes an incorrect job
+  // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
+  // job properties, since a collision may occur that causes an incorrect job
   // submission.
   repeated string args = 3;
 
@@ -178,7 +180,8 @@ message SparkJob {
   // Required. The specification of the main method to call to drive the job.
   // Specify either the jar file that contains the main class or the main class
   // name. To pass both a main jar and a main class in that jar, add the jar to
-  // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.
+  // `CommonJob.jar_file_uris`, and then specify the main class name in
+  // `main_class`.
   oneof driver {
     // The HCFS URI of the jar file that contains the main class.
     string main_jar_file_uri = 1;
@@ -217,7 +220,8 @@ message SparkJob {
 }
 
 // A Cloud Dataproc job for running
-// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
+// [Apache
+// PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html)
 // applications on YARN.
 message PySparkJob {
   // Required. The HCFS URI of the main Python file to use as the driver. Must
@@ -288,8 +292,8 @@ message HiveJob {
   }
 
   // Optional. Whether to continue executing queries if a query fails.
-  // The default value is `false`. Setting to `true` can be useful when executing
-  // independent parallel queries.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
   bool continue_on_failure = 3;
 
   // Optional. Mapping of query variable names to values (equivalent to the
@@ -308,8 +312,8 @@ message HiveJob {
   repeated string jar_file_uris = 6;
 }
 
-// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/)
-// queries.
+// A Cloud Dataproc job for running [Apache Spark
+// SQL](http://spark.apache.org/sql/) queries.
 message SparkSqlJob {
   // Required. The sequence of Spark SQL queries to execute, specified as
   // either an HCFS file URI or as a list of queries.
@@ -351,8 +355,8 @@ message PigJob {
   }
 
   // Optional. Whether to continue executing queries if a query fails.
-  // The default value is `false`. Setting to `true` can be useful when executing
-  // independent parallel queries.
+  // The default value is `false`. Setting to `true` can be useful when
+  // executing independent parallel queries.
   bool continue_on_failure = 3;
 
   // Optional. Mapping of query variable names to values (equivalent to the Pig
@@ -573,8 +577,8 @@ message Job {
 
   // Output only. The collection of YARN applications spun up by this job.
   //
-  // **Beta** Feature: This report is available for testing purposes only. It may
-  // be changed before final release.
+  // **Beta** Feature: This report is available for testing purposes only. It
+  // may be changed before final release.
   repeated YarnApplication yarn_applications = 9;
 
   // Output only. The email address of the user submitting the job. For jobs
@@ -594,8 +598,9 @@ message Job {
   // Label **keys** must contain 1 to 63 characters, and must conform to
   // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
   // Label **values** may be empty, but, if present, must contain 1 to 63
-  // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
-  // No more than 32 labels can be associated with a job.
+  // characters, and must conform to [RFC
+  // 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
+  // associated with a job.
   map labels = 18;
 
   // Optional. Job scheduling configuration.
@@ -633,9 +638,10 @@ message SubmitJobRequest {
   Job job = 2;
 
   // Optional. A unique id used to identify the request. If the server
-  // receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests  with the same
-  // id, then the second request will be ignored and the
-  // first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
+  // receives two
+  // [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests
+  // with the same id, then the second request will be ignored and the first
+  // [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
   // is returned.
   //
   // It is recommended to always set this value to a
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto
index 717410832ff6..b77eeddee354 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/operations.proto
@@ -25,7 +25,6 @@ option java_multiple_files = true;
 option java_outer_classname = "OperationsProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
 
-
 // The status of the operation.
 message ClusterOperationStatus {
   // The operation state.
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto
index 8d2f5e62834c..c9bea686ae34 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/shared.proto
@@ -23,4 +23,3 @@ option go_package = "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta
 option java_multiple_files = true;
 option java_outer_classname = "SharedProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
-
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto
index 982f874d63b1..cb2584496568 100644
--- a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/proto/google/cloud/dataproc/v1beta2/workflow_templates.proto
@@ -29,12 +29,12 @@ option java_multiple_files = true;
 option java_outer_classname = "WorkflowTemplatesProto";
 option java_package = "com.google.cloud.dataproc.v1beta2";
 
-
 // The API interface for managing Workflow Templates in the
 // Cloud Dataproc API.
 service WorkflowTemplateService {
   // Creates new workflow template.
-  rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest) returns (WorkflowTemplate) {
+  rpc CreateWorkflowTemplate(CreateWorkflowTemplateRequest)
+      returns (WorkflowTemplate) {
     option (google.api.http) = {
       post: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates"
       body: "template"
@@ -49,7 +49,8 @@ service WorkflowTemplateService {
   //
   // Can retrieve previously instantiated template by specifying optional
   // version parameter.
-  rpc GetWorkflowTemplate(GetWorkflowTemplateRequest) returns (WorkflowTemplate) {
+  rpc GetWorkflowTemplate(GetWorkflowTemplateRequest)
+      returns (WorkflowTemplate) {
     option (google.api.http) = {
       get: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}"
       additional_bindings {
@@ -76,7 +77,8 @@ service WorkflowTemplateService {
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
   // [Empty][google.protobuf.Empty].
-  rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest) returns (google.longrunning.Operation) {
+  rpc InstantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}:instantiate"
       body: "*"
@@ -90,7 +92,8 @@ service WorkflowTemplateService {
   // Instantiates a template and begins execution.
   //
   // This method is equivalent to executing the sequence
-  // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+  // [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate],
+  // [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
   // [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
   //
   // The returned Operation can be used to track execution of
@@ -109,7 +112,9 @@ service WorkflowTemplateService {
   // On successful completion,
   // [Operation.response][google.longrunning.Operation.response] will be
   // [Empty][google.protobuf.Empty].
-  rpc InstantiateInlineWorkflowTemplate(InstantiateInlineWorkflowTemplateRequest) returns (google.longrunning.Operation) {
+  rpc InstantiateInlineWorkflowTemplate(
+      InstantiateInlineWorkflowTemplateRequest)
+      returns (google.longrunning.Operation) {
     option (google.api.http) = {
       post: "/v1beta2/{parent=projects/*/locations/*}/workflowTemplates:instantiateInline"
       body: "template"
@@ -122,7 +127,8 @@ service WorkflowTemplateService {
 
   // Updates (replaces) workflow template. The updated template
   // must contain version that matches the current server version.
-  rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest) returns (WorkflowTemplate) {
+  rpc UpdateWorkflowTemplate(UpdateWorkflowTemplateRequest)
+      returns (WorkflowTemplate) {
     option (google.api.http) = {
       put: "/v1beta2/{template.name=projects/*/regions/*/workflowTemplates/*}"
       body: "template"
@@ -134,7 +140,8 @@ service WorkflowTemplateService {
   }
 
   // Lists workflows that match the specified filter in the request.
-  rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest) returns (ListWorkflowTemplatesResponse) {
+  rpc ListWorkflowTemplates(ListWorkflowTemplatesRequest)
+      returns (ListWorkflowTemplatesResponse) {
     option (google.api.http) = {
       get: "/v1beta2/{parent=projects/*/regions/*}/workflowTemplates"
       additional_bindings {
@@ -144,7 +151,8 @@ service WorkflowTemplateService {
   }
 
   // Deletes a workflow template. It does not cancel in-progress workflows.
-  rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest) returns (google.protobuf.Empty) {
+  rpc DeleteWorkflowTemplate(DeleteWorkflowTemplateRequest)
+      returns (google.protobuf.Empty) {
     option (google.api.http) = {
       delete: "/v1beta2/{name=projects/*/regions/*/workflowTemplates/*}"
       additional_bindings {
@@ -276,8 +284,8 @@ message OrderedJob {
   //
   // The step id is used as prefix for job id, as job
   // `goog-dataproc-workflow-step-id` label, and in
-  // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
-  // steps.
+  // [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids]
+  // field from other steps.
   //
   // The id must contain only letters (a-z, A-Z), numbers (0-9),
   // underscores (_), and hyphens (-). Cannot begin or end with underscore
@@ -345,12 +353,13 @@ message TemplateParameter {
   string name = 1;
 
   // Required. Paths to all fields that the parameter replaces.
-  // A field is allowed to appear in at most one parameter's list of field paths.
+  // A field is allowed to appear in at most one parameter's list of field
+  // paths.
   //
-  // A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
-  // For example, a field path that references the zone field of a workflow
-  // template's cluster selector would be specified as
-  // `placement.clusterSelector.zone`.
+  // A field path is similar in syntax to a
+  // [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
+  // field path that references the zone field of a workflow template's cluster
+  // selector would be specified as `placement.clusterSelector.zone`.
   //
   // Also, field paths can reference fields using the following syntax:
   //
@@ -645,8 +654,8 @@ message ListWorkflowTemplatesResponse {
   // Output only. WorkflowTemplates list.
   repeated WorkflowTemplate templates = 1;
 
-  // Output only. This token is included in the response if there are more results
-  // to fetch. To fetch additional results, provide this value as the
+  // Output only. This token is included in the response if there are more
+  // results to fetch. To fetch additional results, provide this value as the
   // page_token in a subsequent ListWorkflowTemplatesRequest.
   string next_page_token = 2;
 }
diff --git a/google-cloud-clients/google-cloud-dataproc/synth.metadata b/google-cloud-clients/google-cloud-dataproc/synth.metadata
index b64d33c0853d..ef0d3dcdbe16 100644
--- a/google-cloud-clients/google-cloud-dataproc/synth.metadata
+++ b/google-cloud-clients/google-cloud-dataproc/synth.metadata
@@ -1,19 +1,19 @@
 {
-  "updateTime": "2019-02-02T08:37:20.101142Z",
+  "updateTime": "2019-03-01T08:40:02.252998Z",
   "sources": [
     {
       "generator": {
         "name": "artman",
-        "version": "0.16.8",
-        "dockerImage": "googleapis/artman@sha256:75bc07ef34a1de9895c18af54dc503ed3b3f3b52e85062e3360a979d2a0741e7"
+        "version": "0.16.14",
+        "dockerImage": "googleapis/artman@sha256:f3d61ae45abaeefb6be5f228cda22732c2f1b00fb687c79c4bd4f2c42bb1e1a7"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "bce093dab3e65c40eb9a37efbdc960f34df6037a",
-        "internalRef": "231974277"
+        "sha": "41d72d444fbe445f4da89e13be02078734fb7875",
+        "internalRef": "236230004"
       }
     }
   ],