diff --git a/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml
new file mode 100644
index 000000000000..d09eb9e7b952
--- /dev/null
+++ b/google-api-grpc/grpc-google-cloud-dataproc-v1beta2/pom.xml
@@ -0,0 +1,31 @@
+
+ * The ClusterControllerService provides methods to manage clusters
+ * of Compute Engine instances.
+ *
+ */
+@javax.annotation.Generated(
+ value = "by gRPC proto compiler (version 1.10.0)",
+ comments = "Source: google/cloud/dataproc/v1beta2/clusters.proto")
+public final class ClusterControllerGrpc {
+
+ private ClusterControllerGrpc() {}
+
+ public static final String SERVICE_NAME = "google.cloud.dataproc.v1beta2.ClusterController";
+
+ // Static method descriptors that strictly reflect the proto.
+ @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
+ @java.lang.Deprecated // Use {@link #getCreateClusterMethod()} instead.
+ public static final io.grpc.MethodDescriptor
+ * The ClusterControllerService provides methods to manage clusters
+ * of Compute Engine instances.
+ *
+ */
+ public static abstract class ClusterControllerImplBase implements io.grpc.BindableService {
+
+ /**
+ *
+ * Creates a cluster in a project.
+ *
+ */
+ public void createCluster(com.google.cloud.dataproc.v1beta2.CreateClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Updates a cluster in a project.
+ *
+ */
+ public void updateCluster(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Deletes a cluster in a project.
+ *
+ */
+ public void deleteCluster(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Gets the resource representation for a cluster in a project.
+ *
+ */
+ public void getCluster(com.google.cloud.dataproc.v1beta2.GetClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Lists all regions/{region}/clusters in a project.
+ *
+ */
+ public void listClusters(com.google.cloud.dataproc.v1beta2.ListClustersRequest request,
+ io.grpc.stub.StreamObserver
+ * Gets cluster diagnostic information.
+ * After the operation completes, the Operation.response field
+ * contains `DiagnoseClusterOutputLocation`.
+ *
+ */
+ public void diagnoseCluster(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * The ClusterControllerService provides methods to manage clusters
+ * of Compute Engine instances.
+ *
+ */
+ public static final class ClusterControllerStub extends io.grpc.stub.AbstractStub
+ * Creates a cluster in a project.
+ *
+ */
+ public void createCluster(com.google.cloud.dataproc.v1beta2.CreateClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Updates a cluster in a project.
+ *
+ */
+ public void updateCluster(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Deletes a cluster in a project.
+ *
+ */
+ public void deleteCluster(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Gets the resource representation for a cluster in a project.
+ *
+ */
+ public void getCluster(com.google.cloud.dataproc.v1beta2.GetClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * Lists all regions/{region}/clusters in a project.
+ *
+ */
+ public void listClusters(com.google.cloud.dataproc.v1beta2.ListClustersRequest request,
+ io.grpc.stub.StreamObserver
+ * Gets cluster diagnostic information.
+ * After the operation completes, the Operation.response field
+ * contains `DiagnoseClusterOutputLocation`.
+ *
+ */
+ public void diagnoseCluster(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request,
+ io.grpc.stub.StreamObserver
+ * The ClusterControllerService provides methods to manage clusters
+ * of Compute Engine instances.
+ *
+ */
+ public static final class ClusterControllerBlockingStub extends io.grpc.stub.AbstractStub
+ * Creates a cluster in a project.
+ *
+ */
+ public com.google.longrunning.Operation createCluster(com.google.cloud.dataproc.v1beta2.CreateClusterRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getCreateClusterMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Updates a cluster in a project.
+ *
+ */
+ public com.google.longrunning.Operation updateCluster(com.google.cloud.dataproc.v1beta2.UpdateClusterRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getUpdateClusterMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Deletes a cluster in a project.
+ *
+ */
+ public com.google.longrunning.Operation deleteCluster(com.google.cloud.dataproc.v1beta2.DeleteClusterRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getDeleteClusterMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Gets the resource representation for a cluster in a project.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.Cluster getCluster(com.google.cloud.dataproc.v1beta2.GetClusterRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getGetClusterMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Lists all regions/{region}/clusters in a project.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.ListClustersResponse listClusters(com.google.cloud.dataproc.v1beta2.ListClustersRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getListClustersMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Gets cluster diagnostic information.
+ * After the operation completes, the Operation.response field
+ * contains `DiagnoseClusterOutputLocation`.
+ *
+ */
+ public com.google.longrunning.Operation diagnoseCluster(com.google.cloud.dataproc.v1beta2.DiagnoseClusterRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getDiagnoseClusterMethodHelper(), getCallOptions(), request);
+ }
+ }
+
+ /**
+ *
+ * The ClusterControllerService provides methods to manage clusters
+ * of Compute Engine instances.
+ *
+ */
+ public static final class ClusterControllerFutureStub extends io.grpc.stub.AbstractStub
+ * Creates a cluster in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Updates a cluster in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Deletes a cluster in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Gets the resource representation for a cluster in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Lists all regions/{region}/clusters in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Gets cluster diagnostic information.
+ * After the operation completes, the Operation.response field
+ * contains `DiagnoseClusterOutputLocation`.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
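For context, a minimal usage sketch for the ClusterController stubs generated above; this is not part of the diff. It assumes grpc-java's standard generated `newBlockingStub` factory (which falls in the truncated portions of this hunk); the endpoint, project ID, and region are placeholders, and call credentials are omitted for brevity.

import com.google.cloud.dataproc.v1beta2.Cluster;
import com.google.cloud.dataproc.v1beta2.ClusterControllerGrpc;
import com.google.cloud.dataproc.v1beta2.CreateClusterRequest;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

public class CreateClusterSketch {
  public static void main(String[] args) {
    // TLS is the default for forAddress(); auth credentials are omitted here.
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("dataproc.googleapis.com", 443).build();
    ClusterControllerGrpc.ClusterControllerBlockingStub stub =
        ClusterControllerGrpc.newBlockingStub(channel);
    // createCluster is long-running: it returns an Operation to poll, not a Cluster.
    com.google.longrunning.Operation op = stub.createCluster(
        CreateClusterRequest.newBuilder()
            .setProjectId("my-project")   // placeholder
            .setRegion("us-central1")     // placeholder
            .setCluster(Cluster.getDefaultInstance())
            .build());
    System.out.println("Started operation: " + op.getName());
    channel.shutdown();
  }
}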
+ * The JobController provides methods to manage jobs.
+ *
+ */
+@javax.annotation.Generated(
+ value = "by gRPC proto compiler (version 1.10.0)",
+ comments = "Source: google/cloud/dataproc/v1beta2/jobs.proto")
+public final class JobControllerGrpc {
+
+ private JobControllerGrpc() {}
+
+ public static final String SERVICE_NAME = "google.cloud.dataproc.v1beta2.JobController";
+
+ // Static method descriptors that strictly reflect the proto.
+ @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
+ @java.lang.Deprecated // Use {@link #getSubmitJobMethod()} instead.
+ public static final io.grpc.MethodDescriptor
+ * The JobController provides methods to manage jobs.
+ *
+ */
+ public static abstract class JobControllerImplBase implements io.grpc.BindableService {
+
+ /**
+ *
+ * Submits a job to a cluster.
+ *
+ */
+ public void submitJob(com.google.cloud.dataproc.v1beta2.SubmitJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Gets the resource representation for a job in a project.
+ *
+ */
+ public void getJob(com.google.cloud.dataproc.v1beta2.GetJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Lists regions/{region}/jobs in a project.
+ *
+ */
+ public void listJobs(com.google.cloud.dataproc.v1beta2.ListJobsRequest request,
+ io.grpc.stub.StreamObserver
+ * Updates a job in a project.
+ *
+ */
+ public void updateJob(com.google.cloud.dataproc.v1beta2.UpdateJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Starts a job cancellation request. To access the job resource
+ * after cancellation, call
+ * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+ * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+ *
+ */
+ public void cancelJob(com.google.cloud.dataproc.v1beta2.CancelJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Deletes the job from the project. If the job is active, the delete fails,
+ * and the response returns `FAILED_PRECONDITION`.
+ *
+ */
+ public void deleteJob(com.google.cloud.dataproc.v1beta2.DeleteJobRequest request,
+ io.grpc.stub.StreamObserver
+ * The JobController provides methods to manage jobs.
+ *
+ */
+ public static final class JobControllerStub extends io.grpc.stub.AbstractStub
+ * Submits a job to a cluster.
+ *
+ */
+ public void submitJob(com.google.cloud.dataproc.v1beta2.SubmitJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Gets the resource representation for a job in a project.
+ *
+ */
+ public void getJob(com.google.cloud.dataproc.v1beta2.GetJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Lists regions/{region}/jobs in a project.
+ *
+ */
+ public void listJobs(com.google.cloud.dataproc.v1beta2.ListJobsRequest request,
+ io.grpc.stub.StreamObserver
+ * Updates a job in a project.
+ *
+ */
+ public void updateJob(com.google.cloud.dataproc.v1beta2.UpdateJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Starts a job cancellation request. To access the job resource
+ * after cancellation, call
+ * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+ * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+ *
+ */
+ public void cancelJob(com.google.cloud.dataproc.v1beta2.CancelJobRequest request,
+ io.grpc.stub.StreamObserver
+ * Deletes the job from the project. If the job is active, the delete fails,
+ * and the response returns `FAILED_PRECONDITION`.
+ *
+ */
+ public void deleteJob(com.google.cloud.dataproc.v1beta2.DeleteJobRequest request,
+ io.grpc.stub.StreamObserver
+ * The JobController provides methods to manage jobs.
+ *
+ */
+ public static final class JobControllerBlockingStub extends io.grpc.stub.AbstractStub
+ * Submits a job to a cluster.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.Job submitJob(com.google.cloud.dataproc.v1beta2.SubmitJobRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getSubmitJobMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Gets the resource representation for a job in a project.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.Job getJob(com.google.cloud.dataproc.v1beta2.GetJobRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getGetJobMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Lists regions/{region}/jobs in a project.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.ListJobsResponse listJobs(com.google.cloud.dataproc.v1beta2.ListJobsRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getListJobsMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Updates a job in a project.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.Job updateJob(com.google.cloud.dataproc.v1beta2.UpdateJobRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getUpdateJobMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Starts a job cancellation request. To access the job resource
+ * after cancellation, call
+ * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+ * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.Job cancelJob(com.google.cloud.dataproc.v1beta2.CancelJobRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getCancelJobMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Deletes the job from the project. If the job is active, the delete fails,
+ * and the response returns `FAILED_PRECONDITION`.
+ *
+ */
+ public com.google.protobuf.Empty deleteJob(com.google.cloud.dataproc.v1beta2.DeleteJobRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getDeleteJobMethodHelper(), getCallOptions(), request);
+ }
+ }
+
+ /**
+ *
+ * The JobController provides methods to manage jobs.
+ *
+ */
+ public static final class JobControllerFutureStub extends io.grpc.stub.AbstractStub
+ * Submits a job to a cluster.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Gets the resource representation for a job in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Lists regions/{region}/jobs in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Updates a job in a project.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Starts a job cancellation request. To access the job resource
+ * after cancellation, call
+ * [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or
+ * [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Deletes the job from the project. If the job is active, the delete fails,
+ * and the response returns `FAILED_PRECONDITION`.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
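Similarly, a hedged sketch (not part of the diff) of asynchronous job submission through the JobControllerFutureStub above; channel setup mirrors the earlier sketch, and all IDs are placeholders.

import com.google.cloud.dataproc.v1beta2.Job;
import com.google.cloud.dataproc.v1beta2.JobControllerGrpc;
import com.google.cloud.dataproc.v1beta2.SubmitJobRequest;
import com.google.common.util.concurrent.ListenableFuture;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

public class SubmitJobSketch {
  public static void main(String[] args) throws Exception {
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("dataproc.googleapis.com", 443).build();
    JobControllerGrpc.JobControllerFutureStub stub =
        JobControllerGrpc.newFutureStub(channel);
    ListenableFuture<Job> pending = stub.submitJob(
        SubmitJobRequest.newBuilder()
            .setProjectId("my-project")   // placeholder
            .setRegion("us-central1")     // placeholder
            .setJob(Job.getDefaultInstance())
            .build());
    Job job = pending.get();  // blocks this sketch until the RPC completes
    System.out.println("Submitted job: " + job.getReference().getJobId());
    channel.shutdown();
  }
}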
+ * The API interface for managing Workflow Templates in the
+ * Cloud Dataproc API.
+ *
+ */
+@javax.annotation.Generated(
+ value = "by gRPC proto compiler (version 1.10.0)",
+ comments = "Source: google/cloud/dataproc/v1beta2/workflow_templates.proto")
+public final class WorkflowTemplateServiceGrpc {
+
+ private WorkflowTemplateServiceGrpc() {}
+
+ public static final String SERVICE_NAME = "google.cloud.dataproc.v1beta2.WorkflowTemplateService";
+
+ // Static method descriptors that strictly reflect the proto.
+ @io.grpc.ExperimentalApi("https://github.com/grpc/grpc-java/issues/1901")
+ @java.lang.Deprecated // Use {@link #getCreateWorkflowTemplateMethod()} instead.
+ public static final io.grpc.MethodDescriptor
+ * The API interface for managing Workflow Templates in the
+ * Cloud Dataproc API.
+ *
+ */
+ public static abstract class WorkflowTemplateServiceImplBase implements io.grpc.BindableService {
+
+ /**
+ *
+ * Creates new workflow template.
+ *
+ */
+ public void createWorkflowTemplate(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Retrieves the latest workflow template.
+ * Can retrieve previously instantiated template by specifying optional
+ * version parameter.
+ *
+ */
+ public void getWorkflowTemplate(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Instantiates a template and begins execution.
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public void instantiateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Instantiates a template and begins execution.
+ * This method is equivalent to executing the sequence
+ * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+ * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public void instantiateInlineWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Updates (replaces) workflow template. The updated template
+ * must contain version that matches the current server version.
+ *
+ */
+ public void updateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Lists workflows that match the specified filter in the request.
+ *
+ */
+ public void listWorkflowTemplates(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request,
+ io.grpc.stub.StreamObserver
+ * Deletes a workflow template. It does not cancel in-progress workflows.
+ *
+ */
+ public void deleteWorkflowTemplate(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * The API interface for managing Workflow Templates in the
+ * Cloud Dataproc API.
+ *
+ */
+ public static final class WorkflowTemplateServiceStub extends io.grpc.stub.AbstractStub
+ * Creates new workflow template.
+ *
+ */
+ public void createWorkflowTemplate(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Retrieves the latest workflow template.
+ * Can retrieve previously instantiated template by specifying optional
+ * version parameter.
+ *
+ */
+ public void getWorkflowTemplate(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Instantiates a template and begins execution.
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public void instantiateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Instantiates a template and begins execution.
+ * This method is equivalent to executing the sequence
+ * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+ * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public void instantiateInlineWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Updates (replaces) workflow template. The updated template
+ * must contain version that matches the current server version.
+ *
+ */
+ public void updateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * Lists workflows that match the specified filter in the request.
+ *
+ */
+ public void listWorkflowTemplates(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request,
+ io.grpc.stub.StreamObserver
+ * Deletes a workflow template. It does not cancel in-progress workflows.
+ *
+ */
+ public void deleteWorkflowTemplate(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request,
+ io.grpc.stub.StreamObserver
+ * The API interface for managing Workflow Templates in the
+ * Cloud Dataproc API.
+ *
+ */
+ public static final class WorkflowTemplateServiceBlockingStub extends io.grpc.stub.AbstractStub
+ * Creates new workflow template.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.WorkflowTemplate createWorkflowTemplate(com.google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getCreateWorkflowTemplateMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Retrieves the latest workflow template.
+ * Can retrieve previously instantiated template by specifying optional
+ * version parameter.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.WorkflowTemplate getWorkflowTemplate(com.google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getGetWorkflowTemplateMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Instantiates a template and begins execution.
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public com.google.longrunning.Operation instantiateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getInstantiateWorkflowTemplateMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Instantiates a template and begins execution.
+ * This method is equivalent to executing the sequence
+ * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+ * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public com.google.longrunning.Operation instantiateInlineWorkflowTemplate(com.google.cloud.dataproc.v1beta2.InstantiateInlineWorkflowTemplateRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getInstantiateInlineWorkflowTemplateMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Updates (replaces) workflow template. The updated template
+ * must contain version that matches the current server version.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.WorkflowTemplate updateWorkflowTemplate(com.google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getUpdateWorkflowTemplateMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Lists workflows that match the specified filter in the request.
+ *
+ */
+ public com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse listWorkflowTemplates(com.google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getListWorkflowTemplatesMethodHelper(), getCallOptions(), request);
+ }
+
+ /**
+ *
+ * Deletes a workflow template. It does not cancel in-progress workflows.
+ *
+ */
+ public com.google.protobuf.Empty deleteWorkflowTemplate(com.google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest request) {
+ return blockingUnaryCall(
+ getChannel(), getDeleteWorkflowTemplateMethodHelper(), getCallOptions(), request);
+ }
+ }
+
+ /**
+ *
+ * The API interface for managing Workflow Templates in the
+ * Cloud Dataproc API.
+ *
+ */
+ public static final class WorkflowTemplateServiceFutureStub extends io.grpc.stub.AbstractStub
+ * Creates new workflow template.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Retrieves the latest workflow template.
+ * Can retrieve previously instantiated template by specifying optional
+ * version parameter.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Instantiates a template and begins execution.
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Instantiates a template and begins execution.
+ * This method is equivalent to executing the sequence
+ * [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
+ * [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
+ * The returned Operation can be used to track execution of
+ * workflow by polling
+ * [operations.get][google.longrunning.Operations.GetOperation].
+ * The Operation will complete when entire workflow is finished.
+ * The running workflow can be aborted via
+ * [operations.cancel][google.longrunning.Operations.CancelOperation].
+ * This will cause any inflight jobs to be cancelled and workflow-owned
+ * clusters to be deleted.
+ * The [Operation.metadata][google.longrunning.Operation.metadata] will be
+ * [WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata].
+ * On successful completion,
+ * [Operation.response][google.longrunning.Operation.response] will be
+ * [Empty][google.protobuf.Empty].
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Updates (replaces) workflow template. The updated template
+ * must contain version that matches the current server version.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Lists workflows that match the specified filter in the request.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
+ * Deletes a workflow template. It does not cancel in-progress workflows.
+ *
+ */
+ public com.google.common.util.concurrent.ListenableFuture
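The Javadoc above describes tracking template instantiation by polling [operations.get]; below is a rough polling sketch, not part of the diff, under the assumption that the google.longrunning OperationsGrpc stubs are on the classpath. The template name is a placeholder and credentials are omitted.

import com.google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest;
import com.google.cloud.dataproc.v1beta2.WorkflowTemplateServiceGrpc;
import com.google.longrunning.GetOperationRequest;
import com.google.longrunning.Operation;
import com.google.longrunning.OperationsGrpc;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;

public class InstantiateTemplateSketch {
  public static void main(String[] args) throws Exception {
    ManagedChannel channel =
        ManagedChannelBuilder.forAddress("dataproc.googleapis.com", 443).build();
    Operation op = WorkflowTemplateServiceGrpc.newBlockingStub(channel)
        .instantiateWorkflowTemplate(InstantiateWorkflowTemplateRequest.newBuilder()
            .setName("projects/my-project/regions/us-central1/workflowTemplates/my-template")  // placeholder
            .build());
    OperationsGrpc.OperationsBlockingStub ops = OperationsGrpc.newBlockingStub(channel);
    while (!op.getDone()) {  // poll operations.get, as the Javadoc describes
      Thread.sleep(2000L);
      op = ops.getOperation(GetOperationRequest.newBuilder().setName(op.getName()).build());
    }
    System.out.println("Workflow finished; on success the response is google.protobuf.Empty.");
    channel.shutdown();
  }
}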
+ * Specifies the type and number of accelerator cards attached to the instances
+ * of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1beta2.AcceleratorConfig}
+ */
+public final class AcceleratorConfig extends
+ com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.AcceleratorConfig)
+ AcceleratorConfigOrBuilder {
+private static final long serialVersionUID = 0L;
+ // Use AcceleratorConfig.newBuilder() to construct.
+ private AcceleratorConfig(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private AcceleratorConfig() {
+ acceleratorTypeUri_ = "";
+ acceleratorCount_ = 0;
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private AcceleratorConfig(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10: {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ acceleratorTypeUri_ = s;
+ break;
+ }
+ case 16: {
+
+ acceleratorCount_ = input.readInt32();
+ break;
+ }
+ default: {
+ if (!parseUnknownFieldProto3(
+ input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1beta2.ClustersProto.internal_static_google_cloud_dataproc_v1beta2_AcceleratorConfig_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1beta2.AcceleratorConfig.class, com.google.cloud.dataproc.v1beta2.AcceleratorConfig.Builder.class);
+ }
+
+ public static final int ACCELERATOR_TYPE_URI_FIELD_NUMBER = 1;
+ private volatile java.lang.Object acceleratorTypeUri_;
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public java.lang.String getAcceleratorTypeUri() {
+ java.lang.Object ref = acceleratorTypeUri_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ acceleratorTypeUri_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public com.google.protobuf.ByteString
+ getAcceleratorTypeUriBytes() {
+ java.lang.Object ref = acceleratorTypeUri_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ acceleratorTypeUri_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int ACCELERATOR_COUNT_FIELD_NUMBER = 2;
+ private int acceleratorCount_;
+ /**
+ *
+ * The number of the accelerator cards of this type exposed to this instance.
+ *
+ *
+ * int32 accelerator_count = 2;
+ */
+ public int getAcceleratorCount() {
+ return acceleratorCount_;
+ }
+
+ private byte memoizedIsInitialized = -1;
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (!getAcceleratorTypeUriBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, acceleratorTypeUri_);
+ }
+ if (acceleratorCount_ != 0) {
+ output.writeInt32(2, acceleratorCount_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getAcceleratorTypeUriBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, acceleratorTypeUri_);
+ }
+ if (acceleratorCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeInt32Size(2, acceleratorCount_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1beta2.AcceleratorConfig)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1beta2.AcceleratorConfig other = (com.google.cloud.dataproc.v1beta2.AcceleratorConfig) obj;
+
+ boolean result = true;
+ result = result && getAcceleratorTypeUri()
+ .equals(other.getAcceleratorTypeUri());
+ result = result && (getAcceleratorCount()
+ == other.getAcceleratorCount());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + ACCELERATOR_TYPE_URI_FIELD_NUMBER;
+ hash = (53 * hash) + getAcceleratorTypeUri().hashCode();
+ hash = (37 * hash) + ACCELERATOR_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getAcceleratorCount();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.AcceleratorConfig prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ * Specifies the type and number of accelerator cards attached to the instances
+ * of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1beta2.AcceleratorConfig}
+ */
+ public static final class Builder extends
+ com.google.protobuf.GeneratedMessageV3.Builder
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public java.lang.String getAcceleratorTypeUri() {
+ java.lang.Object ref = acceleratorTypeUri_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ acceleratorTypeUri_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public com.google.protobuf.ByteString
+ getAcceleratorTypeUriBytes() {
+ java.lang.Object ref = acceleratorTypeUri_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ acceleratorTypeUri_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public Builder setAcceleratorTypeUri(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ acceleratorTypeUri_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public Builder clearAcceleratorTypeUri() {
+
+ acceleratorTypeUri_ = getDefaultInstance().getAcceleratorTypeUri();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ public Builder setAcceleratorTypeUriBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ acceleratorTypeUri_ = value;
+ onChanged();
+ return this;
+ }
+
+ private int acceleratorCount_ ;
+ /**
+ *
+ * The number of the accelerator cards of this type exposed to this instance.
+ *
+ *
+ * int32 accelerator_count = 2;
+ */
+ public int getAcceleratorCount() {
+ return acceleratorCount_;
+ }
+ /**
+ *
+ * The number of the accelerator cards of this type exposed to this instance.
+ *
+ *
+ * int32 accelerator_count = 2;
+ */
+ public Builder setAcceleratorCount(int value) {
+
+ acceleratorCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ * The number of the accelerator cards of this type exposed to this instance.
+ *
+ *
+ * int32 accelerator_count = 2;
+ */
+ public Builder clearAcceleratorCount() {
+
+ acceleratorCount_ = 0;
+ onChanged();
+ return this;
+ }
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.AcceleratorConfig)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.AcceleratorConfig)
+ private static final com.google.cloud.dataproc.v1beta2.AcceleratorConfig DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.AcceleratorConfig();
+ }
+
+ public static com.google.cloud.dataproc.v1beta2.AcceleratorConfig getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ java.lang.String getAcceleratorTypeUri();
+ /**
+ *
+ * Full URL, partial URI, or short name of the accelerator type resource to
+ * expose to this instance. See [Compute Engine AcceleratorTypes](
+ * /compute/docs/reference/beta/acceleratorTypes)
+ * Examples
+ * * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
+ * * `nvidia-tesla-k80`
+ * **Auto Zone Exception**: If you are using the Cloud Dataproc
+ * [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
+ * feature, you must use the short name of the accelerator type
+ * resource, for example, `nvidia-tesla-k80`.
+ *
+ *
+ * string accelerator_type_uri = 1;
+ */
+ com.google.protobuf.ByteString
+ getAcceleratorTypeUriBytes();
+
+ /**
+ *
+ * The number of the accelerator cards of this type exposed to this instance.
+ *
+ *
+ * int32 accelerator_count = 2;
+ */
+ int getAcceleratorCount();
+}
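A brief sketch (not in the diff) exercising the builder and the parseFrom/writeTo machinery generated above; the type URI and count are illustrative values.

import com.google.cloud.dataproc.v1beta2.AcceleratorConfig;

public class AcceleratorConfigSketch {
  public static void main(String[] args) throws Exception {
    AcceleratorConfig config = AcceleratorConfig.newBuilder()
        .setAcceleratorTypeUri("nvidia-tesla-k80")  // short name form, per the Auto Zone note
        .setAcceleratorCount(2)
        .build();
    // toByteArray() drives writeTo()/getSerializedSize(); parseFrom() inverts it.
    byte[] wire = config.toByteArray();
    AcceleratorConfig parsed = AcceleratorConfig.parseFrom(wire);
    // equals() compares both fields and the unknown field set, as generated above.
    System.out.println(config.equals(parsed));  // prints: true
  }
}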
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java
new file mode 100644
index 000000000000..cad2f6e50eee
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/CancelJobRequest.java
@@ -0,0 +1,894 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/jobs.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ *
+ * A request to cancel a job.
+ *
+ *
+ * Protobuf type {@code google.cloud.dataproc.v1beta2.CancelJobRequest}
+ */
+public final class CancelJobRequest extends
+ com.google.protobuf.GeneratedMessageV3 implements
+ // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.CancelJobRequest)
+ CancelJobRequestOrBuilder {
+private static final long serialVersionUID = 0L;
+ // Use CancelJobRequest.newBuilder() to construct.
+ private CancelJobRequest(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
+ super(builder);
+ }
+ private CancelJobRequest() {
+ projectId_ = "";
+ region_ = "";
+ jobId_ = "";
+ }
+
+ @java.lang.Override
+ public final com.google.protobuf.UnknownFieldSet
+ getUnknownFields() {
+ return this.unknownFields;
+ }
+ private CancelJobRequest(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ this();
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ int mutable_bitField0_ = 0;
+ com.google.protobuf.UnknownFieldSet.Builder unknownFields =
+ com.google.protobuf.UnknownFieldSet.newBuilder();
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 10: {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ projectId_ = s;
+ break;
+ }
+ case 18: {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ jobId_ = s;
+ break;
+ }
+ case 26: {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ region_ = s;
+ break;
+ }
+ default: {
+ if (!parseUnknownFieldProto3(
+ input, unknownFields, extensionRegistry, tag)) {
+ done = true;
+ }
+ break;
+ }
+ }
+ }
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(this);
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(
+ e).setUnfinishedMessage(this);
+ } finally {
+ this.unknownFields = unknownFields.build();
+ makeExtensionsImmutable();
+ }
+ }
+ public static final com.google.protobuf.Descriptors.Descriptor
+ getDescriptor() {
+ return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.cloud.dataproc.v1beta2.JobsProto.internal_static_google_cloud_dataproc_v1beta2_CancelJobRequest_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.cloud.dataproc.v1beta2.CancelJobRequest.class, com.google.cloud.dataproc.v1beta2.CancelJobRequest.Builder.class);
+ }
+
+ public static final int PROJECT_ID_FIELD_NUMBER = 1;
+ private volatile java.lang.Object projectId_;
+ /**
+ *
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ *
+ *
+ * string project_id = 1;
+ */
+ public java.lang.String getProjectId() {
+ java.lang.Object ref = projectId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ projectId_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ *
+ *
+ * string project_id = 1;
+ */
+ public com.google.protobuf.ByteString
+ getProjectIdBytes() {
+ java.lang.Object ref = projectId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ projectId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int REGION_FIELD_NUMBER = 3;
+ private volatile java.lang.Object region_;
+ /**
+ *
+ * Required. The Cloud Dataproc region in which to handle the request.
+ *
+ *
+ * string region = 3;
+ */
+ public java.lang.String getRegion() {
+ java.lang.Object ref = region_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ region_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ * Required. The Cloud Dataproc region in which to handle the request.
+ *
+ *
+ * string region = 3;
+ */
+ public com.google.protobuf.ByteString
+ getRegionBytes() {
+ java.lang.Object ref = region_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ region_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int JOB_ID_FIELD_NUMBER = 2;
+ private volatile java.lang.Object jobId_;
+ /**
+ *
+ * Required. The job ID.
+ *
+ *
+ * string job_id = 2;
+ */
+ public java.lang.String getJobId() {
+ java.lang.Object ref = jobId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ jobId_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ * Required. The job ID.
+ *
+ *
+ * string job_id = 2;
+ */
+ public com.google.protobuf.ByteString
+ getJobIdBytes() {
+ java.lang.Object ref = jobId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ jobId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ private byte memoizedIsInitialized = -1;
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (!getProjectIdBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_);
+ }
+ if (!getJobIdBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, jobId_);
+ }
+ if (!getRegionBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 3, region_);
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getProjectIdBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_);
+ }
+ if (!getJobIdBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, jobId_);
+ }
+ if (!getRegionBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, region_);
+ }
+ size += unknownFields.getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.cloud.dataproc.v1beta2.CancelJobRequest)) {
+ return super.equals(obj);
+ }
+ com.google.cloud.dataproc.v1beta2.CancelJobRequest other = (com.google.cloud.dataproc.v1beta2.CancelJobRequest) obj;
+
+ boolean result = true;
+ result = result && getProjectId()
+ .equals(other.getProjectId());
+ result = result && getRegion()
+ .equals(other.getRegion());
+ result = result && getJobId()
+ .equals(other.getJobId());
+ result = result && unknownFields.equals(other.unknownFields);
+ return result;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (37 * hash) + PROJECT_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getProjectId().hashCode();
+ hash = (37 * hash) + REGION_FIELD_NUMBER;
+ hash = (53 * hash) + getRegion().hashCode();
+ hash = (37 * hash) + JOB_ID_FIELD_NUMBER;
+ hash = (53 * hash) + getJobId().hashCode();
+ hash = (29 * hash) + unknownFields.hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ java.nio.ByteBuffer data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ java.nio.ByteBuffer data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ byte[] data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseDelimitedFrom(java.io.InputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseDelimitedFrom(
+ java.io.InputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseDelimitedWithIOException(PARSER, input, extensionRegistry);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ com.google.protobuf.CodedInputStream input)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input);
+ }
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3
+ .parseWithIOException(PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() { return newBuilder(); }
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+ public static Builder newBuilder(com.google.cloud.dataproc.v1beta2.CancelJobRequest prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE
+ ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ * + * A request to cancel a job. + *+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.CancelJobRequest} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * <pre>
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public java.lang.String getProjectId() {
+ java.lang.Object ref = projectId_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ projectId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getProjectIdBytes() {
+ java.lang.Object ref = projectId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ projectId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public Builder setProjectId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ projectId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public Builder clearProjectId() {
+
+ projectId_ = getDefaultInstance().getProjectId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public Builder setProjectIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ projectId_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object region_ = "";
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ public java.lang.String getRegion() {
+ java.lang.Object ref = region_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ region_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ public com.google.protobuf.ByteString
+ getRegionBytes() {
+ java.lang.Object ref = region_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ region_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ public Builder setRegion(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ region_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ public Builder clearRegion() {
+
+ region_ = getDefaultInstance().getRegion();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ public Builder setRegionBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ region_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object jobId_ = "";
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ public java.lang.String getJobId() {
+ java.lang.Object ref = jobId_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ jobId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getJobIdBytes() {
+ java.lang.Object ref = jobId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ jobId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ public Builder setJobId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ jobId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ public Builder clearJobId() {
+
+ jobId_ = getDefaultInstance().getJobId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ public Builder setJobIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ jobId_ = value;
+ onChanged();
+ return this;
+ }
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFieldsProto3(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+
+ // @@protoc_insertion_point(builder_scope:google.cloud.dataproc.v1beta2.CancelJobRequest)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.cloud.dataproc.v1beta2.CancelJobRequest)
+ private static final com.google.cloud.dataproc.v1beta2.CancelJobRequest DEFAULT_INSTANCE;
+ static {
+ DEFAULT_INSTANCE = new com.google.cloud.dataproc.v1beta2.CancelJobRequest();
+ }
+
+ public static com.google.cloud.dataproc.v1beta2.CancelJobRequest getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser+ * Required. The ID of the Google Cloud Platform project that the job + * belongs to. + *+ * + *
+ * <code>string project_id = 1;</code>
+ */
+ java.lang.String getProjectId();
+ /**
+ * <pre>
+ * Required. The ID of the Google Cloud Platform project that the job
+ * belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ com.google.protobuf.ByteString
+ getProjectIdBytes();
+
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ java.lang.String getRegion();
+ /**
+ * <pre>
+ * Required. The Cloud Dataproc region in which to handle the request.
+ * </pre>
+ *
+ * <code>string region = 3;</code>
+ */
+ com.google.protobuf.ByteString
+ getRegionBytes();
+
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ java.lang.String getJobId();
+ /**
+ * <pre>
+ * Required. The job ID.
+ * </pre>
+ *
+ * <code>string job_id = 2;</code>
+ */
+ com.google.protobuf.ByteString
+ getJobIdBytes();
+}
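For orientation while reading this generated file: client code is expected to go through the generated Builder rather than touch fields directly. A minimal sketch of that usage, under stated assumptions — the literal project, region, and job IDs are placeholders, and the only APIs assumed are those visible in this file (newBuilder, the setters, build, parseFrom, equals) plus the standard protobuf toByteArray():

    import com.google.cloud.dataproc.v1beta2.CancelJobRequest;

    public class CancelJobRequestDemo {
      public static void main(String[] args) throws Exception {
        // Build an immutable request via the generated Builder.
        CancelJobRequest request = CancelJobRequest.newBuilder()
            .setProjectId("my-project") // placeholder project ID
            .setRegion("global")        // placeholder Dataproc region
            .setJobId("job-1234")       // placeholder job ID
            .build();

        // Round-trip through the wire format using the generated parser.
        byte[] wire = request.toByteArray();
        CancelJobRequest parsed = CancelJobRequest.parseFrom(wire);
        System.out.println(request.equals(parsed)); // true: generated value-based equals
      }
    }

The request would then be passed to the JobController stub's cancelJob method, which is added elsewhere in this change.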
diff --git a/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java
new file mode 100644
index 000000000000..35f46528be92
--- /dev/null
+++ b/google-api-grpc/proto-google-cloud-dataproc-v1beta2/src/main/java/com/google/cloud/dataproc/v1beta2/Cluster.java
@@ -0,0 +1,2438 @@
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/cloud/dataproc/v1beta2/clusters.proto
+
+package com.google.cloud.dataproc.v1beta2;
+
+/**
+ * + * Describes the identifying information, config, and status of + * a cluster of Compute Engine instances. + *+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.Cluster} + */ +public final class Cluster extends + com.google.protobuf.GeneratedMessageV3 implements + // @@protoc_insertion_point(message_implements:google.cloud.dataproc.v1beta2.Cluster) + ClusterOrBuilder { +private static final long serialVersionUID = 0L; + // Use Cluster.newBuilder() to construct. + private Cluster(com.google.protobuf.GeneratedMessageV3.Builder> builder) { + super(builder); + } + private Cluster() { + projectId_ = ""; + clusterName_ = ""; + statusHistory_ = java.util.Collections.emptyList(); + clusterUuid_ = ""; + } + + @java.lang.Override + public final com.google.protobuf.UnknownFieldSet + getUnknownFields() { + return this.unknownFields; + } + private Cluster( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + this(); + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + int mutable_bitField0_ = 0; + com.google.protobuf.UnknownFieldSet.Builder unknownFields = + com.google.protobuf.UnknownFieldSet.newBuilder(); + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: { + java.lang.String s = input.readStringRequireUtf8(); + + projectId_ = s; + break; + } + case 18: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterName_ = s; + break; + } + case 26: { + com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder subBuilder = null; + if (config_ != null) { + subBuilder = config_.toBuilder(); + } + config_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterConfig.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(config_); + config_ = subBuilder.buildPartial(); + } + + break; + } + case 34: { + com.google.cloud.dataproc.v1beta2.ClusterStatus.Builder subBuilder = null; + if (status_ != null) { + subBuilder = status_.toBuilder(); + } + status_ = input.readMessage(com.google.cloud.dataproc.v1beta2.ClusterStatus.parser(), extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom(status_); + status_ = subBuilder.buildPartial(); + } + + break; + } + case 50: { + java.lang.String s = input.readStringRequireUtf8(); + + clusterUuid_ = s; + break; + } + case 58: { + if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) { + statusHistory_ = new java.util.ArrayList
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public java.lang.String getProjectId() {
+ java.lang.Object ref = projectId_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ projectId_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getProjectIdBytes() {
+ java.lang.Object ref = projectId_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ projectId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int CLUSTER_NAME_FIELD_NUMBER = 2;
+ private volatile java.lang.Object clusterName_;
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public java.lang.String getClusterName() {
+ java.lang.Object ref = clusterName_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ clusterName_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getClusterNameBytes() {
+ java.lang.Object ref = clusterName_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ clusterName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int CONFIG_FIELD_NUMBER = 3;
+ private com.google.cloud.dataproc.v1beta2.ClusterConfig config_;
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public boolean hasConfig() {
+ return config_ != null;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig() {
+ return config_ == null ? com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder() {
+ return getConfig();
+ }
+
+ public static final int LABELS_FIELD_NUMBER = 8;
+ private static final class LabelsDefaultEntryHolder {
+ static final com.google.protobuf.MapEntry<
+ java.lang.String, java.lang.String> defaultEntry =
+ com.google.protobuf.MapEntry
+ .+ * Optional. The labels to associate with this cluster. + * Label **keys** must contain 1 to 63 characters, and must conform to + * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + * Label **values** may be empty, but, if present, must contain 1 to 63 + * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + * No more than 32 labels can be associated with a cluster. + *+ * + *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public boolean containsLabels(
+ java.lang.String key) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ return internalGetLabels().getMap().containsKey(key);
+ }
+ /**
+ * Use {@link #getLabelsMap()} instead.
+ */
+ @java.lang.Deprecated
+ public java.util.Map<java.lang.String, java.lang.String> getLabels() {
+ return getLabelsMap();
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public java.util.Map<java.lang.String, java.lang.String> getLabelsMap() {
+ return internalGetLabels().getMap();
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public java.lang.String getLabelsOrDefault(
+ java.lang.String key,
+ java.lang.String defaultValue) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ java.util.Map<java.lang.String, java.lang.String> map =
+ internalGetLabels().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public java.lang.String getLabelsOrThrow(
+ java.lang.String key) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ java.util.Map<java.lang.String, java.lang.String> map =
+ internalGetLabels().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public static final int STATUS_FIELD_NUMBER = 4;
+ private com.google.cloud.dataproc.v1beta2.ClusterStatus status_;
+ /**
+ * <pre>
+ * Output only. Cluster status.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterStatus status = 4;</code>
+ */
+ public boolean hasStatus() {
+ return status_ != null;
+ }
+ /**
+ * <pre>
+ * Output only. Cluster status.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterStatus status = 4;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterStatus getStatus() {
+ return status_ == null ? com.google.cloud.dataproc.v1beta2.ClusterStatus.getDefaultInstance() : status_;
+ }
+ /**
+ * <pre>
+ * Output only. Cluster status.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterStatus status = 4;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusOrBuilder() {
+ return getStatus();
+ }
+
+ public static final int STATUS_HISTORY_FIELD_NUMBER = 7;
+ private java.util.List<com.google.cloud.dataproc.v1beta2.ClusterStatus> statusHistory_;
+ /**
+ * <pre>
+ * Output only. The previous cluster status.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;</code>
+ */
+ public java.util.List<com.google.cloud.dataproc.v1beta2.ClusterStatus> getStatusHistoryList() {
+ return statusHistory_;
+ }
+ /**
+ * <pre>
+ * Output only. The previous cluster status.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;</code>
+ */
+ public java.util.List<? extends com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder>
+ getStatusHistoryOrBuilderList() {
+ return statusHistory_;
+ }
+ /**
+ * <pre>
+ * Output only. The previous cluster status.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;</code>
+ */
+ public int getStatusHistoryCount() {
+ return statusHistory_.size();
+ }
+ /**
+ * <pre>
+ * Output only. The previous cluster status.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterStatus getStatusHistory(int index) {
+ return statusHistory_.get(index);
+ }
+ /**
+ * <pre>
+ * Output only. The previous cluster status.
+ * </pre>
+ *
+ * <code>repeated .google.cloud.dataproc.v1beta2.ClusterStatus status_history = 7;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterStatusOrBuilder getStatusHistoryOrBuilder(
+ int index) {
+ return statusHistory_.get(index);
+ }
+
+ public static final int CLUSTER_UUID_FIELD_NUMBER = 6;
+ private volatile java.lang.Object clusterUuid_;
+ /**
+ * <pre>
+ * Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
+ * generates this value when it creates the cluster.
+ * </pre>
+ *
+ * <code>string cluster_uuid = 6;</code>
+ */
+ public java.lang.String getClusterUuid() {
+ java.lang.Object ref = clusterUuid_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ clusterUuid_ = s;
+ return s;
+ }
+ }
+ /**
+ * <pre>
+ * Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
+ * generates this value when it creates the cluster.
+ * </pre>
+ *
+ * <code>string cluster_uuid = 6;</code>
+ */
+ public com.google.protobuf.ByteString
+ getClusterUuidBytes() {
+ java.lang.Object ref = clusterUuid_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ clusterUuid_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
+ public static final int METRICS_FIELD_NUMBER = 9;
+ private com.google.cloud.dataproc.v1beta2.ClusterMetrics metrics_;
+ /**
+ * <pre>
+ * Contains cluster daemon metrics such as HDFS and YARN stats.
+ * **Beta Feature**: This report is available for testing purposes only. It may
+ * be changed before final release.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;</code>
+ */
+ public boolean hasMetrics() {
+ return metrics_ != null;
+ }
+ /**
+ * <pre>
+ * Contains cluster daemon metrics such as HDFS and YARN stats.
+ * **Beta Feature**: This report is available for testing purposes only. It may
+ * be changed before final release.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterMetrics getMetrics() {
+ return metrics_ == null ? com.google.cloud.dataproc.v1beta2.ClusterMetrics.getDefaultInstance() : metrics_;
+ }
+ /**
+ * <pre>
+ * Contains cluster daemon metrics such as HDFS and YARN stats.
+ * **Beta Feature**: This report is available for testing purposes only. It may
+ * be changed before final release.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterMetrics metrics = 9;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterMetricsOrBuilder getMetricsOrBuilder() {
+ return getMetrics();
+ }
+
+ private byte memoizedIsInitialized = -1;
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output)
+ throws java.io.IOException {
+ if (!getProjectIdBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 1, projectId_);
+ }
+ if (!getClusterNameBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 2, clusterName_);
+ }
+ if (config_ != null) {
+ output.writeMessage(3, getConfig());
+ }
+ if (status_ != null) {
+ output.writeMessage(4, getStatus());
+ }
+ if (!getClusterUuidBytes().isEmpty()) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 6, clusterUuid_);
+ }
+ for (int i = 0; i < statusHistory_.size(); i++) {
+ output.writeMessage(7, statusHistory_.get(i));
+ }
+ com.google.protobuf.GeneratedMessageV3
+ .serializeStringMapTo(
+ output,
+ internalGetLabels(),
+ LabelsDefaultEntryHolder.defaultEntry,
+ 8);
+ if (metrics_ != null) {
+ output.writeMessage(9, getMetrics());
+ }
+ unknownFields.writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (!getProjectIdBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, projectId_);
+ }
+ if (!getClusterNameBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, clusterName_);
+ }
+ if (config_ != null) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(3, getConfig());
+ }
+ if (status_ != null) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(4, getStatus());
+ }
+ if (!getClusterUuidBytes().isEmpty()) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, clusterUuid_);
+ }
+ for (int i = 0; i < statusHistory_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeMessageSize(7, statusHistory_.get(i));
+ }
+ for (java.util.Map.Entry+ * Describes the identifying information, config, and status of + * a cluster of Compute Engine instances. + *+ * + * Protobuf type {@code google.cloud.dataproc.v1beta2.Cluster} + */ + public static final class Builder extends + com.google.protobuf.GeneratedMessageV3.Builder
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public java.lang.String getProjectId() {
+ java.lang.Object ref = projectId_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ projectId_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public com.google.protobuf.ByteString
+ getProjectIdBytes() {
+ java.lang.Object ref = projectId_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ projectId_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public Builder setProjectId(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ projectId_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public Builder clearProjectId() {
+
+ projectId_ = getDefaultInstance().getProjectId();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The Google Cloud Platform project ID that the cluster belongs to.
+ * </pre>
+ *
+ * <code>string project_id = 1;</code>
+ */
+ public Builder setProjectIdBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ projectId_ = value;
+ onChanged();
+ return this;
+ }
+
+ private java.lang.Object clusterName_ = "";
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public java.lang.String getClusterName() {
+ java.lang.Object ref = clusterName_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs =
+ (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ clusterName_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public com.google.protobuf.ByteString
+ getClusterNameBytes() {
+ java.lang.Object ref = clusterName_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8(
+ (java.lang.String) ref);
+ clusterName_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public Builder setClusterName(
+ java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ clusterName_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public Builder clearClusterName() {
+
+ clusterName_ = getDefaultInstance().getClusterName();
+ onChanged();
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The cluster name. Cluster names within a project must be
+ * unique. Names of deleted clusters can be reused.
+ * </pre>
+ *
+ * <code>string cluster_name = 2;</code>
+ */
+ public Builder setClusterNameBytes(
+ com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ clusterName_ = value;
+ onChanged();
+ return this;
+ }
+
+ private com.google.cloud.dataproc.v1beta2.ClusterConfig config_ = null;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder> configBuilder_;
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public boolean hasConfig() {
+ return configBuilder_ != null || config_ != null;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterConfig getConfig() {
+ if (configBuilder_ == null) {
+ return config_ == null ? com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_;
+ } else {
+ return configBuilder_.getMessage();
+ }
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public Builder setConfig(com.google.cloud.dataproc.v1beta2.ClusterConfig value) {
+ if (configBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ config_ = value;
+ onChanged();
+ } else {
+ configBuilder_.setMessage(value);
+ }
+
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public Builder setConfig(
+ com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder builderForValue) {
+ if (configBuilder_ == null) {
+ config_ = builderForValue.build();
+ onChanged();
+ } else {
+ configBuilder_.setMessage(builderForValue.build());
+ }
+
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public Builder mergeConfig(com.google.cloud.dataproc.v1beta2.ClusterConfig value) {
+ if (configBuilder_ == null) {
+ if (config_ != null) {
+ config_ =
+ com.google.cloud.dataproc.v1beta2.ClusterConfig.newBuilder(config_).mergeFrom(value).buildPartial();
+ } else {
+ config_ = value;
+ }
+ onChanged();
+ } else {
+ configBuilder_.mergeFrom(value);
+ }
+
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public Builder clearConfig() {
+ if (configBuilder_ == null) {
+ config_ = null;
+ onChanged();
+ } else {
+ config_ = null;
+ configBuilder_ = null;
+ }
+
+ return this;
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder getConfigBuilder() {
+
+ onChanged();
+ return getConfigFieldBuilder().getBuilder();
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ public com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder getConfigOrBuilder() {
+ if (configBuilder_ != null) {
+ return configBuilder_.getMessageOrBuilder();
+ } else {
+ return config_ == null ?
+ com.google.cloud.dataproc.v1beta2.ClusterConfig.getDefaultInstance() : config_;
+ }
+ }
+ /**
+ * <pre>
+ * Required. The cluster config. Note that Cloud Dataproc may set
+ * default values, and values may change when clusters are updated.
+ * </pre>
+ *
+ * <code>.google.cloud.dataproc.v1beta2.ClusterConfig config = 3;</code>
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder>
+ getConfigFieldBuilder() {
+ if (configBuilder_ == null) {
+ configBuilder_ = new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.cloud.dataproc.v1beta2.ClusterConfig, com.google.cloud.dataproc.v1beta2.ClusterConfig.Builder, com.google.cloud.dataproc.v1beta2.ClusterConfigOrBuilder>(
+ getConfig(),
+ getParentForChildren(),
+ isClean());
+ config_ = null;
+ }
+ return configBuilder_;
+ }
+
+ private com.google.protobuf.MapField<
+ java.lang.String, java.lang.String> labels_;
+ private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+ internalGetLabels() {
+ if (labels_ == null) {
+ return com.google.protobuf.MapField.emptyMapField(
+ LabelsDefaultEntryHolder.defaultEntry);
+ }
+ return labels_;
+ }
+ private com.google.protobuf.MapField<java.lang.String, java.lang.String>
+ internalGetMutableLabels() {
+ onChanged();;
+ if (labels_ == null) {
+ labels_ = com.google.protobuf.MapField.newMapField(
+ LabelsDefaultEntryHolder.defaultEntry);
+ }
+ if (!labels_.isMutable()) {
+ labels_ = labels_.copy();
+ }
+ return labels_;
+ }
+
+ public int getLabelsCount() {
+ return internalGetLabels().getMap().size();
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public boolean containsLabels(
+ java.lang.String key) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ return internalGetLabels().getMap().containsKey(key);
+ }
+ /**
+ * Use {@link #getLabelsMap()} instead.
+ */
+ @java.lang.Deprecated
+ public java.util.Map<java.lang.String, java.lang.String> getLabels() {
+ return getLabelsMap();
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public java.util.Map<java.lang.String, java.lang.String> getLabelsMap() {
+ return internalGetLabels().getMap();
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public java.lang.String getLabelsOrDefault(
+ java.lang.String key,
+ java.lang.String defaultValue) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ java.util.Map<java.lang.String, java.lang.String> map =
+ internalGetLabels().getMap();
+ return map.containsKey(key) ? map.get(key) : defaultValue;
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public java.lang.String getLabelsOrThrow(
+ java.lang.String key) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ java.util.Map<java.lang.String, java.lang.String> map =
+ internalGetLabels().getMap();
+ if (!map.containsKey(key)) {
+ throw new java.lang.IllegalArgumentException();
+ }
+ return map.get(key);
+ }
+
+ public Builder clearLabels() {
+ internalGetMutableLabels().getMutableMap()
+ .clear();
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public Builder removeLabels(
+ java.lang.String key) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ internalGetMutableLabels().getMutableMap()
+ .remove(key);
+ return this;
+ }
+ /**
+ * Use alternate mutation accessors instead.
+ */
+ @java.lang.Deprecated
+ public java.util.Map<java.lang.String, java.lang.String>
+ getMutableLabels() {
+ return internalGetMutableLabels().getMutableMap();
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+ public Builder putLabels(
+ java.lang.String key,
+ java.lang.String value) {
+ if (key == null) { throw new java.lang.NullPointerException(); }
+ if (value == null) { throw new java.lang.NullPointerException(); }
+ internalGetMutableLabels().getMutableMap()
+ .put(key, value);
+ return this;
+ }
+ /**
+ * <pre>
+ * Optional. The labels to associate with this cluster.
+ * Label **keys** must contain 1 to 63 characters, and must conform to
+ * [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * Label **values** may be empty, but, if present, must contain 1 to 63
+ * characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
+ * No more than 32 labels can be associated with a cluster.
+ * </pre>
+ *
+ * <code>map&lt;string, string&gt; labels = 8;</code>
+ */
+
+ public Builder putAllLabels(
+ java.util.Map