diff --git a/.travis.yml b/.travis.yml index c023116917a3..1e4b639b0e0d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,7 +9,7 @@ before_install: - cp target/travis/settings.xml ~/.m2/settings.xml install: mvn install -DskipTests=true -Dgpg.skip=true script: -- utilities/verify.sh +- travis_wait 30 utilities/verify.sh after_success: - utilities/after_success.sh env: diff --git a/README.md b/README.md index 6bc56bad250a..0768939df6b3 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ Java idiomatic client for [Google Cloud Platform][cloud-platform] services. This client supports the following Google Cloud Platform services: - [Google Cloud BigQuery] (#google-cloud-bigquery-alpha) (Alpha) +- [Google Cloud Compute] (#google-cloud-compute-alpha) (Alpha) - [Google Cloud Datastore] (#google-cloud-datastore) - [Google Cloud DNS] (#google-cloud-dns-alpha) (Alpha) - [Google Cloud Resource Manager] (#google-cloud-resource-manager-alpha) (Alpha) @@ -50,6 +51,8 @@ Example Applications - [`BigQueryExample`](./gcloud-java-examples/src/main/java/com/google/cloud/examples/bigquery/BigQueryExample.java) - A simple command line interface providing some of Cloud BigQuery's functionality - Read more about using this application on the [`BigQueryExample` docs page](http://googlecloudplatform.github.io/gcloud-java/apidocs/?com/google/cloud/examples/bigquery/BigQueryExample.html). +- [`ComputeExample`](./gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/ComputeExample.java) - A simple command line interface providing some of Cloud Compute's functionality + - Read more about using this application on the [`gcloud-java-examples` docs page](http://googlecloudplatform.github.io/gcloud-java/apidocs/?com/google/cloud/examples/compute/ComputeExample.html). - [`Bookshelf`](https://github.com/GoogleCloudPlatform/getting-started-java/tree/master/bookshelf) - An App Engine app that manages a virtual bookshelf. - This app uses `gcloud-java` to interface with Cloud Datastore and Cloud Storage. It also uses Cloud SQL, another Google Cloud Platform service. - [`DatastoreExample`](./gcloud-java-examples/src/main/java/com/google/cloud/examples/datastore/DatastoreExample.java) - A simple command line interface for Cloud Datastore @@ -173,6 +176,78 @@ if (loadJob.status().error() != null) { } ``` +Google Cloud Compute (Alpha) +---------------------- + +- [API Documentation][compute-api] +- [Official Documentation][cloud-compute-docs] + +#### Preview + +Here are two code snippets showing simple usage examples from within Compute/App Engine. Note that +you must [supply credentials](#authentication) and a project ID if running this snippet elsewhere. + +The first snippet shows how to create a snapshot from an existing disk. Complete source code can be +found at +[CreateSnapshot.java](./gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateSnapshot.java). 
+ +```java +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.Disk; +import com.google.cloud.compute.DiskId; +import com.google.cloud.compute.Operation; +import com.google.cloud.compute.Snapshot; + +Compute compute = ComputeOptions.defaultInstance().service(); +DiskId diskId = DiskId.of("us-central1-a", "disk-name"); +Disk disk = compute.getDisk(diskId, Compute.DiskOption.fields()); +if (disk != null) { + String snapshotName = "disk-name-snapshot"; + Operation operation = disk.createSnapshot(snapshotName); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + if (operation.errors() == null) { + // use snapshot + Snapshot snapshot = compute.getSnapshot("disk-name-snapshot"); + } +} +``` +The second snippet shows how to create a virtual machine instance. Complete source code can be found +at +[CreateInstance.java](./gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateInstance.java). +```java +import com.google.cloud.compute.AttachedDisk; +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.ImageId; +import com.google.cloud.compute.Instance; +import com.google.cloud.compute.InstanceId; +import com.google.cloud.compute.InstanceInfo; +import com.google.cloud.compute.MachineTypeId; +import com.google.cloud.compute.NetworkId; +import com.google.cloud.compute.NetworkInterface; +import com.google.cloud.compute.Operation; + +Compute compute = ComputeOptions.defaultInstance().service(); +ImageId imageId = ImageId.of("debian-cloud", "debian-8-jessie-v20160329"); +NetworkId networkId = NetworkId.of("default"); +AttachedDisk attachedDisk = AttachedDisk.of(AttachedDisk.CreateDiskConfiguration.of(imageId)); +NetworkInterface networkInterface = NetworkInterface.of(networkId); +InstanceId instanceId = InstanceId.of("us-central1-a", "instance-name"); +MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1"); +Operation operation = + compute.create(InstanceInfo.of(instanceId, machineTypeId, attachedDisk, networkInterface)); +while (!operation.isDone()) { + Thread.sleep(1000L); +} +if (operation.errors() == null) { + // use instance + Instance instance = compute.getInstance(instanceId); +} +``` + Google Cloud Datastore ---------------------- @@ -455,3 +530,7 @@ Apache 2.0 - See [LICENSE] for more information. [cloud-bigquery]: https://cloud.google.com/bigquery/ [cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs/overview [bigquery-api]: http://googlecloudplatform.github.io/gcloud-java/apidocs/index.html?com/google/cloud/bigquery/package-summary.html + +[cloud-compute]: https://cloud.google.com/compute/ +[cloud-compute-docs]: https://cloud.google.com/compute/docs/overview +[compute-api]: http://googlecloudplatform.github.io/gcloud-java/apidocs/index.html?com/google/cloud/compute/package-summary.html diff --git a/gcloud-java-compute/README.md b/gcloud-java-compute/README.md new file mode 100644 index 000000000000..7bd7c83ee523 --- /dev/null +++ b/gcloud-java-compute/README.md @@ -0,0 +1,267 @@ +Google Cloud Java Client for Compute (Alpha) +==================================== + +Java idiomatic client for [Google Cloud Compute](https://cloud.google.com/compute). 
+ +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-java.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-java) +[![Coverage Status](https://coveralls.io/repos/GoogleCloudPlatform/gcloud-java/badge.svg?branch=master)](https://coveralls.io/r/GoogleCloudPlatform/gcloud-java?branch=master) +[![Maven](https://img.shields.io/maven-central/v/com.google.cloud/gcloud-java-compute.svg)]( https://img.shields.io/maven-central/v/com.google.cloud/gcloud-java-compute.svg) +[![Codacy Badge](https://api.codacy.com/project/badge/grade/9da006ad7c3a4fe1abd142e77c003917)](https://www.codacy.com/app/mziccard/gcloud-java) +[![Dependency Status](https://www.versioneye.com/user/projects/56bd8ee72a29ed002d2b0969/badge.svg?style=flat)](https://www.versioneye.com/user/projects/56bd8ee72a29ed002d2b0969) + +- [Homepage](https://googlecloudplatform.github.io/gcloud-java/) +- [API Documentation](http://googlecloudplatform.github.io/gcloud-java/apidocs/index.html?com/google/cloud/compute/package-summary.html) + +> Note: This client is a work-in-progress, and may occasionally +> make backwards-incompatible changes. + +Quickstart +---------- +If you are using Maven, add this to your pom.xml file +```xml +<dependency> +  <groupId>com.google.cloud</groupId> +  <artifactId>gcloud-java-compute</artifactId> +  <version>0.2.0</version> +</dependency> +``` +If you are using Gradle, add this to your dependencies +```Groovy +compile 'com.google.cloud:gcloud-java-compute:0.2.0' +``` +If you are using SBT, add this to your dependencies +```Scala +libraryDependencies += "com.google.cloud" % "gcloud-java-compute" % "0.2.0" +``` + +Example Application +------------------- + +[`ComputeExample`](../gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/ComputeExample.java) +is a simple command line interface that provides some of Google Cloud Compute Engine's +functionality. Read more about using the application on the +[`ComputeExample` docs page](http://googlecloudplatform.github.io/gcloud-java/apidocs/?com/google/cloud/examples/compute/ComputeExample.html). + +Authentication +-------------- + +See the [Authentication](https://github.com/GoogleCloudPlatform/gcloud-java#authentication) section in the base directory's README. + +About Google Cloud Compute +-------------------------- + +[Google Cloud Compute][cloud-compute] delivers virtual machines running in Google's innovative data +centers and worldwide fiber network. Compute Engine's tooling and workflow support enable scaling +from single instances to global, load-balanced cloud computing. Compute Engine's VMs boot quickly, +come with persistent disk storage, deliver consistent performance and are available in many +configurations. + +Be sure to activate the Google Compute Engine API on the Developers Console to use Compute from +your project. + +See the ``gcloud-java`` API [compute documentation][compute-api] to learn how to interact +with Google Cloud Compute using this Client Library. + +Getting Started +--------------- + +#### Prerequisites +For this tutorial, you will need a [Google Developers Console](https://console.developers.google.com/) +project with the Compute Engine API enabled. You will need to [enable billing](https://support.google.com/cloud/answer/6158867?hl=en) +to use Google Cloud Compute. [Follow these instructions](https://cloud.google.com/docs/authentication#preparation) +to get your project set up.
You will also need to set up the local development environment by +[installing the Google Cloud SDK](https://cloud.google.com/sdk/) and running the following commands +in command line: `gcloud auth login` and `gcloud config set project [YOUR PROJECT ID]`. + +#### Installation and setup +You'll need to obtain the `gcloud-java-compute` library. See the [Quickstart](#quickstart) section +to add `gcloud-java-compute` as a dependency in your code. + +#### Creating an authorized service object +To make authenticated requests to Google Cloud Compute Engine, you must create a service object with +credentials. You can then make API calls by calling methods on the Compute service object. The +simplest way to authenticate is to use [Application Default Credentials](https://developers.google.com/identity/protocols/application-default-credentials). +These credentials are automatically inferred from your environment, so you only need the following +code to create your service object: + +```java +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; + +Compute compute = ComputeOptions.defaultInstance().service(); +``` + +For other authentication options, see the [Authentication](https://github.com/GoogleCloudPlatform/gcloud-java#authentication) +page. + +#### Creating a region IP address +An external region IP address can be associated with a Google Compute Engine instance to communicate +with instances in different regions or to communicate with the instance from outside of Compute +Engine. In this code snippet, we will create a new external region address. + +Add the following imports at the top of your file: + +```java +import com.google.cloud.compute.AddressInfo; +import com.google.cloud.compute.Operation; +import com.google.cloud.compute.RegionAddressId; +``` + +Then add the following code to create an address. Most Compute Engine calls return an `Operation` +object that can be used to wait for operation completion and to check whether the operation failed or +succeeded: + +```java +RegionAddressId addressId = RegionAddressId.of("us-central1", "test-address"); +Operation operation = compute.create(AddressInfo.of(addressId)); +while (!operation.isDone()) { + Thread.sleep(1000L); +} +operation = operation.reload(); +if (operation.errors() == null) { + System.out.println("Address " + addressId + " was successfully created"); +} else { + // inspect operation.errors() + throw new RuntimeException("Address creation failed"); +} +``` + +#### Creating a persistent disk +A persistent disk can be used as primary storage for your virtual machine instances. Persistent +disks can be created empty, from a disk image or from a disk snapshot. Compute Engine offers +[publicly-available images](https://cloud.google.com/compute/docs/operating-systems/) of certain +operating systems that you can use. In this code snippet, we will create a new persistent disk from +a publicly-available image. + +Add the following imports at the top of your file: + +```java +import com.google.cloud.compute.DiskInfo; +import com.google.cloud.compute.DiskId; +import com.google.cloud.compute.ImageDiskConfiguration; +import com.google.cloud.compute.ImageId; +``` + +Then add the following code to create a disk and wait for disk creation to terminate.
+ +```java +ImageId imageId = ImageId.of("debian-cloud", "debian-8-jessie-v20160329"); +DiskId diskId = DiskId.of("us-central1-a", "test-disk"); +ImageDiskConfiguration diskConfiguration = ImageDiskConfiguration.of(imageId); +DiskInfo disk = DiskInfo.of(diskId, diskConfiguration); +Operation operation = compute.create(disk); +while (!operation.isDone()) { + Thread.sleep(1000L); +} +operation = operation.reload(); +if (operation.errors() == null) { + System.out.println("Disk " + diskId + " was successfully created"); +} else { + // inspect operation.errors() + throw new RuntimeException("Disk creation failed"); +} +``` + +#### Creating a virtual machine instance +A Google Compute Engine instance is a virtual machine (VM) hosted on Google's infrastructure. An +instance can be created given its identity, a machine type, one boot disk and a network interface. +In this code snippet, we will create a virtual machine instance in the default network, using the +disk we have just created as its boot disk and assigning it the external IP address created above. + +Add the following imports at the top of your file: + +```java +import com.google.cloud.compute.Address; +import com.google.cloud.compute.AttachedDisk; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.InstanceId; +import com.google.cloud.compute.InstanceInfo; +import com.google.cloud.compute.MachineTypeId; +import com.google.cloud.compute.NetworkConfiguration; +import com.google.cloud.compute.NetworkConfiguration.AccessConfig; +import com.google.cloud.compute.NetworkId; +import com.google.cloud.compute.NetworkInterface; +``` + +Then add the following code to create an instance and wait for instance creation to terminate. + +```java +Address externalIp = compute.getAddress(addressId); +InstanceId instanceId = InstanceId.of("us-central1-a", "test-instance"); +NetworkId networkId = NetworkId.of("default"); +PersistentDiskConfiguration attachConfiguration = + PersistentDiskConfiguration.builder(diskId).boot(true).build(); +AttachedDisk attachedDisk = AttachedDisk.of("dev0", attachConfiguration); +NetworkInterface networkInterface = NetworkInterface.builder(networkId) + .accessConfigurations(AccessConfig.of(externalIp.address())) + .build(); +MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1"); +InstanceInfo instance = + InstanceInfo.of(instanceId, machineTypeId, attachedDisk, networkInterface); +Operation operation = compute.create(instance); +while (!operation.isDone()) { + Thread.sleep(1000L); +} +operation = operation.reload(); +if (operation.errors() == null) { + System.out.println("Instance " + instanceId + " was successfully created"); +} else { + // inspect operation.errors() + throw new RuntimeException("Instance creation failed"); +} +``` + +#### Complete source code + +In +[CreateAddressDiskAndInstance.java](../gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateAddressDiskAndInstance.java) +we put together all the code shown above into one program. The program assumes that you are +running on Compute Engine or from your own desktop. To run the example on App Engine, simply move +the code from the main method to your application's servlet class and change the print statements to +display on your webpage. + +Troubleshooting +--------------- + +To get help, follow the `gcloud-java` links in the `gcloud-*` [shared Troubleshooting document](https://github.com/GoogleCloudPlatform/gcloud-common/blob/master/troubleshooting/readme.md#troubleshooting).
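+
+Two kinds of failure can surface when using this library. If a request cannot be executed at all
+(for example, because the Compute Engine API is not enabled for your project or credentials are
+missing), the service methods throw a `ComputeException`. If a request is accepted but the resulting
+operation fails, `Operation.errors()` returns the errors reported by the service once the operation
+is done. The sketch below shows one way to check for both when creating an address. It is only an
+illustration: it assumes `ComputeException` lives in `com.google.cloud.compute` alongside the other
+classes used here, and, like the snippets above, it assumes the enclosing method can propagate
+`InterruptedException`.
+
+```java
+import com.google.cloud.compute.AddressInfo;
+import com.google.cloud.compute.Compute;
+import com.google.cloud.compute.ComputeException;
+import com.google.cloud.compute.ComputeOptions;
+import com.google.cloud.compute.Operation;
+import com.google.cloud.compute.RegionAddressId;
+
+Compute compute = ComputeOptions.defaultInstance().service();
+RegionAddressId addressId = RegionAddressId.of("us-central1", "test-address");
+try {
+  Operation operation = compute.create(AddressInfo.of(addressId));
+  while (!operation.isDone()) {
+    Thread.sleep(1000L);
+  }
+  operation = operation.reload();
+  if (operation.errors() != null) {
+    // the operation completed but did not succeed; inspect the reported errors
+    System.out.println("Operation failed: " + operation.errors());
+  }
+} catch (ComputeException ex) {
+  // the request itself was rejected before any operation was started
+  System.out.println("Request failed: " + ex.getMessage());
+}
+```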
+ +Java Versions +------------- + +Java 7 or above is required for using this client. + +Testing +------- + +This library has tools to help make tests for code using Cloud Compute. + +See [TESTING] to read more about testing. + +Versioning +---------- + +This library follows [Semantic Versioning](http://semver.org/). + +It is currently in major version zero (``0.y.z``), which means that anything +may change at any time and the public API should not be considered +stable. + +Contributing +------------ + +Contributions to this library are always welcome and highly encouraged. + +See [CONTRIBUTING] for more information on how to get started. + +License +------- + +Apache 2.0 - See [LICENSE] for more information. + + +[CONTRIBUTING]:https://github.com/GoogleCloudPlatform/gcloud-java/blob/master/CONTRIBUTING.md +[LICENSE]: https://github.com/GoogleCloudPlatform/gcloud-java/blob/master/LICENSE +[TESTING]: https://github.com/GoogleCloudPlatform/gcloud-java/blob/master/TESTING.md#testing-code-that-uses-compute +[cloud-platform]: https://cloud.google.com/ + +[cloud-compute]: https://cloud.google.com/compute/ +[compute-api]: http://googlecloudplatform.github.io/gcloud-java/apidocs/index.html?com/google/cloud/compute/package-summary.html diff --git a/gcloud-java-compute/pom.xml b/gcloud-java-compute/pom.xml new file mode 100644 index 000000000000..40a09b008076 --- /dev/null +++ b/gcloud-java-compute/pom.xml @@ -0,0 +1,57 @@ + + + 4.0.0 + gcloud-java-compute + jar + GCloud Java compute + https://github.com/GoogleCloudPlatform/gcloud-java/tree/master/gcloud-java-compute + + Java idiomatic client for Google Cloud Compute Engine. + + + com.google.cloud + gcloud-java-pom + 0.2.1-SNAPSHOT + + + gcloud-java-compute + + + + ${project.groupId} + gcloud-java-core + ${project.version} + + + com.google.apis + google-api-services-compute + v1-rev103-1.21.0 + compile + + + com.google.guava + guava-jdk5 + + + + + ${project.groupId} + gcloud-java-core + ${project.version} + test-jar + test + + + junit + junit + 4.12 + test + + + org.easymock + easymock + 3.4 + test + + + diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Address.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Address.java new file mode 100644 index 000000000000..10e99cadb60f --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Address.java @@ -0,0 +1,191 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.AddressOption; +import com.google.cloud.compute.Compute.OperationOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.Objects; + +/** + * A Google Compute Engine address. 
With Compute Engine you can create static external IP addresses + * that are assigned to your project and persist until you explicitly release them. A region address + * can be assigned to a Compute Engine instance or to a regional forwarding rule. Compute Engine + * also allows you to create global addresses that are used for global forwarding rules. Both global + * addresses and global forwarding rules can only be used for HTTP load balancing. {@code Address} + * adds a layer of service-related functionality over {@link AddressInfo}. Objects of this class are + * immutable. To get an {@code Address} object with the most recent information use {@link #reload}. + * + * @see + * Static external IP addresses + * @see HTTP Load Balancing + */ +public class Address extends AddressInfo { + + private static final long serialVersionUID = 3457542817554062712L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Address} objects. + */ + public static class Builder extends AddressInfo.Builder { + + private final Compute compute; + private final AddressInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, AddressId addressId) { + this.compute = compute; + this.infoBuilder = new AddressInfo.BuilderImpl(); + this.infoBuilder.addressId(addressId); + } + + Builder(Address address) { + this.compute = address.compute; + this.infoBuilder = new AddressInfo.BuilderImpl(address); + } + + @Override + public Builder address(String address) { + infoBuilder.address(address); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + Builder generatedId(String generatedId) { + infoBuilder.generatedId(generatedId); + return this; + } + + @Override + public Builder addressId(AddressId addressId) { + infoBuilder.addressId(addressId); + return this; + } + + @Override + Builder status(Status status) { + infoBuilder.status(status); + return this; + } + + @Override + Builder usage(Usage usage) { + infoBuilder.usage(usage); + return this; + } + + @Override + public Address build() { + return new Address(compute, infoBuilder); + } + } + + Address(Compute compute, AddressInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this address exists. + * + * @return {@code true} if this address exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(AddressOption.fields()) != null; + } + + /** + * Fetches the current address' latest information. Returns {@code null} if the address does not + * exist. + * + * @param options address options + * @return an {@code Address} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Address reload(AddressOption... options) { + return compute.getAddress(addressId(), options); + } + + /** + * Deletes this address. + * + * @return an {@code Operation} object if delete request was successfully sent, {@code null} if + * the address was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... 
options) { + return compute.deleteAddress(addressId(), options); + } + + /** + * Returns the address's {@code Compute} object used to issue requests. + */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Address.class)) { + return false; + } + Address other = (Address) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Address fromPb(Compute compute, com.google.api.services.compute.model.Address addressPb) { + return new Address(compute, new AddressInfo.BuilderImpl(addressPb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/AddressId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/AddressId.java new file mode 100644 index 000000000000..e5440e6f6d34 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/AddressId.java @@ -0,0 +1,91 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * Base class for Google Compute Engine address identities. + */ +public abstract class AddressId extends ResourceId { + + private static final long serialVersionUID = 147328216049936438L; + + private final String address; + + /** + * Possible types for a Google Compute Engine address identity. + */ + enum Type { + /** + * Global static external IP addresses can be assigned to global forwarding rules. + */ + GLOBAL, + + /** + * Region static external IP addresses can be assigned to instances and region forwarding rules. + */ + REGION + } + + AddressId(String project, String address) { + super(project); + this.address = checkNotNull(address); + } + + /** + * Returns the type of this address identity. + */ + public abstract Type type(); + + /** + * Returns the name of the address resource. The name must be 1-63 characters long and comply with + * RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. 
+ * + * @see RFC1035 + */ + public String address() { + return address; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("address", address); + } + + @Override + final int baseHashCode() { + return Objects.hash(super.baseHashCode(), address); + } + + @Override + final boolean baseEquals(ResourceId resourceId) { + return resourceId instanceof AddressId + && super.baseEquals(resourceId) + && Objects.equals(address, ((AddressId) resourceId).address); + } + + @Override + abstract AddressId setProjectId(String projectId); +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/AddressInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/AddressInfo.java new file mode 100644 index 000000000000..9a95c77119c8 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/AddressInfo.java @@ -0,0 +1,574 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Address; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine address. With Compute Engine you can create static external IP addresses + * that are assigned to your project and persist until you explicitly release them. A region address + * can be assigned to a Compute Engine instance or to a regional forwarding rule. To create a region + * address, use a {@link RegionAddressId} identity. Compute Engine also allows you to create global + * addresses that are used for global forwarding rules. Both global addresses and global forwarding + * rules can only be used for HTTP load balancing. To create a global address, use a + * {@link GlobalAddressId} identity. 
+ * + * @see + * Static external IP addresses + * @see HTTP Load Balancing + */ +public class AddressInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public AddressInfo apply(Address pb) { + return AddressInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Address apply(AddressInfo addressInfo) { + return addressInfo.toPb(); + } + }; + + private static final long serialVersionUID = 7678434703520207500L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String address; + private final Long creationTimestamp; + private final String description; + private final String generatedId; + private final AddressId addressId; + private final Status status; + private final Usage usage; + + /** + * The status of the address. + */ + public enum Status { + + /** + * The address is reserved for the project and is available for use. + */ + RESERVED, + + /** + * The address is currently being used and thus not available. + */ + IN_USE + } + + /** + * Base class for a Google Compute Engine address's usage information. Implementations of this + * class represent different possible usages of a Compute Engine address. {@link InstanceUsage} + * contains information for region addresses assigned to a Google Compute Engine instance. + * {@link RegionForwardingUsage} contains information for region addresses assigned to one or more + * region forwarding rules. {@link GlobalForwardingUsage} contains information for global + * addresses assigned to one or more global forwarding rules. + */ + public abstract static class Usage implements Serializable { + + private static final long serialVersionUID = -5028609518171408695L; + + Usage() {} + + /** + * Returns the identities of resources currently using this address. + */ + public abstract List users(); + + final boolean baseEquals(Usage usage) { + return Objects.equals(toPb(), usage.toPb()); + } + + Address toPb() { + return new Address().setUsers(Lists.transform(users(), new Function() { + @Override + public String apply(ResourceId resourceId) { + return resourceId.selfLink(); + } + })); + } + + @SuppressWarnings("unchecked") + static T fromPb(Address addressPb) { + String url = addressPb.getUsers().get(0); + if (InstanceId.matchesUrl(url)) { + return (T) InstanceUsage.fromPb(addressPb); + } else if (RegionForwardingRuleId.matchesUrl(url)) { + return (T) RegionForwardingUsage.fromPb(addressPb); + } else if (GlobalForwardingRuleId.matchesUrl(url)) { + return (T) GlobalForwardingUsage.fromPb(addressPb); + } else { + throw new IllegalArgumentException("Unexpected resource URL for address user"); + } + } + } + + /** + * Usage information for a Google Compute Engine region address assigned to a virtual machine + * instance. + */ + public static final class InstanceUsage extends Usage { + + private static final long serialVersionUID = -5028609518171408695L; + + private final InstanceId instance; + + InstanceUsage(InstanceId instance) { + this.instance = checkNotNull(instance); + } + + /** + * Returns the identity of the instance using the address. 
+ */ + public InstanceId instance() { + return instance; + } + + @Override + public List users() { + return ImmutableList.of(instance); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("instance", instance).toString(); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof InstanceUsage && baseEquals((InstanceUsage) obj); + } + + @Override + public int hashCode() { + return Objects.hash(instance); + } + + @SuppressWarnings("unchecked") + static InstanceUsage fromPb(Address addressPb) { + return new InstanceUsage(InstanceId.fromUrl(addressPb.getUsers().get(0))); + } + } + + /** + * Usage information for a Google Compute Engine region address assigned to one or more region + * forwarding rules. + */ + public static final class RegionForwardingUsage extends Usage { + + private static final long serialVersionUID = -4255145869626427363L; + + private final List forwardingRules; + + RegionForwardingUsage(List forwardingRules) { + this.forwardingRules = ImmutableList.copyOf(forwardingRules); + } + + /** + * Returns a list of identities of region forwarding rules that are currently using the address. + */ + public List forwardingRules() { + return forwardingRules; + } + + @Override + public List users() { + return forwardingRules; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("forwardingRules", forwardingRules).toString(); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof RegionForwardingUsage && baseEquals((RegionForwardingUsage) obj); + } + + @Override + public int hashCode() { + return Objects.hash(forwardingRules); + } + + @SuppressWarnings("unchecked") + static RegionForwardingUsage fromPb(Address addressPb) { + return new RegionForwardingUsage( + Lists.transform(addressPb.getUsers(), RegionForwardingRuleId.FROM_URL_FUNCTION)); + } + } + + /** + * Usage information for a Google Compute Engine global address assigned to one or more global + * forwarding rules. + */ + public static final class GlobalForwardingUsage extends Usage { + + private static final long serialVersionUID = -2974154224319117433L; + + private final List forwardingRules; + + GlobalForwardingUsage(List forwardingRules) { + this.forwardingRules = ImmutableList.copyOf(forwardingRules); + } + + /** + * Returns a list of identities of global forwarding rules that are currently using the address. + */ + public List forwardingRules() { + return forwardingRules; + } + + @Override + public List users() { + return forwardingRules; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("forwardingRules", forwardingRules).toString(); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof GlobalForwardingUsage && baseEquals((GlobalForwardingUsage) obj); + } + + @Override + public int hashCode() { + return Objects.hash(forwardingRules); + } + + @SuppressWarnings("unchecked") + static GlobalForwardingUsage fromPb(Address addressPb) { + return new GlobalForwardingUsage( + Lists.transform(addressPb.getUsers(), GlobalForwardingRuleId.FROM_URL_FUNCTION)); + } + } + + /** + * A builder for {@code AddressInfo} objects. + */ + public abstract static class Builder { + + /** + * Sets the actual IP address. + */ + public abstract Builder address(String address); + + abstract Builder creationTimestamp(Long creationTimestamp); + + /** + * Sets an optional textual description of the address. 
+ */ + public abstract Builder description(String description); + + abstract Builder generatedId(String generatedId); + + public abstract Builder addressId(AddressId addressId); + + abstract Builder status(Status status); + + abstract Builder usage(Usage usage); + + /** + * Creates an {@code AddressInfo} object. + */ + public abstract AddressInfo build(); + } + + static final class BuilderImpl extends Builder { + + private String address; + private Long creationTimestamp; + private String description; + private String generatedId; + private AddressId addressId; + private Status status; + private Usage usage; + + BuilderImpl() {} + + BuilderImpl(AddressInfo addressInfo) { + this.address = addressInfo.address; + this.creationTimestamp = addressInfo.creationTimestamp; + this.description = addressInfo.description; + this.generatedId = addressInfo.generatedId; + this.addressId = addressInfo.addressId; + this.status = addressInfo.status; + this.usage = addressInfo.usage; + } + + BuilderImpl(Address addressPb) { + if (RegionAddressId.matchesUrl(addressPb.getSelfLink())) { + addressId = RegionAddressId.fromUrl(addressPb.getSelfLink()); + } else { + addressId = GlobalAddressId.fromUrl(addressPb.getSelfLink()); + } + address = addressPb.getAddress(); + if (addressPb.getCreationTimestamp() != null) { + creationTimestamp = TIMESTAMP_FORMATTER.parseMillis(addressPb.getCreationTimestamp()); + } + description = addressPb.getDescription(); + if (addressPb.getId() != null) { + generatedId = addressPb.getId().toString(); + } + if (addressPb.getStatus() != null) { + status = Status.valueOf(addressPb.getStatus()); + } + if (addressPb.getUsers() != null && addressPb.getUsers().size() > 0) { + usage = Usage.fromPb(addressPb); + } + } + + @Override + public BuilderImpl address(String address) { + this.address = address; + return this; + } + + @Override + BuilderImpl creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + public BuilderImpl description(String description) { + this.description = description; + return this; + } + + @Override + BuilderImpl generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + public BuilderImpl addressId(AddressId addressId) { + this.addressId = checkNotNull(addressId); + return this; + } + + @Override + BuilderImpl status(Status status) { + this.status = status; + return this; + } + + @Override + BuilderImpl usage(Usage usage) { + this.usage = usage; + return this; + } + + @Override + public AddressInfo build() { + return new AddressInfo(this); + } + } + + AddressInfo(BuilderImpl builder) { + address = builder.address; + creationTimestamp = builder.creationTimestamp; + description = builder.description; + generatedId = builder.generatedId; + addressId = checkNotNull(builder.addressId); + status = builder.status; + usage = builder.usage; + } + + /** + * Returns the static external IP address represented by this object. + */ + public String address() { + return address; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns an optional textual description of the address. + */ + public String description() { + return description; + } + + /** + * Returns the service-generated unique identifier for the address. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the address identity. 
Returns {@link GlobalAddressId} for a global address, returns + * {@link RegionAddressId} for a region address. + */ + @SuppressWarnings("unchecked") + public T addressId() { + return (T) addressId; + } + + /** + * Returns the status of the address. + */ + public Status status() { + return status; + } + + /** + * Returns the usage information of the address. Returns an {@link InstanceUsage} object for + * region addresses that are assigned to VM instances. Returns a {@link RegionForwardingUsage} + * object for region addresses assigned to region forwarding rules. Returns a + * {@link GlobalForwardingUsage} object for global addresses assigned to global forwarding rules. + * Returns {@code null} if the address is not in use. + */ + @SuppressWarnings("unchecked") + public T usage() { + return (T) usage; + } + + /** + * Returns a builder for the {@code AddressInfo} object. + */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("address", address) + .add("creationTimestamp", creationTimestamp) + .add("description", description) + .add("generatedId", generatedId) + .add("addressId", addressId) + .add("status", status) + .add("usage", usage) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(address, creationTimestamp, description, generatedId, addressId, status, + usage); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(AddressInfo.class) + && Objects.equals(toPb(), ((AddressInfo) obj).toPb()); + } + + AddressInfo setProjectId(String projectId) { + if (addressId().project() != null) { + return this; + } + return toBuilder().addressId(addressId.setProjectId(projectId)).build(); + } + + Address toPb() { + Address addressPb = usage != null ? usage.toPb() : new Address(); + addressPb.setAddress(address); + if (creationTimestamp != null) { + addressPb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + addressPb.setDescription(description); + if (generatedId != null) { + addressPb.setId(new BigInteger(generatedId)); + } + addressPb.setName(addressId.address()); + if (addressId.type() == AddressId.Type.REGION) { + addressPb.setRegion(this.addressId().regionId().selfLink()); + } + if (status != null) { + addressPb.setStatus(status.name()); + } + addressPb.setSelfLink(addressId.selfLink()); + return addressPb; + } + + /** + * Returns a builder for the {@code AddressInfo} object given it's identity. + */ + public static Builder builder(AddressId addressId) { + return new BuilderImpl().addressId(addressId); + } + + /** + * Returns an {@code AddressInfo} object for the provided identity. + */ + public static AddressInfo of(AddressId addressId) { + return builder(addressId).build(); + } + + /** + * Returns an {@code AddressInfo} object for the provided name. The object corresponds to a global + * address. + */ + public static AddressInfo of(String name) { + return of(GlobalAddressId.of(name)); + } + + /** + * Returns an {@code AddressInfo} object for the provided region identity and name. The object + * corresponds to a region address. + */ + public static AddressInfo of(RegionId regionId, String name) { + return of(RegionAddressId.of(regionId, name)); + } + + /** + * Returns an {@code AddressInfo} object for the provided region and address names. The object + * corresponds to a region address. 
+ */ + public static AddressInfo of(String region, String name) { + return of(RegionAddressId.of(region, name)); + } + + static AddressInfo fromPb(Address addressPb) { + return new BuilderImpl(addressPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/AttachedDisk.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/AttachedDisk.java new file mode 100644 index 000000000000..d4ad674a8a5e --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/AttachedDisk.java @@ -0,0 +1,926 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.AttachedDiskInitializeParams; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.Lists; + +import java.io.Serializable; +import java.util.List; +import java.util.Objects; + +/** + * A disk attached to a Google Compute Engine instance. To create a new disk to attach when an image + * is being created use {@link CreateDiskConfiguration}. To attach an existing persistent disk use + * {@link PersistentDiskConfiguration}. To attach a scratch disk use + * {@link ScratchDiskConfiguration}. + */ +public class AttachedDisk implements Serializable { + + static final Function + FROM_PB_FUNCTION = + new Function() { + @Override + public AttachedDisk apply( + com.google.api.services.compute.model.AttachedDisk pb) { + return AttachedDisk.fromPb(pb); + } + }; + static final Function + TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.AttachedDisk apply( + AttachedDisk attachedDisk) { + return attachedDisk.toPb(); + } + }; + private static final long serialVersionUID = 2969789134157943798L; + + private final String deviceName; + private final Integer index; + private final AttachedDiskConfiguration configuration; + private final List licenses; + + /** + * Base class for {@code AttachedDisk} configuration. Use {@link PersistentDiskConfiguration} to + * attach an existing persistent disk. Use {@link CreateDiskConfiguration} to create a boot + * persistent disk to attach to the instance. Use {@link ScratchDiskConfiguration} to attach a + * scratch disk. + */ + public abstract static class AttachedDiskConfiguration implements Serializable { + + private static final long serialVersionUID = 8813134841283115565L; + + private final Type type; + private final InterfaceType interfaceType; + private final Boolean boot; + private final Boolean autoDelete; + + /** + * Specifies the type of the attached disk. + */ + public enum Type { + /** + * A persistent disk attached to a VM instance. Such an attached disk must already exist or + * can be created along with the instance by using {@link CreateDiskConfiguration}. A + * persistent disk can be attached to other VM instances. 
+ */ + PERSISTENT, + + /** + * A scratch disk is created with the VM instance it is attached to. Scratch disks are only + * available to their VM instance. + */ + SCRATCH + } + + /** + * Specifies the disk interface to use for attaching this disk, which is either {@code SCSI} + * or {@code NVME}. Persistent disks must always use {@code SCSI}. Scratch SSDs can use either + * {@code NVME} or {@code SCSI}. + */ + public enum InterfaceType { + SCSI, + NVME + } + + AttachedDiskConfiguration(Type type, InterfaceType interfaceType, Boolean boot, + Boolean autoDelete) { + this.type = checkNotNull(type); + this.interfaceType = interfaceType; + this.boot = boot; + this.autoDelete = autoDelete; + } + + /** + * Returns the type of the attached disk. + */ + public Type type() { + return type; + } + + /** + * Returns the interface to use to attach the disk. If not specified, {@link InterfaceType#SCSI} + * is used. + */ + public InterfaceType interfaceType() { + return interfaceType; + } + + /** + * Returns whether to use the attached disk as a boot disk. If {@code true} the virtual machine + * will use the first partition of the disk for its root filesystem. If not specified, the + * disk is not used as a boot disk. + */ + public Boolean boot() { + return boot; + } + + /** + * Returns whether the disk should auto-delete when the instance to which it's attached is + * deleted. If not specified, the disk is not deleted automatically. + */ + public Boolean autoDelete() { + return autoDelete; + } + + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this) + .add("type", type) + .add("interfaceType", interfaceType) + .add("boot", boot) + .add("autoDelete", autoDelete); + } + + @Override + public String toString() { + return toStringHelper().toString(); + } + + final int baseHashCode() { + return Objects.hash(type, interfaceType); + } + + final boolean baseEquals(AttachedDiskConfiguration diskConfiguration) { + return Objects.equals(toPb(), diskConfiguration.toPb()); + } + + abstract AttachedDiskConfiguration setProjectId(String projectId); + + com.google.api.services.compute.model.AttachedDisk toPb() { + com.google.api.services.compute.model.AttachedDisk attachedDiskPb = + new com.google.api.services.compute.model.AttachedDisk(); + attachedDiskPb.setType(type.name()); + if (interfaceType != null) { + attachedDiskPb.setInterface(interfaceType.name()); + } + attachedDiskPb.setBoot(boot); + attachedDiskPb.setAutoDelete(autoDelete); + return attachedDiskPb; + } + + @SuppressWarnings("unchecked") + static T fromPb( + com.google.api.services.compute.model.AttachedDisk diskPb) { + switch (Type.valueOf(diskPb.getType())) { + case PERSISTENT: + if (diskPb.getSource() == null) { + return (T) CreateDiskConfiguration.fromPb(diskPb); + } else { + return (T) PersistentDiskConfiguration.fromPb(diskPb); + } + case SCRATCH: + return (T) ScratchDiskConfiguration.fromPb(diskPb); + default: + // should be unreachable + throw new IllegalArgumentException("Unrecognized attached disk type"); + } + } + } + + /** + * An attached disk configuration for existing persistent disks. + */ + public static final class PersistentDiskConfiguration extends AttachedDiskConfiguration { + + private static final long serialVersionUID = 6367613188140104726L; + + private final DiskId sourceDisk; + private final Mode mode; + + /** + * Specifies the mode in which to attach the disk. + */ + public enum Mode { + /** + * The instance can both read and write to the disk. 
+ */ + READ_WRITE, + + /** + * The instance is only allowed to read the disk. + */ + READ_ONLY + } + + /** + * A builder for {@code PersistentDiskConfiguration} objects. + */ + public static final class Builder { + + private DiskId sourceDisk; + private Mode mode; + private Boolean boot; + private Boolean autoDelete; + + private Builder(DiskId sourceDisk) { + this.sourceDisk = checkNotNull(sourceDisk); + } + + private Builder(PersistentDiskConfiguration configuration) { + sourceDisk = configuration.sourceDisk; + mode = configuration.mode; + boot = configuration.boot(); + autoDelete = configuration.autoDelete(); + } + + /** + * Sets the identity of the persistent disk to be attached. + */ + public Builder sourceDisk(DiskId sourceDisk) { + this.sourceDisk = checkNotNull(sourceDisk); + return this; + } + + /** + * Sets the mode in which to attach this disk. If not specified, the disk is attached in + * {@link Mode#READ_WRITE} mode. + */ + public Builder mode(Mode mode) { + this.mode = mode; + return this; + } + + /** + * Sets whether to use the attached disk as a boot disk. If {@code true} the virtual machine + * instance will use the first partition of the disk for its root filesystem. If not + * specified, the isk is not used as a boot disk. + */ + public Builder boot(boolean boot) { + this.boot = boot; + return this; + } + + /** + * Sets whether the disk should auto-delete when the instance to which it's attached is + * deleted. If not specified, the disk is not deleted automatically. + */ + public Builder autoDelete(boolean autoDelete) { + this.autoDelete = autoDelete; + return this; + } + + /** + * Creates a {@code PersistentDiskConfiguration} object. + */ + public PersistentDiskConfiguration build() { + return new PersistentDiskConfiguration(this); + } + } + + private PersistentDiskConfiguration(Builder builder) { + super(Type.PERSISTENT, null, builder.boot, builder.autoDelete); + this.sourceDisk = builder.sourceDisk; + this.mode = builder.mode; + } + + /** + * Returns the identity of the persistent disk to be attached. + */ + public DiskId sourceDisk() { + return sourceDisk; + } + + /** + * Returns the mode in which to attach this disk. If not specified, the disk is attached in + * {@link Mode#READ_WRITE} mode. + */ + public Mode mode() { + return mode; + } + + /** + * Returns a builder for the current configuration. + */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("sourceDisk", sourceDisk).add("mode", mode); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), sourceDisk, mode); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(PersistentDiskConfiguration.class) + && baseEquals((PersistentDiskConfiguration) obj); + } + + @Override + PersistentDiskConfiguration setProjectId(String projectId) { + if (sourceDisk.project() != null) { + return this; + } + return toBuilder().sourceDisk(sourceDisk.setProjectId(projectId)).build(); + } + + @Override + com.google.api.services.compute.model.AttachedDisk toPb() { + com.google.api.services.compute.model.AttachedDisk attachedDiskPb = super.toPb(); + attachedDiskPb.setSource(sourceDisk.selfLink()); + attachedDiskPb.setMode(mode != null ? mode.toString() : null); + return attachedDiskPb; + } + + /** + * Returns a builder for a {@code PersistentDiskConfiguration} object given the identity of the + * persistent disk to attach. 
+ */ + public static Builder builder(DiskId sourceDisk) { + return new Builder(sourceDisk); + } + + /** + * Returns a {@code PersistentDiskConfiguration} object given the identity of the persistent + * disk to attach. + */ + public static PersistentDiskConfiguration of(DiskId sourceDisk) { + return builder(sourceDisk).build(); + } + + @SuppressWarnings("unchecked") + static PersistentDiskConfiguration fromPb( + com.google.api.services.compute.model.AttachedDisk diskPb) { + Builder builder = new Builder(DiskId.fromUrl(diskPb.getSource())); + if (diskPb.getMode() != null) { + builder.mode(Mode.valueOf(diskPb.getMode())); + } + if (diskPb.getBoot() != null) { + builder.boot(diskPb.getBoot()); + } + if (diskPb.getAutoDelete() != null) { + builder.autoDelete(diskPb.getAutoDelete()); + } + return builder.build(); + } + } + + /** + * An attached disk configuration for bootable persistent disks that must be created with the + * instance they are attached to. Attached disks that use this configuration can only be attached + * to an instance upon creation. A {@code CreateDiskConfiguration} object is never returned by the + * service: after the instance is created the corresponding attached disk will be returned with a + * {@link PersistentDiskConfiguration}. + */ + public static final class CreateDiskConfiguration extends AttachedDiskConfiguration { + + private static final long serialVersionUID = 961995522284348824L; + + private final String diskName; + private final DiskTypeId diskType; + private final Long diskSizeGb; + private final ImageId sourceImage; + + /** + * A builder for {@code CreateDiskConfiguration} objects. + */ + public static final class Builder { + + private String diskName; + private DiskTypeId diskType; + private Long diskSizeGb; + private ImageId sourceImage; + private Boolean autoDelete; + + private Builder(ImageId sourceImage) { + this.sourceImage = checkNotNull(sourceImage); + } + + private Builder(CreateDiskConfiguration configuration) { + this.diskName = configuration.diskName; + this.diskType = configuration.diskType; + this.diskSizeGb = configuration.diskSizeGb; + this.sourceImage = configuration.sourceImage; + this.autoDelete = configuration.autoDelete(); + } + + /** + * Sets the name to be assigned to the disk. If not specified, the disk is given the + * instance's name. + */ + public Builder diskName(String diskName) { + this.diskName = diskName; + return this; + } + + /** + * Sets the identity of the disk type. If not specified, {@code pd-standard} is used. + */ + public Builder diskType(DiskTypeId diskType) { + this.diskType = diskType; + return this; + } + + /** + * Sets the size of the persistent disk, in GB. If not set the disk will have the size of the + * source image. This value can be larger than the image's size. If the provided size is + * smaller than the image's size, then instance creation will fail. + */ + public Builder diskSizeGb(Long diskSizeGb) { + this.diskSizeGb = diskSizeGb; + return this; + } + + /** + * Sets the identity of the source image used to create the disk. + */ + public Builder sourceImage(ImageId sourceImage) { + this.sourceImage = checkNotNull(sourceImage); + return this; + } + + /** + * Sets whether the disk should auto-delete when the instance to which it's attached is + * deleted. If not specified, the disk is not deleted automatically. + */ + public Builder autoDelete(Boolean autoDelete) { + this.autoDelete = autoDelete; + return this; + } + + /** + * Creates a {@code CreateDiskConfiguration} object. 
+ */ + public CreateDiskConfiguration build() { + return new CreateDiskConfiguration(this); + } + } + + private CreateDiskConfiguration(Builder builder) { + super(Type.PERSISTENT, null, true, builder.autoDelete); + this.diskName = builder.diskName; + this.diskType = builder.diskType; + this.diskSizeGb = builder.diskSizeGb; + this.sourceImage = builder.sourceImage; + } + + /** + * Returns the name to be assigned to the disk. If not specified, the disk is given the + * instance's name. + */ + public String diskName() { + return diskName; + } + + /** + * Returns the identity of the disk type. If not specified, {@code pd-standard} is used. + */ + public DiskTypeId diskType() { + return diskType; + } + + /** + * Returns the size of the persistent disk, in GB. If not set the disk will have the size of the + * source image. This value can be larger than the image's size. If the provided size is smaller + * than the image's size then instance creation will fail. + */ + public Long diskSizeGb() { + return diskSizeGb; + } + + /** + * Returns the identity of the source image used to create the disk. + */ + public ImageId sourceImage() { + return sourceImage; + } + + /** + * Returns a builder for the current configuration. + */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper() + .add("diskName", diskName) + .add("diskType", diskType) + .add("diskSizeGb", diskSizeGb) + .add("sourceImage", sourceImage); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), diskName, diskType, diskSizeGb, sourceImage); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(CreateDiskConfiguration.class) + && baseEquals((CreateDiskConfiguration) obj); + } + + @Override + CreateDiskConfiguration setProjectId(String projectId) { + Builder builder = toBuilder(); + if (builder.diskType != null) { + builder.diskType(diskType.setProjectId(projectId)); + } + if (builder.sourceImage != null) { + builder.sourceImage(sourceImage.setProjectId(projectId)); + } + return builder.build(); + } + + @Override + com.google.api.services.compute.model.AttachedDisk toPb() { + AttachedDiskInitializeParams initializeParamsPb = new AttachedDiskInitializeParams(); + initializeParamsPb.setDiskName(diskName); + initializeParamsPb.setDiskSizeGb(diskSizeGb); + initializeParamsPb.setSourceImage(sourceImage.selfLink()); + if (diskType != null) { + initializeParamsPb.setDiskType(diskType.selfLink()); + } + com.google.api.services.compute.model.AttachedDisk attachedDiskPb = super.toPb(); + attachedDiskPb.setInitializeParams(initializeParamsPb); + return attachedDiskPb; + } + + /** + * Returns a builder for a {@code CreateDiskConfiguration} object given the source image that + * will be used to create the disk. + */ + public static Builder builder(ImageId sourceImage) { + return new Builder(sourceImage); + } + + /** + * Returns a {@code CreateDiskConfiguration} object given the source image that will be used to + * create the disk. 
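+   *
+   * <p>For example, with a placeholder image identity:
+   * <pre>{@code
+   * CreateDiskConfiguration configuration =
+   *     CreateDiskConfiguration.of(ImageId.of("image-project", "image-name"));
+   * }</pre>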
+ */ + public static CreateDiskConfiguration of(ImageId sourceImage) { + return builder(sourceImage).build(); + } + + @SuppressWarnings("unchecked") + static CreateDiskConfiguration fromPb( + com.google.api.services.compute.model.AttachedDisk diskPb) { + AttachedDiskInitializeParams initializeParamsPb = diskPb.getInitializeParams(); + Builder builder = builder(ImageId.fromUrl(initializeParamsPb.getSourceImage())); + if (initializeParamsPb.getDiskType() != null) { + builder.diskType(DiskTypeId.fromUrl(initializeParamsPb.getDiskType())); + } + builder.diskName(initializeParamsPb.getDiskName()); + builder.diskSizeGb(initializeParamsPb.getDiskSizeGb()); + builder.autoDelete(diskPb.getAutoDelete()); + if (initializeParamsPb.getDiskType() != null) { + builder.diskType(DiskTypeId.fromUrl(initializeParamsPb.getDiskType())); + } + return builder.build(); + } + } + + /** + * An attached disk configuration for scratch disks. Attached disks that use this configuration + * can only be attached to an instance upon creation. + */ + public static final class ScratchDiskConfiguration extends AttachedDiskConfiguration { + + private static final long serialVersionUID = -8445453507234691254L; + + private final DiskTypeId diskType; + + /** + * A builder for {@code ScratchDiskConfiguration} objects. + */ + public static final class Builder { + + private DiskTypeId diskType; + private InterfaceType interfaceType; + + private Builder() {} + + private Builder(ScratchDiskConfiguration configuration) { + this.diskType = configuration.diskType; + this.interfaceType = configuration.interfaceType(); + } + + /** + * Sets the identity of the disk type for the scratch disk to attach. + */ + public Builder diskType(DiskTypeId diskType) { + this.diskType = diskType; + return this; + } + + /** + * Sets the interface type. If not specified, {@code SCSI} is used. + */ + public Builder interfaceType(InterfaceType interfaceType) { + this.interfaceType = interfaceType; + return this; + } + + /** + * Creates a {@code ScratchDiskConfiguration} object. + */ + public ScratchDiskConfiguration build() { + return new ScratchDiskConfiguration(this); + } + } + + private ScratchDiskConfiguration(Builder builder) { + super(Type.SCRATCH, builder.interfaceType, false, true); + this.diskType = builder.diskType; + } + + /** + * Returns the identity of the disk type for the scratch disk to attach. + */ + public DiskTypeId diskType() { + return diskType; + } + + /** + * Returns a builder for the current configuration. 
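+   *
+   * <p>A sketch of deriving a modified copy of an existing configuration; here
+   * {@code scratchConfiguration} stands for a previously built {@code ScratchDiskConfiguration}:
+   * <pre>{@code
+   * ScratchDiskConfiguration updated = scratchConfiguration.toBuilder()
+   *     .interfaceType(InterfaceType.SCSI)
+   *     .build();
+   * }</pre>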
+ */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("diskType", diskType); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode()); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(ScratchDiskConfiguration.class) + && baseEquals((ScratchDiskConfiguration) obj); + } + + @Override + ScratchDiskConfiguration setProjectId(String projectId) { + if (diskType.project() != null) { + return this; + } + return toBuilder().diskType(diskType.setProjectId(projectId)).build(); + } + + @Override + com.google.api.services.compute.model.AttachedDisk toPb() { + com.google.api.services.compute.model.AttachedDisk attachedDiskPb = super.toPb(); + if (diskType != null) { + AttachedDiskInitializeParams initializeParamsPb = new AttachedDiskInitializeParams(); + initializeParamsPb.setDiskType(diskType.selfLink()); + attachedDiskPb.setInitializeParams(initializeParamsPb); + } + return attachedDiskPb; + } + + /** + * Returns a builder for {@code ScratchDiskConfiguration} objects given the disk type identity. + */ + public static Builder builder(DiskTypeId diskType) { + return new Builder().diskType(diskType); + } + + /** + * Returns a {@code ScratchDiskConfiguration} object given the disk type identity. The disk will + * be attached via the default interface ({@link InterfaceType#SCSI}). + */ + public static ScratchDiskConfiguration of(DiskTypeId diskType) { + return builder(diskType).build(); + } + + @SuppressWarnings("unchecked") + static ScratchDiskConfiguration fromPb( + com.google.api.services.compute.model.AttachedDisk diskPb) { + Builder builder = new Builder(); + if (diskPb.getInterface() != null) { + builder.interfaceType(InterfaceType.valueOf(diskPb.getInterface())); + } + if (diskPb.getInitializeParams() != null + && diskPb.getInitializeParams().getDiskType() != null) { + builder.diskType(DiskTypeId.fromUrl(diskPb.getInitializeParams().getDiskType())); + } + return builder.build(); + } + } + + /** + * A builder for {@code AttachedDisk} objects. + */ + public static final class Builder { + + private String deviceName; + private Integer index; + private AttachedDiskConfiguration configuration; + private List licenses; + + Builder(AttachedDiskConfiguration configuration) { + this.configuration = checkNotNull(configuration); + } + + Builder(AttachedDisk attachedDisk) { + this.deviceName = attachedDisk.deviceName; + this.index = attachedDisk.index; + this.configuration = attachedDisk.configuration; + this.licenses = attachedDisk.licenses; + } + + /** + * Sets the unique device name of your choice that is reflected into the + * {@code /dev/disk/by-id/google-*} tree of a Linux operating system running within the + * instance. This name can be used to reference the device for mounting, resizing, and so on, + * from within the instance. If not specified, the service chooses a default device name to + * apply to this disk, in the form {@code persistent-disks-x}, where x is a number assigned by + * Google Compute Engine. + */ + public Builder deviceName(String deviceName) { + this.deviceName = deviceName; + return this; + } + + /** + * Sets a zero-based index to this disk, where 0 is reserved for the boot disk. For example, + * if you have many disks attached to an instance, each disk would have an unique index number. + * If not specified, the service will choose an appropriate value. 
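+   *
+   * <p>For illustration, a sketch that assigns explicit indexes to a boot disk and a data disk;
+   * {@code bootConfiguration} and {@code dataConfiguration} stand for previously built attached
+   * disk configurations:
+   * <pre>{@code
+   * AttachedDisk bootDisk = AttachedDisk.builder(bootConfiguration).index(0).build();
+   * AttachedDisk dataDisk = AttachedDisk.builder(dataConfiguration).index(1).build();
+   * }</pre>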
+ */ + public Builder index(Integer index) { + this.index = index; + return this; + } + + /** + * Sets the attached disk configuration. Use {@link ScratchDiskConfiguration} to attach a + * scratch disk to the instance. Use {@link PersistentDiskConfiguration} to attach a + * persistent disk to the instance. Use {@link CreateDiskConfiguration} to create and attach + * a new persistent disk. + */ + public Builder configuration(AttachedDiskConfiguration configuration) { + this.configuration = checkNotNull(configuration); + return this; + } + + Builder licenses(List licenses) { + this.licenses = licenses; + return this; + } + + /** + * Creates an {@code AttachedDisk} object. + */ + public AttachedDisk build() { + return new AttachedDisk(this); + } + } + + private AttachedDisk(Builder builder) { + this.deviceName = builder.deviceName; + this.index = builder.index; + this.configuration = builder.configuration; + this.licenses = builder.licenses; + } + + /** + * Returns the unique device name of your choice that is reflected into the + * {@code /dev/disk/by-id/google-*} tree of a Linux operating system running within the + * instance. This name can be used to reference the device for mounting, resizing, and so on, + * from within the instance. If not specified, the service chooses a default device name to + * apply to this disk, in the form {@code persistent-disks-x}, where x is a number assigned by + * Google Compute Engine. + */ + public String deviceName() { + return deviceName; + } + + /** + * Returns a zero-based index to this disk, where 0 is reserved for the boot disk. + */ + public Integer index() { + return index; + } + + /** + * Returns the attached disk configuration. Returns {@link ScratchDiskConfiguration} to attach a + * scratch disk to the instance. Returns {@link PersistentDiskConfiguration} to attach a + * persistent disk to the instance. Returns {@link CreateDiskConfiguration} to create and attach + * a new persistent disk. + */ + @SuppressWarnings("unchecked") + public T configuration() { + return (T) configuration; + } + + /** + * Returns a list of publicly accessible licenses for the attached disk. + */ + public List licenses() { + return licenses; + } + + /** + * Returns a builder for the current attached disk. + */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("deviceName", deviceName) + .add("index", index) + .add("configuration", configuration) + .add("licenses", licenses) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(deviceName, index, configuration, licenses); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(AttachedDisk.class) + && Objects.equals(toPb(), ((AttachedDisk) obj).toPb()); + } + + AttachedDisk setProjectId(String projectId) { + return toBuilder().configuration(configuration.setProjectId(projectId)).build(); + } + + com.google.api.services.compute.model.AttachedDisk toPb() { + com.google.api.services.compute.model.AttachedDisk attachedDiskPb = configuration.toPb(); + attachedDiskPb.setDeviceName(deviceName); + attachedDiskPb.setIndex(index); + if (licenses != null) { + attachedDiskPb.setLicenses(Lists.transform(licenses, LicenseId.TO_URL_FUNCTION)); + } + return attachedDiskPb; + } + + /** + * Returns a builder for an {@code AttachedDisk} object given its configuration. 
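+   *
+   * <p>A minimal sketch; the zone, disk name, and device name are placeholders:
+   * <pre>{@code
+   * PersistentDiskConfiguration configuration =
+   *     PersistentDiskConfiguration.of(DiskId.of("us-central1-a", "disk-name"));
+   * AttachedDisk attachedDisk = AttachedDisk.builder(configuration)
+   *     .deviceName("data-disk")
+   *     .build();
+   * }</pre>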
+ */ + public static Builder builder(AttachedDiskConfiguration configuration) { + return new Builder(configuration).configuration(configuration); + } + + /** + * Returns an {@code AttachedDisk} object given its configuration. + */ + public static AttachedDisk of(AttachedDiskConfiguration configuration) { + return builder(configuration).build(); + } + + /** + * Returns an {@code AttachedDisk} object given the device name and its configuration. + */ + public static AttachedDisk of(String deviceName, AttachedDiskConfiguration configuration) { + return builder(configuration).deviceName(deviceName).build(); + } + + static AttachedDisk fromPb(com.google.api.services.compute.model.AttachedDisk diskPb) { + AttachedDiskConfiguration configuration = AttachedDiskConfiguration.fromPb(diskPb); + Builder builder = builder(configuration); + builder.deviceName(diskPb.getDeviceName()); + builder.index(diskPb.getIndex()); + if (diskPb.getLicenses() != null) { + builder.licenses(Lists.transform(diskPb.getLicenses(), LicenseId.FROM_URL_FUNCTION)); + } + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Compute.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Compute.java new file mode 100644 index 000000000000..47d594ba6ec5 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Compute.java @@ -0,0 +1,2709 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.FieldSelector; +import com.google.cloud.FieldSelector.Helper; +import com.google.cloud.Page; +import com.google.cloud.Service; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.cloud.compute.spi.ComputeRpc; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; + +import java.io.Serializable; +import java.util.List; +import java.util.Objects; + +/** + * An interface for Google Cloud Compute Engine. + * + * @see Google Cloud Compute Engine + */ +public interface Compute extends Service { + + /** + * Fields of a Compute Engine DiskType resource. + * + * @see Disk + * Type Resource + */ + enum DiskTypeField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DEFAULT_DISK_SIZE_GB("defaultDiskSizeGb"), + DESCRIPTION("description"), + ID("id"), + NAME("name"), + SELF_LINK("selfLink"), + VALID_DISK_SIZE("validDiskSize"), + ZONE("zone"), + DEPRECATED("deprecated"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + DiskTypeField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine MachineType resource. 
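+   *
+   * <p>These values are typically passed to the {@code fields()} methods of
+   * {@code MachineTypeOption} and {@code MachineTypeListOption} to request a partial response.
+   * A sketch, assuming the {@code getMachineType} method declared further down in this interface
+   * and a placeholder machine type identity:
+   * <pre>{@code
+   * Compute compute = ComputeOptions.defaultInstance().service();
+   * MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1");
+   * MachineType machineType = compute.getMachineType(machineTypeId,
+   *     MachineTypeOption.fields(MachineTypeField.NAME, MachineTypeField.GUEST_CPUS));
+   * }</pre>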
+ * + * @see + * Machine Type Resource + */ + enum MachineTypeField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + GUEST_CPUS("guestCpus"), + ID("id"), + IMAGE_SPACE_GB("imageSpaceGb"), + MAXIMUM_PERSISTENT_DISKS("maximumPersistentDisks"), + MAXIMUM_PERSISTENT_DISKS_SIZE_GB("maximumPersistentDisksSizeGb"), + MEMORY_MB("memoryMb"), + NAME("name"), + SCRATCH_DISKS("scratchDisks"), + SELF_LINK("selfLink"), + ZONE("zone"), + DEPRECATED("deprecated"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + MachineTypeField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Region resource. + * + * @see + * Region Resource + */ + enum RegionField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + ID("id"), + NAME("name"), + QUOTAS("quotas"), + SELF_LINK("selfLink"), + STATUS("status"), + ZONES("zones"), + DEPRECATED("deprecated"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + RegionField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Zone resource. + * + * @see Zone + * Resource + */ + enum ZoneField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + ID("id"), + NAME("name"), + REGION("region"), + SELF_LINK("selfLink"), + STATUS("status"), + DEPRECATED("deprecated"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + ZoneField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine License resource. + * + * @see License + * Resource + */ + enum LicenseField implements FieldSelector { + CHARGES_USE_FEE("chargesUseFee"), + NAME("name"), + SELF_LINK("selfLink"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + LicenseField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Operation resource. + * + * @see + * GlobalOperation Resource + * @see + * RegionOperation Resource + * @see + * ZoneOperation Resource + */ + enum OperationField implements FieldSelector { + CLIENT_OPERATION_ID("clientOperationId"), + DESCRIPTION("description"), + END_TIME("endTime"), + ERROR("error"), + HTTP_ERROR_MESSAGE("httpErrorMessage"), + HTTP_ERROR_STATUS_CODE("httpErrorStatusCode"), + ID("id"), + INSERT_TIME("insertTime"), + NAME("name"), + OPERATION_TYPE("operationType"), + PROGRESS("progress"), + REGION("region"), + SELF_LINK("selfLink"), + START_TIME("startTime"), + STATUS("status"), + STATUS_MESSAGE("statusMessage"), + TARGET_ID("targetId"), + TARGET_LINK("targetLink"), + USER("user"), + WARNINGS("warnings"), + ZONE("zone"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + OperationField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Address resource. 
+ * + * @see Region + * Address Resource + * @see + * Global Address Resource + */ + enum AddressField implements FieldSelector { + ADDRESS("address"), + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + ID("id"), + NAME("name"), + REGION("region"), + SELF_LINK("selfLink"), + STATUS("status"), + USERS("users"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + AddressField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Disk resource. + * + * @see Disk + * Resource + */ + enum DiskField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + ID("id"), + LAST_ATTACH_TIMESTAMP("lastAttachTimestamp"), + LAST_DETACH_TIMESTAMP("lastDetachTimestamp"), + LICENSES("licenses"), + NAME("name"), + OPTIONS("options"), + SELF_LINK("selfLink"), + SIZE_GB("sizeGb"), + SOURCE_IMAGE("sourceImage"), + SOURCE_IMAGE_ID("sourceImageId"), + SOURCE_SNAPSHOT("sourceSnapshot"), + SOURCE_SNAPSHOT_ID("sourceSnapshotId"), + STATUS("status"), + TYPE("type"), + USERS("users"), + ZONE("zone"); + + static final List REQUIRED_FIELDS = + ImmutableList.of(SELF_LINK, TYPE, SOURCE_IMAGE, SOURCE_SNAPSHOT); + + private final String selector; + + DiskField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Snapshot resource. + * + * @see + * Snapshot Resource + */ + enum SnapshotField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + DISK_SIZE_GB("diskSizeGb"), + ID("id"), + LICENSES("licenses"), + NAME("name"), + SELF_LINK("selfLink"), + SOURCE_DISK("sourceDisk"), + SOURCE_DISK_ID("sourceDiskId"), + STATUS("status"), + STORAGE_BYTES("storageBytes"), + STORAGE_BYTES_STATUS("storageBytesStatus"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + SnapshotField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Image resource. + * + * @see Image + * Resource + */ + enum ImageField implements FieldSelector { + ARCHIVE_SIZE_BYTES("archiveSizeBytes"), + CREATION_TIMESTAMP("creationTimestamp"), + DEPRECATED("deprecated"), + DESCRIPTION("description"), + DISK_SIZE_GB("diskSizeGb"), + ID("id"), + LICENSES("licenses"), + NAME("name"), + RAW_DISK("rawDisk"), + SELF_LINK("selfLink"), + SOURCE_DISK("sourceDisk"), + SOURCE_DISK_ID("sourceDiskId"), + SOURCE_TYPE("sourceType"); + + static final List REQUIRED_FIELDS = + ImmutableList.of(SELF_LINK, SOURCE_DISK, RAW_DISK); + + private final String selector; + + ImageField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Subnetwork resource. 
+ * + * @see + * Subnetwork Resource + */ + enum SubnetworkField implements FieldSelector { + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + GATEWAY_ADDRESS("gatewayAddress"), + ID("id"), + IP_CIDR_RANGE("ipCidrRange"), + NAME("name"), + NETWORK("network"), + REGION("region"), + SELF_LINK("selfLink"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + SubnetworkField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Network resource. + * + * @see + * Network Resource + */ + enum NetworkField implements FieldSelector { + IPV4_RANGE("IPv4Range"), + AUTO_CREATE_SUBNETWORKS("autoCreateSubnetworks"), + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + GATEWAY_IPV4("gatewayIPv4"), + ID("id"), + NAME("name"), + SELF_LINK("selfLink"), + SUBNETWORKS("subnetworks"); + + static final List REQUIRED_FIELDS = + ImmutableList.of(SELF_LINK, IPV4_RANGE, AUTO_CREATE_SUBNETWORKS); + + private final String selector; + + NetworkField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Fields of a Compute Engine Instance resource. + * + * @see + * Network Resource + */ + enum InstanceField implements FieldSelector { + CAN_IP_FORWARD("canIpForward"), + CPU_PLATFORM("cpuPlatform"), + CREATION_TIMESTAMP("creationTimestamp"), + DESCRIPTION("description"), + DISKS("disks"), + ID("id"), + MACHINE_TYPE("machineType"), + METADATA("metadata"), + NAME("name"), + NETWORK_INTERFACES("networkInterfaces"), + SCHEDULING("scheduling"), + SELF_LINK("selfLink"), + SERVICE_ACCOUNTS("serviceAccounts"), + STATUS("status"), + STATUS_MESSAGE("statusMessage"), + TAGS("tags"), + ZONE("zone"); + + static final List REQUIRED_FIELDS = ImmutableList.of(SELF_LINK); + + private final String selector; + + InstanceField(String selector) { + this.selector = selector; + } + + @Override + public String selector() { + return selector; + } + } + + /** + * Base class for list filters. + */ + abstract class ListFilter implements Serializable { + + private static final long serialVersionUID = -238638392811165127L; + + private final String field; + private final ComparisonOperator operator; + private final Object value; + + enum ComparisonOperator { + /** + * Defines an equals filter. + */ + EQ, + + /** + * Defines a not-equals filter. + */ + NE + } + + ListFilter(String field, ComparisonOperator operator, Object value) { + this.field = field; + this.operator = operator; + this.value = value; + } + + @Override + public int hashCode() { + return Objects.hash(field, operator, value); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof ListFilter && toPb().equals(((ListFilter) obj).toPb()); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("field", field) + .add("operator", operator) + .add("value", value) + .toString(); + } + + String toPb() { + return field + ' ' + operator.name().toLowerCase() + ' ' + value.toString(); + } + } + + /** + * Class for filtering disk type lists. 
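+   *
+   * <p>For example, a filter matching disk types whose name starts with {@code pd-} (string
+   * values are RE2 regular expressions and must match the entire field):
+   * <pre>{@code
+   * DiskTypeFilter filter = DiskTypeFilter.equals(DiskTypeField.NAME, "pd-.*");
+   * DiskTypeListOption filterOption = DiskTypeListOption.filter(filter);
+   * }</pre>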
+ */ + class DiskTypeFilter extends ListFilter { + + private static final long serialVersionUID = 4847837203592234453L; + + private DiskTypeFilter(DiskTypeField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskTypeFilter equals(DiskTypeField field, String value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskTypeFilter notEquals(DiskTypeField field, String value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. + */ + public static DiskTypeFilter equals(DiskTypeField field, long value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static DiskTypeFilter notEquals(DiskTypeField field, long value) { + return new DiskTypeFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering machine type lists. + */ + class MachineTypeFilter extends ListFilter { + + private static final long serialVersionUID = 7346062041571853235L; + + private MachineTypeFilter(MachineTypeField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static MachineTypeFilter equals(MachineTypeField field, String value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static MachineTypeFilter notEquals(MachineTypeField field, String value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. + */ + public static MachineTypeFilter equals(MachineTypeField field, long value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static MachineTypeFilter notEquals(MachineTypeField field, long value) { + return new MachineTypeFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering region lists. 
+ */ + class RegionFilter extends ListFilter { + + private static final long serialVersionUID = 4464892812442567172L; + + private RegionFilter(RegionField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static RegionFilter equals(RegionField field, String value) { + return new RegionFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static RegionFilter notEquals(RegionField field, String value) { + return new RegionFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + } + + /** + * Class for filtering zone lists. + */ + class ZoneFilter extends ListFilter { + + private static final long serialVersionUID = -3927428278548808737L; + + private ZoneFilter(ZoneField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static ZoneFilter equals(ZoneField field, String value) { + return new ZoneFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static ZoneFilter notEquals(ZoneField field, String value) { + return new ZoneFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + } + + /** + * Class for filtering operation lists. + */ + class OperationFilter extends ListFilter { + + private static final long serialVersionUID = -3202249202748346427L; + + private OperationFilter(OperationField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static OperationFilter equals(OperationField field, String value) { + return new OperationFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static OperationFilter notEquals(OperationField field, String value) { + return new OperationFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. 
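+   *
+   * <p>For illustration, a filter on the numeric {@code progress} field, where a value of 100
+   * indicates a completed operation:
+   * <pre>{@code
+   * OperationFilter filter = OperationFilter.equals(OperationField.PROGRESS, 100);
+   * OperationListOption filterOption = OperationListOption.filter(filter);
+   * }</pre>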
+ */ + public static OperationFilter equals(OperationField field, long value) { + return new OperationFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static OperationFilter notEquals(OperationField field, long value) { + return new OperationFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering address lists. + */ + class AddressFilter extends ListFilter { + + private static final long serialVersionUID = -227481644259653765L; + + private AddressFilter(AddressField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static AddressFilter equals(AddressField field, String value) { + return new AddressFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static AddressFilter notEquals(AddressField field, String value) { + return new AddressFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + } + + /** + * Class for filtering snapshot lists. + */ + class SnapshotFilter extends ListFilter { + + private static final long serialVersionUID = 8757711630092406747L; + + private SnapshotFilter(SnapshotField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static SnapshotFilter equals(SnapshotField field, String value) { + return new SnapshotFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static SnapshotFilter notEquals(SnapshotField field, String value) { + return new SnapshotFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. + */ + public static SnapshotFilter equals(SnapshotField field, long value) { + return new SnapshotFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static SnapshotFilter notEquals(SnapshotField field, long value) { + return new SnapshotFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering image lists. + */ + class ImageFilter extends ListFilter { + + private static final long serialVersionUID = -3601427417234098397L; + + private ImageFilter(ImageField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. 
For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static ImageFilter equals(ImageField field, String value) { + return new ImageFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static ImageFilter notEquals(ImageField field, String value) { + return new ImageFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. + */ + public static ImageFilter equals(ImageField field, long value) { + return new ImageFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static ImageFilter notEquals(ImageField field, long value) { + return new ImageFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering disk lists. + */ + class DiskFilter extends ListFilter { + + private static final long serialVersionUID = 5856790665396877913L; + + private DiskFilter(DiskField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskFilter equals(DiskField field, String value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static DiskFilter notEquals(DiskField field, String value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns an equals filter for the given field and long value. + */ + public static DiskFilter equals(DiskField field, long value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and long value. + */ + public static DiskFilter notEquals(DiskField field, long value) { + return new DiskFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering subnetwork lists. + */ + class SubnetworkFilter extends ListFilter { + + private static final long serialVersionUID = 979448583739105481L; + + private SubnetworkFilter(SubnetworkField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. 
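+   *
+   * <p>For example, matching all images whose name starts with {@code debian-}:
+   * <pre>{@code
+   * ImageFilter filter = ImageFilter.equals(ImageField.NAME, "debian-.*");
+   * ImageListOption filterOption = ImageListOption.filter(filter);
+   * }</pre>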
+ * + * @see RE2 + */ + public static SubnetworkFilter equals(SubnetworkField field, String value) { + return new SubnetworkFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static SubnetworkFilter notEquals(SubnetworkField field, String value) { + return new SubnetworkFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + } + + /** + * Class for filtering network lists. + */ + class NetworkFilter extends ListFilter { + + private static final long serialVersionUID = 7921406498804130930L; + + private NetworkFilter(NetworkField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static NetworkFilter equals(NetworkField field, String value) { + return new NetworkFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static NetworkFilter notEquals(NetworkField field, String value) { + return new NetworkFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns a equals filter for the given field and boolean value. + */ + public static NetworkFilter equals(NetworkField field, boolean value) { + return new NetworkFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and boolean value. + */ + public static NetworkFilter notEquals(NetworkField field, boolean value) { + return new NetworkFilter(checkNotNull(field), ComparisonOperator.NE, value); + } + } + + /** + * Class for filtering instance lists. + */ + class InstanceFilter extends ListFilter { + + private static final long serialVersionUID = 679008888882025686L; + + private InstanceFilter(InstanceField field, ComparisonOperator operator, Object value) { + super(field.selector(), operator, value); + } + + /** + * Returns an equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static InstanceFilter equals(InstanceField field, String value) { + return new InstanceFilter(checkNotNull(field), ComparisonOperator.EQ, checkNotNull(value)); + } + + /** + * Returns a not-equals filter for the given field and string value. For string fields, + * {@code value} is interpreted as a regular expression using RE2 syntax. {@code value} must + * match the entire field. + * + * @see RE2 + */ + public static InstanceFilter notEquals(InstanceField field, String value) { + return new InstanceFilter(checkNotNull(field), ComparisonOperator.NE, checkNotNull(value)); + } + + /** + * Returns a equals filter for the given field and boolean value. 
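+   *
+   * <p>For example, a filter matching instances that have IP forwarding enabled:
+   * <pre>{@code
+   * InstanceFilter filter = InstanceFilter.equals(InstanceField.CAN_IP_FORWARD, true);
+   * }</pre>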
+ */ + public static InstanceFilter equals(InstanceField field, boolean value) { + return new InstanceFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + + /** + * Returns a not-equals filter for the given field and boolean value. + */ + public static InstanceFilter notEquals(InstanceField field, boolean value) { + return new InstanceFilter(checkNotNull(field), ComparisonOperator.EQ, value); + } + } + + /** + * Class for specifying disk type get options. + */ + class DiskTypeOption extends Option { + + private static final long serialVersionUID = 7349162455602991554L; + + private DiskTypeOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the disk type's fields to be returned by the RPC call. If this + * option is not provided, all disk type's fields are returned. {@code DiskTypeOption.fields} + * can be used to specify only the fields of interest. {@link DiskType#diskTypeId()} is always + * returned, even if not specified. + */ + public static DiskTypeOption fields(DiskTypeField... fields) { + return new DiskTypeOption(ComputeRpc.Option.FIELDS, + Helper.selector(DiskTypeField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying disk type list options. + */ + class DiskTypeListOption extends Option { + + private static final long serialVersionUID = 9051194230847610951L; + + private DiskTypeListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the disk types being listed. + */ + public static DiskTypeListOption filter(DiskTypeFilter filter) { + return new DiskTypeListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disk types returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static DiskTypeListOption pageSize(long pageSize) { + return new DiskTypeListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing disk types. + */ + public static DiskTypeListOption pageToken(String pageToken) { + return new DiskTypeListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the disk type's fields to be returned by the RPC call. If this + * option is not provided, all disk type's fields are returned. + * {@code DiskTypeListOption.fields} can be used to specify only the fields of interest. + * {@link DiskType#diskTypeId()} is always returned, even if not specified. + */ + public static DiskTypeListOption fields(DiskTypeField... fields) { + return new DiskTypeListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", DiskTypeField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying disk type aggregated list options. + */ + class DiskTypeAggregatedListOption extends Option { + + private static final long serialVersionUID = 7611137483018305170L; + + private DiskTypeAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the disk types being listed. + */ + public static DiskTypeAggregatedListOption filter(DiskTypeFilter filter) { + return new DiskTypeAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disk types returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). 
If not specified 500 is used. + */ + public static DiskTypeAggregatedListOption pageSize(long pageSize) { + return new DiskTypeAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing disk types. + */ + public static DiskTypeAggregatedListOption pageToken(String pageToken) { + return new DiskTypeAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Class for specifying machine type get options. + */ + class MachineTypeOption extends Option { + + private static final long serialVersionUID = 7349162455602991554L; + + private MachineTypeOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the machine type's fields to be returned by the RPC call. If + * this option is not provided, all machine type's fields are returned. + * {@code MachineTypeOption.fields} can be used to specify only the fields of interest. + * {@link MachineType#machineTypeId()} is always returned, even if not specified. + */ + public static MachineTypeOption fields(MachineTypeField... fields) { + return new MachineTypeOption(ComputeRpc.Option.FIELDS, + Helper.selector(MachineTypeField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying machine type list options. + */ + class MachineTypeListOption extends Option { + + private static final long serialVersionUID = -2974553049419897902L; + + private MachineTypeListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the machine types being listed. + */ + public static MachineTypeListOption filter(MachineTypeFilter filter) { + return new MachineTypeListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of machine types returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static MachineTypeListOption pageSize(long pageSize) { + return new MachineTypeListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing machine types. + */ + public static MachineTypeListOption pageToken(String pageToken) { + return new MachineTypeListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the machine type's fields to be returned by the RPC call. If + * this option is not provided, all machine type's fields are returned. + * {@code MachineTypeListOption.fields} can be used to specify only the fields of interest. + * {@link MachineType#machineTypeId()} is always returned, even if not specified. + */ + public static MachineTypeListOption fields(MachineTypeField... fields) { + return new MachineTypeListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", MachineTypeField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying machine type aggregated list options. + */ + class MachineTypeAggregatedListOption extends Option { + + private static final long serialVersionUID = 8492257475500296057L; + + private MachineTypeAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the machine types being listed. 
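+   *
+   * <p>For illustration, restricting an aggregated machine type listing to types with 8 guest
+   * CPUs:
+   * <pre>{@code
+   * MachineTypeFilter filter = MachineTypeFilter.equals(MachineTypeField.GUEST_CPUS, 8);
+   * MachineTypeAggregatedListOption filterOption = MachineTypeAggregatedListOption.filter(filter);
+   * }</pre>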
+ */ + public static MachineTypeAggregatedListOption filter(MachineTypeFilter filter) { + return new MachineTypeAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of machine types returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static MachineTypeAggregatedListOption pageSize(long pageSize) { + return new MachineTypeAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing machine types. + */ + public static MachineTypeAggregatedListOption pageToken(String pageToken) { + return new MachineTypeAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Class for specifying region get options. + */ + class RegionOption extends Option { + + private static final long serialVersionUID = 2025084807788610826L; + + private RegionOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the region's fields to be returned by the RPC call. If this + * option is not provided, all region's fields are returned. {@code RegionOption.fields} can be + * used to specify only the fields of interest. {@link Region#regionId()} is always + * returned, even if not specified. + */ + public static RegionOption fields(RegionField... fields) { + return new RegionOption(ComputeRpc.Option.FIELDS, + Helper.selector(RegionField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying region list options. + */ + class RegionListOption extends Option { + + private static final long serialVersionUID = 3348089279267170211L; + + private RegionListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the regions being listed. + */ + public static RegionListOption filter(RegionFilter filter) { + return new RegionListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of regions returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static RegionListOption pageSize(long pageSize) { + return new RegionListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing regions. + */ + public static RegionListOption pageToken(String pageToken) { + return new RegionListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the region's fields to be returned by the RPC call. If this + * option is not provided, all region's fields are returned. {@code RegionListOption.fields} can + * be used to specify only the fields of interest. {@link Region#regionId()} is always + * returned, even if not specified. + */ + public static RegionListOption fields(RegionField... fields) { + return new RegionListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", RegionField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying zone get options. + */ + class ZoneOption extends Option { + + private static final long serialVersionUID = -2968652076389846258L; + + private ZoneOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the zone's fields to be returned by the RPC call. 
If this option + * is not provided, all zone's fields are returned. {@code ZoneOption.fields} can be used to + * specify only the fields of interest. {@link Zone#zoneId()} is always returned, even if + * not specified. + */ + public static ZoneOption fields(ZoneField... fields) { + return new ZoneOption(ComputeRpc.Option.FIELDS, + Helper.selector(ZoneField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying zone list options. + */ + class ZoneListOption extends Option { + + private static final long serialVersionUID = -4721971371200905764L; + + private ZoneListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the zones being listed. + */ + public static ZoneListOption filter(ZoneFilter filter) { + return new ZoneListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of zones returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static ZoneListOption pageSize(long pageSize) { + return new ZoneListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing zones. + */ + public static ZoneListOption pageToken(String pageToken) { + return new ZoneListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the zone's fields to be returned by the RPC call. If this option + * is not provided, all zone's fields are returned. {@code ZoneListOption.fields} can be used to + * specify only the fields of interest. {@link Zone#zoneId()} is always returned, even if + * not specified. + */ + public static ZoneListOption fields(ZoneField... fields) { + return new ZoneListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", ZoneField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying license get options. + */ + class LicenseOption extends Option { + + private static final long serialVersionUID = -2968652076389846258L; + + private LicenseOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the license's fields to be returned by the RPC call. If this + * option is not provided, all license's fields are returned. {@code LicenseOption.fields} can + * be used to specify only the fields of interest. {@link License#licenseId()} is always + * returned, even if not specified. + */ + public static LicenseOption fields(LicenseField... fields) { + return new LicenseOption(ComputeRpc.Option.FIELDS, + Helper.selector(LicenseField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying operation get options. + */ + class OperationOption extends Option { + + private static final long serialVersionUID = -4572636917684779912L; + + private OperationOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the operation's fields to be returned by the RPC call. If this + * option is not provided, all operation's fields are returned. {@code OperationOption.fields} + * can be used to specify only the fields of interest. {@link Operation#operationId()} is + * always returned, even if not specified. + */ + public static OperationOption fields(OperationField... 
fields) { + return new OperationOption(ComputeRpc.Option.FIELDS, + Helper.selector(OperationField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying operation list options. + */ + class OperationListOption extends Option { + + private static final long serialVersionUID = -1509532420587265823L; + + private OperationListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the operations being listed. + */ + public static OperationListOption filter(OperationFilter filter) { + return new OperationListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of operations returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static OperationListOption pageSize(long pageSize) { + return new OperationListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing operations. + */ + public static OperationListOption pageToken(String pageToken) { + return new OperationListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the operation's fields to be returned by the RPC call. If this + * option is not provided, all operation's fields are returned. + * {@code OperationListOption.fields} can be used to specify only the fields of interest. + * {@link Operation#operationId()} is always returned, even if not specified. + */ + public static OperationListOption fields(OperationField... fields) { + return new OperationListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", OperationField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying address get options. + */ + class AddressOption extends Option { + + private static final long serialVersionUID = -5755491818692494389L; + + private AddressOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the address' fields to be returned by the RPC call. If this + * option is not provided, all address' fields are returned. {@code AddressOption.fields} can be + * used to specify only the fields of interest. {@link Address#addressId()} is always + * returned, even if not specified. + */ + public static AddressOption fields(AddressField... fields) { + return new AddressOption(ComputeRpc.Option.FIELDS, + Helper.selector(AddressField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying address list options. + */ + class AddressListOption extends Option { + + private static final long serialVersionUID = -4281322966374929346L; + + private AddressListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the addresses being listed. + */ + public static AddressListOption filter(AddressFilter filter) { + return new AddressListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of addresses returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static AddressListOption pageSize(long pageSize) { + return new AddressListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing addresses. 
+ */ + public static AddressListOption pageToken(String pageToken) { + return new AddressListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the address' fields to be returned by the RPC call. If this + * option is not provided, all address' fields are returned. {@code AddressListOption.fields} + * can be used to specify only the fields of interest. {@link Address#addressId()} is always + * returned, even if not specified. + */ + public static AddressListOption fields(AddressField... fields) { + return new AddressListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", AddressField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying address aggregated list options. + */ + class AddressAggregatedListOption extends Option { + + private static final long serialVersionUID = -95538941541279561L; + + private AddressAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the addresses being listed. + */ + public static AddressAggregatedListOption filter(AddressFilter filter) { + return new AddressAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of addresses returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static AddressAggregatedListOption pageSize(long pageSize) { + return new AddressAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing addresses. + */ + public static AddressAggregatedListOption pageToken(String pageToken) { + return new AddressAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Class for specifying snapshot get options. + */ + class SnapshotOption extends Option { + + private static final long serialVersionUID = -3505179459035500945L; + + private SnapshotOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the snapshot's fields to be returned by the RPC call. If this + * option is not provided, all the snapshot's fields are returned. {@code SnapshotOption.fields} + * can be used to specify only the fields of interest. {@link Snapshot#snapshotId()} is always + * returned, even if not specified. + */ + public static SnapshotOption fields(SnapshotField... fields) { + return new SnapshotOption(ComputeRpc.Option.FIELDS, + Helper.selector(SnapshotField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying snapshot list options. + */ + class SnapshotListOption extends Option { + + private static final long serialVersionUID = 8278588147660831257L; + + private SnapshotListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the snapshots being listed. + */ + public static SnapshotListOption filter(SnapshotFilter filter) { + return new SnapshotListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of snapshots returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. 
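+ *
+ * <p>A minimal, illustrative sketch of paging through snapshots 100 at a time; it assumes an
+ * initialized {@code Compute} service object named {@code compute} (a placeholder name):
+ * <pre> {@code
+ * // request pages of at most 100 snapshots each
+ * Page<Snapshot> page = compute.listSnapshots(SnapshotListOption.pageSize(100));
+ * for (Snapshot snapshot : page.values()) {
+ *   // inspect the snapshots in the first page
+ * }
+ * }</pre>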
+ */ + public static SnapshotListOption pageSize(long pageSize) { + return new SnapshotListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing snapshots. + */ + public static SnapshotListOption pageToken(String pageToken) { + return new SnapshotListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the snapshot's fields to be returned by the RPC call. If this + * option is not provided, all the snapshot's fields are returned. + * {@code SnapshotListOption.fields} can be used to specify only the fields of interest. + * {@link Snapshot#snapshotId()} is always returned, even if not specified. + */ + public static SnapshotListOption fields(SnapshotField... fields) { + return new SnapshotListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", SnapshotField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying image get options. + */ + class ImageOption extends Option { + + private static final long serialVersionUID = -7622190783089299272L; + + private ImageOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the image's fields to be returned by the RPC call. If this + * option is not provided, all image's fields are returned. {@code ImageOption.fields} can be + * used to specify only the fields of interest. {@link Image#imageId()} and + * {@link Image#configuration()} are always returned, even if not specified. + */ + public static ImageOption fields(ImageField... fields) { + return new ImageOption(ComputeRpc.Option.FIELDS, + Helper.selector(ImageField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying image list options. + */ + class ImageListOption extends Option { + + private static final long serialVersionUID = -4927977224287915654L; + + private ImageListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the images being listed. + */ + public static ImageListOption filter(ImageFilter filter) { + return new ImageListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of images returned per page. {@code pageSize} + * must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static ImageListOption pageSize(long pageSize) { + return new ImageListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing images. + */ + public static ImageListOption pageToken(String pageToken) { + return new ImageListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the image's fields to be returned by the RPC call. If this + * option is not provided, all image's fields are returned. {@code ImageListOption.fields} can + * be used to specify only the fields of interest. {@link Image#imageId()} and + * {@link Image#configuration()} are always returned, even if not specified. + */ + public static ImageListOption fields(ImageField... fields) { + return new ImageListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", ImageField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying disk get options. 
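+ *
+ * <p>For example, the snippet below (an illustrative sketch with placeholder resource names and
+ * an assumed {@code Compute} service object named {@code compute}) requests only the required
+ * disk fields:
+ * <pre> {@code
+ * DiskId diskId = DiskId.of("us-central1-a", "disk-name");
+ * // DiskOption.fields() with no arguments restricts the response to the required fields only
+ * Disk disk = compute.getDisk(diskId, DiskOption.fields());
+ * }</pre>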
+ */ + class DiskOption extends Option { + + private static final long serialVersionUID = -4354796876226661667L; + + private DiskOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the disk's fields to be returned by the RPC call. If this option + * is not provided, all disk's fields are returned. {@code DiskOption.fields} can be used to + * specify only the fields of interest. {@link Disk#diskId()}, + * {@link DiskConfiguration#diskType()} and either + * {@link SnapshotDiskConfiguration#sourceSnapshot()} or + * {@link ImageDiskConfiguration#sourceImage()} are always returned, even if not specified. + */ + public static DiskOption fields(DiskField... fields) { + return new DiskOption(ComputeRpc.Option.FIELDS, + Helper.selector(DiskField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying disk list options. + */ + class DiskListOption extends Option { + + private static final long serialVersionUID = -5148497888688645905L; + + private DiskListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the disks being listed. + */ + public static DiskListOption filter(DiskFilter filter) { + return new DiskListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disks returned per page. {@code pageSize} + * must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static DiskListOption pageSize(long pageSize) { + return new DiskListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing disks. + */ + public static DiskListOption pageToken(String pageToken) { + return new DiskListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the disk's fields to be returned by the RPC call. If this option + * is not provided, all disk's fields are returned. {@code DiskListOption.fields} can be used to + * specify only the fields of interest. {@link Disk#diskId()}, + * {@link DiskConfiguration#diskType()} and either + * {@link SnapshotDiskConfiguration#sourceSnapshot()} or + * {@link ImageDiskConfiguration#sourceImage()} are always returned, even if not specified. + */ + public static DiskListOption fields(DiskField... fields) { + return new DiskListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", DiskField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying disk aggregated list options. + */ + class DiskAggregatedListOption extends Option { + + private static final long serialVersionUID = 1163784797870242766L; + + private DiskAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the disks being listed. + */ + public static DiskAggregatedListOption filter(DiskFilter filter) { + return new DiskAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of disks returned per page. {@code pageSize} + * must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static DiskAggregatedListOption pageSize(long pageSize) { + return new DiskAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing disks. 
+ */
+ public static DiskAggregatedListOption pageToken(String pageToken) {
+ return new DiskAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken);
+ }
+ }
+
+ /**
+ * Class for specifying subnetwork get options.
+ */
+ class SubnetworkOption extends Option {
+
+ private static final long serialVersionUID = 1994416967962074717L;
+
+ private SubnetworkOption(ComputeRpc.Option option, Object value) {
+ super(option, value);
+ }
+
+ /**
+ * Returns an option to specify the subnetwork's fields to be returned by the RPC call. If this
+ * option is not provided, all subnetwork's fields are returned. {@code SubnetworkOption.fields}
+ * can be used to specify only the fields of interest. {@link Subnetwork#subnetworkId()} is
+ * always returned, even if not specified.
+ */
+ public static SubnetworkOption fields(SubnetworkField... fields) {
+ return new SubnetworkOption(ComputeRpc.Option.FIELDS,
+ Helper.selector(SubnetworkField.REQUIRED_FIELDS, fields));
+ }
+ }
+
+ /**
+ * Class for specifying subnetwork list options.
+ */
+ class SubnetworkListOption extends Option {
+
+ private static final long serialVersionUID = -2978666213373829606L;
+
+ private SubnetworkListOption(ComputeRpc.Option option, Object value) {
+ super(option, value);
+ }
+
+ /**
+ * Returns an option to specify a filter on the subnetworks being listed.
+ */
+ public static SubnetworkListOption filter(SubnetworkFilter filter) {
+ return new SubnetworkListOption(ComputeRpc.Option.FILTER, filter.toPb());
+ }
+
+ /**
+ * Returns an option to specify the maximum number of subnetworks returned per page.
+ * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used.
+ */
+ public static SubnetworkListOption pageSize(long pageSize) {
+ return new SubnetworkListOption(ComputeRpc.Option.MAX_RESULTS, pageSize);
+ }
+
+ /**
+ * Returns an option to specify the page token from which to start listing subnetworks.
+ */
+ public static SubnetworkListOption pageToken(String pageToken) {
+ return new SubnetworkListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken);
+ }
+
+ /**
+ * Returns an option to specify the subnetwork's fields to be returned by the RPC call. If this
+ * option is not provided, all subnetwork's fields are returned.
+ * {@code SubnetworkListOption.fields} can be used to specify only the fields of interest.
+ * {@link Subnetwork#subnetworkId()} is always returned, even if not specified.
+ */
+ public static SubnetworkListOption fields(SubnetworkField... fields) {
+ return new SubnetworkListOption(ComputeRpc.Option.FIELDS,
+ Helper.listSelector("items", SubnetworkField.REQUIRED_FIELDS, fields));
+ }
+ }
+
+ /**
+ * Class for specifying subnetwork aggregated list options.
+ */
+ class SubnetworkAggregatedListOption extends Option {
+
+ private static final long serialVersionUID = -4033514850525545027L;
+
+ private SubnetworkAggregatedListOption(ComputeRpc.Option option, Object value) {
+ super(option, value);
+ }
+
+ /**
+ * Returns an option to specify a filter on the subnetworks being listed.
+ */
+ public static SubnetworkAggregatedListOption filter(SubnetworkFilter filter) {
+ return new SubnetworkAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb());
+ }
+
+ /**
+ * Returns an option to specify the maximum number of subnetworks returned per page.
+ * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used.
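+ *
+ * <p>An illustrative sketch of listing subnetworks across all regions in pages of 100; it
+ * assumes an initialized {@code Compute} service object named {@code compute} (a placeholder):
+ * <pre> {@code
+ * Page<Subnetwork> page =
+ *     compute.listSubnetworks(SubnetworkAggregatedListOption.pageSize(100));
+ * }</pre>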
+ */ + public static SubnetworkAggregatedListOption pageSize(long pageSize) { + return new SubnetworkAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing subnetworks. + */ + public static SubnetworkAggregatedListOption pageToken(String pageToken) { + return new SubnetworkAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Class for specifying network get options. + */ + class NetworkOption extends Option { + + private static final long serialVersionUID = 5346750551643875754L; + + private NetworkOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the network's fields to be returned by the RPC call. If this + * option is not provided, all network's fields are returned. {@code NetworkOption.fields} + * can be used to specify only the fields of interest. {@link Network#networkId()} and + * {@link Network#configuration()} are always returned, even if not specified. + */ + public static NetworkOption fields(NetworkField... fields) { + return new NetworkOption(ComputeRpc.Option.FIELDS, + Helper.selector(NetworkField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying network list options. + */ + class NetworkListOption extends Option { + + private static final long serialVersionUID = -4291731916527773896L; + + private NetworkListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the networks being listed. + */ + public static NetworkListOption filter(NetworkFilter filter) { + return new NetworkListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of networks returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static NetworkListOption pageSize(long pageSize) { + return new NetworkListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing networks. + */ + public static NetworkListOption pageToken(String pageToken) { + return new NetworkListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the network's fields to be returned by the RPC call. If this + * option is not provided, all network's fields are returned. {@code NetworkListOption.fields} + * can be used to specify only the fields of interest. {@link Network#networkId()} and + * {@link Network#configuration()} are always returned, even if not specified. + */ + public static NetworkListOption fields(NetworkField... fields) { + return new NetworkListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", NetworkField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying instance get options. + */ + class InstanceOption extends Option { + + private static final long serialVersionUID = -5277658025892081493L; + + private InstanceOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify the instance's fields to be returned by the RPC call. If this + * option is not provided, all instance's fields are returned. {@code InstanceOption.fields} + * can be used to specify only the fields of interest. {@link Instance#instanceId()} is always + * returned, even if not specified. + */ + public static InstanceOption fields(InstanceField... 
fields) { + return new InstanceOption(ComputeRpc.Option.FIELDS, + Helper.selector(InstanceField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying instance list options. + */ + class InstanceListOption extends Option { + + private static final long serialVersionUID = -1096684312959047430L; + + private InstanceListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the instances being listed. + */ + public static InstanceListOption filter(InstanceFilter filter) { + return new InstanceListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of instances returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static InstanceListOption pageSize(long pageSize) { + return new InstanceListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing instances. + */ + public static InstanceListOption pageToken(String pageToken) { + return new InstanceListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + + /** + * Returns an option to specify the instance's fields to be returned by the RPC call. If this + * option is not provided, all instance's fields are returned. {@code InstanceListOption.fields} + * can be used to specify only the fields of interest. {@link Instance#instanceId()} is always + * returned, even if not specified. + */ + public static InstanceListOption fields(InstanceField... fields) { + return new InstanceListOption(ComputeRpc.Option.FIELDS, + Helper.listSelector("items", InstanceField.REQUIRED_FIELDS, fields)); + } + } + + /** + * Class for specifying instance aggregated list options. + */ + class InstanceAggregatedListOption extends Option { + + private static final long serialVersionUID = -2020005298975967713L; + + private InstanceAggregatedListOption(ComputeRpc.Option option, Object value) { + super(option, value); + } + + /** + * Returns an option to specify a filter on the instances being listed. + */ + public static InstanceAggregatedListOption filter(InstanceFilter filter) { + return new InstanceAggregatedListOption(ComputeRpc.Option.FILTER, filter.toPb()); + } + + /** + * Returns an option to specify the maximum number of instances returned per page. + * {@code pageSize} must be between 0 and 500 (inclusive). If not specified 500 is used. + */ + public static InstanceAggregatedListOption pageSize(long pageSize) { + return new InstanceAggregatedListOption(ComputeRpc.Option.MAX_RESULTS, pageSize); + } + + /** + * Returns an option to specify the page token from which to start listing instances. + */ + public static InstanceAggregatedListOption pageToken(String pageToken) { + return new InstanceAggregatedListOption(ComputeRpc.Option.PAGE_TOKEN, pageToken); + } + } + + /** + * Returns the requested disk type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + DiskType getDiskType(DiskTypeId diskTypeId, DiskTypeOption... options); + + /** + * Returns the requested disk type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + DiskType getDiskType(String zone, String diskType, DiskTypeOption... options); + + /** + * Lists the disk types in the provided zone. + * + * @throws ComputeException upon failure + */ + Page listDiskTypes(String zone, DiskTypeListOption... options); + + /** + * Lists the disk types in all zones. 
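+ *
+ * <p>Illustrative sketch (assumes an initialized {@code Compute} service object named
+ * {@code compute}, a placeholder):
+ * <pre> {@code
+ * // list disk types across every zone of the project
+ * Page<DiskType> diskTypes = compute.listDiskTypes();
+ * for (DiskType diskType : diskTypes.values()) {
+ *   // inspect the disk types in the first page
+ * }
+ * }</pre>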
+ * + * @throws ComputeException upon failure + */ + Page listDiskTypes(DiskTypeAggregatedListOption... options); + + /** + * Returns the requested machine type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + MachineType getMachineType(MachineTypeId machineTypeId, MachineTypeOption... options); + + /** + * Returns the requested machine type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + MachineType getMachineType(String zone, String machineType, MachineTypeOption... options); + + /** + * Lists the machine types in the provided zone. + * + * @throws ComputeException upon failure + */ + Page listMachineTypes(String zone, MachineTypeListOption... options); + + /** + * Lists the machine types in all zones. + * + * @throws ComputeException upon failure + */ + Page listMachineTypes(MachineTypeAggregatedListOption... options); + + /** + * Returns the requested region or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Region getRegion(String region, RegionOption... options); + + /** + * Lists the regions. + * + * @throws ComputeException upon failure + */ + Page listRegions(RegionListOption... options); + + /** + * Returns the requested zone or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Zone getZone(String zone, ZoneOption... options); + + /** + * Lists the zones. + * + * @throws ComputeException upon failure + */ + Page listZones(ZoneListOption... options); + + /** + * Returns the requested license or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + License getLicense(String license, LicenseOption... options); + + /** + * Returns the requested license or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + License getLicense(LicenseId license, LicenseOption... options); + + /** + * Returns the requested operation or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Operation getOperation(OperationId operationId, OperationOption... options); + + /** + * Lists the global operations. + * + * @throws ComputeException upon failure + */ + Page listGlobalOperations(OperationListOption... options); + + /** + * Lists the operations for the provided region. These are operations that create/modify/delete + * resources that live in a region (e.g. subnetworks). + * + * @throws ComputeException upon failure + */ + Page listRegionOperations(String region, OperationListOption... options); + + /** + * Lists the operations for the provided zone. These are operations that create/modify/delete + * resources that live in a zone (e.g. instances). + * + * @throws ComputeException upon failure + */ + Page listZoneOperations(String zone, OperationListOption... options); + + /** + * Deletes the requested operation. Delete is only possible for operations that have completed + * their execution. Any attempt to delete a running operation will fail. + * + * @return {@code true} if operation was deleted, {@code false} if it was not found + * @throws ComputeException upon failure + */ + boolean deleteOperation(OperationId operation); + + /** + * Returns the requested address or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Address getAddress(AddressId addressId, AddressOption... options); + + /** + * Creates a new address. 
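+ *
+ * <p>A minimal sketch of polling the returned operation until it completes; it assumes an
+ * initialized {@code Compute} service object named {@code compute} and a previously built
+ * {@code AddressInfo} named {@code addressInfo} (both placeholders):
+ * <pre> {@code
+ * Operation operation = compute.create(addressInfo);
+ * while (!operation.isDone()) {
+ *   Thread.sleep(1000L);
+ * }
+ * if (operation.errors() == null) {
+ *   // the address was created successfully
+ * }
+ * }</pre>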
+ * + * @return an operation for address' creation + * @throws ComputeException upon failure + */ + Operation create(AddressInfo address, OperationOption... options); + + /** + * Lists the global addresses. + * + * @throws ComputeException upon failure + */ + Page
<Address> listGlobalAddresses(AddressListOption... options);
+
+ /**
+ * Lists the region addresses for the provided region.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page<Address>
listRegionAddresses(String region, AddressListOption... options);
+
+ /**
+ * Lists both global and region addresses.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page<Address>
listAddresses(AddressAggregatedListOption... options); + + /** + * Deletes the requested address. + * + * @return an operation if the request was issued correctly, {@code null} if the address was not + * found + * @throws ComputeException upon failure + */ + Operation deleteAddress(AddressId addressId, OperationOption... options); + + /** + * Creates a new snapshot. + * + * @return a zone operation for snapshot creation + * @throws ComputeException upon failure + */ + Operation create(SnapshotInfo snapshot, OperationOption... options); + + /** + * Returns the requested snapshot or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Snapshot getSnapshot(String snapshot, SnapshotOption... options); + + /** + * Lists snapshots. + * + * @throws ComputeException upon failure + */ + Page listSnapshots(SnapshotListOption... options); + + /** + * Deletes the requested snapshot. Keep in mind that deleting a single snapshot might not + * necessarily delete all the data for that snapshot. If any data for the snapshot that is marked + * for deletion is needed for subsequent snapshots, the data will be moved to the next snapshot. + * + * @return a global operation if the request was issued correctly, {@code null} if the snapshot + * was not found + * @throws ComputeException upon failure + * @see + * Deleting a snapshot + */ + Operation deleteSnapshot(SnapshotId snapshot, OperationOption... options); + + /** + * Deletes the requested snapshot. Keep in mind that deleting a single snapshot might not + * necessarily delete all the data for that snapshot. If any data on the snapshot that is marked + * for deletion is needed for subsequent snapshots, the data will be moved to the next snapshot. + * + * @return a global operation if the request was issued correctly, {@code null} if the snapshot + * was not found + * @throws ComputeException upon failure + * @see + * Deleting a snapshot + */ + Operation deleteSnapshot(String snapshot, OperationOption... options); + + /** + * Creates a new image. + * + * @return a global operation for image's creation + * @throws ComputeException upon failure + */ + Operation create(ImageInfo image, OperationOption... options); + + /** + * Returns the requested image or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Image getImage(ImageId imageId, ImageOption... options); + + /** + * Lists images in the provided project that are available to the current user. This method can be + * used to list publicly-available images by providing the respective image project. Examples of + * image projects are: {@code centos-cloud}, {@code coreos-cloud}, {@code debian-cloud}, + * {@code opensuse-cloud}, {@code rhel-cloud}, {@code suse-cloud}, {@code ubuntu-os-cloud} and + * {@code windows-cloud}. Attempting to delete or deprecate a publicly-available image will fail. + * + * @throws ComputeException upon failure + * @see Operating Systems + */ + Page listImages(String project, ImageListOption... options); + + /** + * Lists images in the current project. + * + * @throws ComputeException upon failure + */ + Page listImages(ImageListOption... options); + + /** + * Deletes the requested image. + * + * @return a global operation if the delete request was issued correctly, {@code null} if the + * image was not found + * @throws ComputeException upon failure or if {@code image} is a publicly-available image + */ + Operation deleteImage(ImageId image, OperationOption... options); + + /** + * Deprecates the requested image. 
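+ *
+ * <p>Illustrative sketch (assumes an initialized {@code Compute} service object named
+ * {@code compute} and a previously built {@link DeprecationStatus} named
+ * {@code deprecationStatus}; the image name is a placeholder):
+ * <pre> {@code
+ * ImageId imageId = ImageId.of("my-project", "my-image");
+ * Operation operation = compute.deprecate(imageId, deprecationStatus);
+ * }</pre>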
+ *
+ * @return a global operation if the deprecation request was issued correctly, {@code null} if the
+ * image was not found
+ * @throws ComputeException upon failure or if {@code image} is a publicly-available image
+ */
+ Operation deprecate(ImageId image, DeprecationStatus deprecationStatus,
+ OperationOption... options);
+
+ /**
+ * Returns the requested disk or {@code null} if not found.
+ *
+ * @throws ComputeException upon failure
+ */
+ Disk getDisk(DiskId diskId, DiskOption... options);
+
+ /**
+ * Creates a new disk.
+ *
+ * @return a zone operation for disk's creation
+ * @throws ComputeException upon failure
+ */
+ Operation create(DiskInfo disk, OperationOption... options);
+
+ /**
+ * Lists disks for the provided zone.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page listDisks(String zone, DiskListOption... options);
+
+ /**
+ * Lists disks for all zones.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page listDisks(DiskAggregatedListOption... options);
+
+ /**
+ * Deletes the requested disk.
+ *
+ * @return a zone operation if the request was issued correctly, {@code null} if the disk was not
+ * found
+ * @throws ComputeException upon failure
+ */
+ Operation deleteDisk(DiskId disk, OperationOption... options);
+
+ /**
+ * Resizes the disk to the requested size. The new size must be larger than the previous one.
+ *
+ * @return a zone operation if the request was issued correctly, {@code null} if the disk was not
+ * found
+ * @throws ComputeException upon failure or if the new disk size is smaller than the previous one
+ */
+ Operation resize(DiskId disk, long sizeGb, OperationOption... options);
+
+ /**
+ * Creates a new subnetwork.
+ *
+ * @return a region operation for subnetwork's creation
+ * @throws ComputeException upon failure
+ */
+ Operation create(SubnetworkInfo subnetwork, OperationOption... options);
+
+ /**
+ * Returns the requested subnetwork or {@code null} if not found.
+ *
+ * @throws ComputeException upon failure
+ */
+ Subnetwork getSubnetwork(SubnetworkId subnetworkId, SubnetworkOption... options);
+
+ /**
+ * Lists subnetworks for the provided region.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page listSubnetworks(String region, SubnetworkListOption... options);
+
+ /**
+ * Lists subnetworks for all regions.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page listSubnetworks(SubnetworkAggregatedListOption... options);
+
+ /**
+ * Deletes the requested subnetwork. Any attempt to delete an automatically created subnetwork
+ * will fail.
+ *
+ * @return a region operation if the delete request was issued correctly, {@code null} if the
+ * subnetwork was not found
+ * @throws ComputeException upon failure
+ */
+ Operation deleteSubnetwork(SubnetworkId subnetwork, OperationOption... options);
+
+ /**
+ * Creates a new network.
+ *
+ * @return a global operation for network's creation
+ * @throws ComputeException upon failure
+ */
+ Operation create(NetworkInfo network, OperationOption... options);
+
+ /**
+ * Returns the requested network or {@code null} if not found.
+ *
+ * @throws ComputeException upon failure
+ */
+ Network getNetwork(String network, NetworkOption... options);
+
+ /**
+ * Lists networks.
+ *
+ * @throws ComputeException upon failure
+ */
+ Page listNetworks(NetworkListOption... options);
+
+ /**
+ * Deletes the requested network.
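+ *
+ * <p>Illustrative sketch (assumes an initialized {@code Compute} service object named
+ * {@code compute}; the network name is a placeholder):
+ * <pre> {@code
+ * Operation operation = compute.deleteNetwork("my-network");
+ * while (!operation.isDone()) {
+ *   Thread.sleep(1000L);
+ * }
+ * }</pre>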
+ * + * @return a global operation if the delete request was issued correctly, {@code null} if the + * network was not found + * @throws ComputeException upon failure + */ + Operation deleteNetwork(String network, OperationOption... options); + + /** + * Deletes the requested network. + * + * @return a global operation if the delete request was issued correctly, {@code null} if the + * network was not found + * @throws ComputeException upon failure + */ + Operation deleteNetwork(NetworkId network, OperationOption... options); + + /** + * Creates a new instance. + * + * @return a zone operation for instance's creation + * @throws ComputeException upon failure + */ + Operation create(InstanceInfo instance, OperationOption... options); + + /** + * Returns the requested instance or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Instance getInstance(InstanceId instance, InstanceOption... options); + + /** + * Lists instances for the provided zone. + * + * @throws ComputeException upon failure + */ + Page listInstances(String zone, InstanceListOption... options); + + /** + * Lists instances for all zones. + * + * @throws ComputeException upon failure + */ + Page listInstances(InstanceAggregatedListOption... options); + + /** + * Deletes the requested instance. + * + * @return a zone operation if the delete request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation deleteInstance(InstanceId instance, OperationOption... options); + + /** + * Adds an access configuration to an instance's network interface. + * + * @return a zone operation if the add request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation addAccessConfig(InstanceId instance, String networkInterface, AccessConfig accessConfig, + OperationOption... options); + + /** + * Attaches a persistent disk to an instance given its configuration. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation attachDisk(InstanceId instance, PersistentDiskConfiguration configuration, + OperationOption... options); + + /** + * Attaches a persistent disk to an instance given the device name and its configuration. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation attachDisk(InstanceId instance, String deviceName, + PersistentDiskConfiguration configuration, OperationOption... options); + + /** + * Attaches a persistent disk to an instance given the device name, its configuration and the + * device index. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation attachDisk(InstanceId instance, String deviceName, + PersistentDiskConfiguration configuration, int index, OperationOption... options); + + /** + * Deletes an access configuration from an instance's network interface. + * + * @return a zone operation if the delete request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation deleteAccessConfig(InstanceId instance, String networkInterface, String accessConfig, + OperationOption... 
options); + + /** + * Detaches a disk from an instance. + * + * @return a zone operation if the detach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation detachDisk(InstanceId instance, String deviceName, OperationOption... options); + + /** + * Returns the serial port output for the provided instance and port number. {@code port} must be + * between 1 and 4 (inclusive). + * + * @return the serial port output or {@code null} if the instance was not found + * @throws ComputeException upon failure + */ + String getSerialPortOutput(InstanceId instance, int port); + + /** + * Returns the default serial port output for the provided instance. Default serial port + * corresponds to port number 1. + * + * @return the serial port output or {@code null} if the instance was not found + * @throws ComputeException upon failure + */ + String getSerialPortOutput(InstanceId instance); + + /** + * Resets the provided instance. + * + * @return a zone operation if the reset request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation reset(InstanceId instance, OperationOption... options); + + /** + * Sets the auto-delete flag for a disk attached to the provided instance. + * + * @return a zone operation if the flag setting request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation setDiskAutoDelete(InstanceId instance, String deviceName, boolean autoDelete, + OperationOption... options); + + /** + * Sets the machine type for the provided instance. Instance must be in + * {@link InstanceInfo.Status#TERMINATED} state to be able to set its machine type. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setMachineType(InstanceId instance, MachineTypeId machineType, + OperationOption... options); + + /** + * Sets the metadata for the provided instance. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setMetadata(InstanceId instance, Metadata metadata, OperationOption... options); + + /** + * Sets the scheduling options for the provided instance. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setSchedulingOptions(InstanceId instance, SchedulingOptions scheduling, + OperationOption... options); + + /** + * Sets the tags for the provided instance. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setTags(InstanceId instance, Tags tags, OperationOption... options); + + /** + * Starts the provided instance. + * + * @return a zone operation if the start request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation start(InstanceId instance, OperationOption... options); + + /** + * Stops the provided instance. 
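+ *
+ * <p>Illustrative sketch of stopping an instance and waiting for the operation to finish; it
+ * assumes an initialized {@code Compute} service object named {@code compute} and placeholder
+ * resource names:
+ * <pre> {@code
+ * InstanceId instanceId = InstanceId.of("us-central1-a", "instance-name");
+ * Operation operation = compute.stop(instanceId);
+ * while (!operation.isDone()) {
+ *   Thread.sleep(1000L);
+ * }
+ * if (operation.errors() == null) {
+ *   // the instance was stopped successfully
+ * }
+ * }</pre>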
+ * + * @return a zone operation if the stop request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation stop(InstanceId instance, OperationOption... options); +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeException.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeException.java new file mode 100644 index 000000000000..1a69457040d6 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeException.java @@ -0,0 +1,63 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.cloud.BaseServiceException; +import com.google.cloud.RetryHelper.RetryHelperException; +import com.google.cloud.RetryHelper.RetryInterruptedException; +import com.google.common.collect.ImmutableSet; + +import java.io.IOException; +import java.util.Set; + +/** + * Compute Engine service exception. + */ +public class ComputeException extends BaseServiceException { + + private static final Set RETRYABLE_ERRORS = ImmutableSet.of(new Error(500, null)); + private static final long serialVersionUID = -8039359778707845810L; + + ComputeException(int code, String message) { + super(code, message, null, true, null); + } + + ComputeException(int code, String message, Throwable cause) { + super(code, message, null, true, cause); + } + + public ComputeException(IOException exception) { + super(exception, true); + } + + @Override + protected Set retryableErrors() { + return RETRYABLE_ERRORS; + } + + /** + * Translate RetryHelperException to the ComputeException that caused the error. This method will + * always throw an exception. + * + * @throws ComputeException when {@code ex} was caused by a {@code ComputeException} + * @throws RetryInterruptedException when {@code ex} is a {@code RetryInterruptedException} + */ + static BaseServiceException translateAndThrow(RetryHelperException ex) { + BaseServiceException.translateAndPropagateIfPossible(ex); + throw new ComputeException(UNKNOWN_CODE, ex.getMessage(), ex.getCause()); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeFactory.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeFactory.java new file mode 100644 index 000000000000..a4daac3f4d56 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeFactory.java @@ -0,0 +1,25 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.cloud.ServiceFactory; + +/** + * An interface for Compute factories. + */ +public interface ComputeFactory extends ServiceFactory { +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeImpl.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeImpl.java new file mode 100644 index 000000000000..c0846749d326 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeImpl.java @@ -0,0 +1,2051 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.cloud.RetryHelper.runWithRetries; +import static com.google.common.base.Preconditions.checkArgument; + +import com.google.cloud.BaseService; +import com.google.cloud.Page; +import com.google.cloud.PageImpl; +import com.google.cloud.PageImpl.NextPageFetcher; +import com.google.cloud.RetryHelper; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.cloud.compute.spi.ComputeRpc; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Iterables; +import com.google.common.collect.Maps; + +import java.util.Map; +import java.util.concurrent.Callable; + +final class ComputeImpl extends BaseService implements Compute { + + private static class GlobalOperationPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -2488912172182315364L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + GlobalOperationPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listGlobalOperations(serviceOptions, requestOptions); + } + } + + private static class DiskTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -5253916264932522976L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + DiskTypePageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = 
zone; + } + + @Override + public Page nextPage() { + return listDiskTypes(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedDiskTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -1664743503750307996L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedDiskTypePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listDiskTypes(serviceOptions, requestOptions); + } + } + + private static class MachineTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -5048133000517001933L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + MachineTypePageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listMachineTypes(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedMachineTypePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 2919227789802660026L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedMachineTypePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listMachineTypes(serviceOptions, requestOptions); + } + } + + private static class RegionPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 4180147045485258863L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + RegionPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listRegions(serviceOptions, requestOptions); + } + } + + private static class ZonePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -3946202621600687597L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + ZonePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listZones(serviceOptions, requestOptions); + } + } + + private static class RegionOperationPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 4111705358926164078L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String region; + + RegionOperationPageFetcher(String region, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, 
optionMap); + this.serviceOptions = serviceOptions; + this.region = region; + } + + @Override + public Page nextPage() { + return listRegionOperations(region, serviceOptions, requestOptions); + } + } + + private static class ZoneOperationPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -9012504536518197793L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + ZoneOperationPageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listZoneOperations(zone, serviceOptions, requestOptions); + } + } + + private static class GlobalAddressPageFetcher implements NextPageFetcher
{ + + private static final long serialVersionUID = -3832055341507574454L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + GlobalAddressPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page
<Address> nextPage() {
+ return listGlobalAddresses(serviceOptions, requestOptions);
+ }
+ }
+
+ private static class RegionAddressPageFetcher implements NextPageFetcher<Address>
{ + + private static final long serialVersionUID = 7080596594494397027L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String region; + + RegionAddressPageFetcher(String region, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.region = region; + } + + @Override + public Page
<Address> nextPage() {
+ return listRegionAddresses(region, serviceOptions, requestOptions);
+ }
+ }
+
+ private static class AggregatedAddressPageFetcher implements NextPageFetcher<Address>
{ + + private static final long serialVersionUID = -5798942282919494950L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedAddressPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page
nextPage() { + return listAddresses(serviceOptions, requestOptions); + } + } + + private static class SnapshotPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 6205774609802216986L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + SnapshotPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listSnapshots(serviceOptions, requestOptions); + } + } + + private static class ImagePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 6403679803137922023L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String project; + + ImagePageFetcher(String project, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.project = project; + } + + @Override + public Page nextPage() { + return listImages(project, serviceOptions, requestOptions); + } + } + + private static class DiskPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 4146589787872718476L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + DiskPageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listDisks(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedDiskPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = -5240045334115926206L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedDiskPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listDisks(serviceOptions, requestOptions); + } + } + + private static class SubnetworkPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 3674038457884412651L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String region; + + SubnetworkPageFetcher(String region, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.region = region; + } + + @Override + public Page nextPage() { + return listSubnetworks(region, serviceOptions, requestOptions); + } + } + + private static class AggregatedSubnetworkPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 771343548833894551L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedSubnetworkPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + 
PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listSubnetworks(serviceOptions, requestOptions); + } + } + + private static class NetworkPageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 5580210850353114531L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + NetworkPageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listNetworks(serviceOptions, requestOptions); + } + } + + private static class InstancePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 7563769742657453865L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + private final String zone; + + InstancePageFetcher(String zone, ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + this.zone = zone; + } + + @Override + public Page nextPage() { + return listInstances(zone, serviceOptions, requestOptions); + } + } + + private static class AggregatedInstancePageFetcher implements NextPageFetcher { + + private static final long serialVersionUID = 1863059389783095681L; + private final Map requestOptions; + private final ComputeOptions serviceOptions; + + AggregatedInstancePageFetcher(ComputeOptions serviceOptions, String cursor, + Map optionMap) { + this.requestOptions = + PageImpl.nextRequestOptions(ComputeRpc.Option.PAGE_TOKEN, cursor, optionMap); + this.serviceOptions = serviceOptions; + } + + @Override + public Page nextPage() { + return listInstances(serviceOptions, requestOptions); + } + } + + private final ComputeRpc computeRpc; + + ComputeImpl(ComputeOptions options) { + super(options); + computeRpc = options.rpc(); + } + + @Override + public DiskType getDiskType(final DiskTypeId diskTypeId, DiskTypeOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.DiskType answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.DiskType call() { + return computeRpc.getDiskType(diskTypeId.zone(), diskTypeId.type(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : DiskType.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public DiskType getDiskType(String zone, String diskType, DiskTypeOption... options) { + return getDiskType(DiskTypeId.of(zone, diskType), options); + } + + @Override + public Page listDiskTypes(String zone, DiskTypeListOption... 
options) { + return listDiskTypes(zone, options(), optionMap(options)); + } + + private static Page listDiskTypes(final String zone, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDiskTypes(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable diskTypes = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public DiskType apply(com.google.api.services.compute.model.DiskType diskType) { + return DiskType.fromPb(diskType); + } + }); + return new PageImpl<>(new DiskTypePageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, diskTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listDiskTypes(DiskTypeAggregatedListOption... options) { + return listDiskTypes(options(), optionMap(options)); + } + + private static Page listDiskTypes(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDiskTypes(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable diskTypes = Iterables.transform(result.y(), + new Function() { + @Override + public DiskType apply(com.google.api.services.compute.model.DiskType diskType) { + return DiskType.fromPb(diskType); + } + }); + return new PageImpl<>(new AggregatedDiskTypePageFetcher(serviceOptions, cursor, optionsMap), + cursor, diskTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public MachineType getMachineType(final MachineTypeId machineType, MachineTypeOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.MachineType answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.MachineType call() { + return computeRpc.getMachineType(machineType.zone(), machineType.type(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : MachineType.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public MachineType getMachineType(String zone, String machineType, MachineTypeOption... options) { + return getMachineType(MachineTypeId.of(zone, machineType), options); + } + + @Override + public Page listMachineTypes(String zone, MachineTypeListOption... options) { + return listMachineTypes(zone, options(), optionMap(options)); + } + + private static Page listMachineTypes(final String zone, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listMachineTypes(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable machineTypes = Iterables.transform( + result.y() == null ? 
ImmutableList.of() + : result.y(), + new Function() { + @Override + public MachineType apply( + com.google.api.services.compute.model.MachineType machineType) { + return MachineType.fromPb(machineType); + } + }); + return new PageImpl<>(new MachineTypePageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, machineTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listMachineTypes(MachineTypeAggregatedListOption... options) { + return listMachineTypes(options(), optionMap(options)); + } + + private static Page listMachineTypes(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listMachineTypes(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable machineTypes = Iterables.transform(result.y(), + new Function() { + @Override + public MachineType apply( + com.google.api.services.compute.model.MachineType machineType) { + return MachineType.fromPb(machineType); + } + }); + return new PageImpl<>( + new AggregatedMachineTypePageFetcher(serviceOptions, cursor, optionsMap), cursor, + machineTypes); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Region getRegion(final String region, RegionOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Region answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Region call() { + return computeRpc.getRegion(region, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Region.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listRegions(RegionListOption... options) { + return listRegions(options(), optionMap(options)); + } + + private static Page listRegions(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listRegions(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable regions = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public Region apply(com.google.api.services.compute.model.Region region) { + return Region.fromPb(region); + } + }); + return new PageImpl<>(new RegionPageFetcher(serviceOptions, cursor, optionsMap), cursor, + regions); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Zone getZone(final String zone, ZoneOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Zone answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Zone call() { + return computeRpc.getZone(zone, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? 
null : Zone.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listZones(ZoneListOption... options) { + return listZones(options(), optionMap(options)); + } + + private static Page listZones(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listZones(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable zones = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public Zone apply(com.google.api.services.compute.model.Zone zone) { + return Zone.fromPb(zone); + } + }); + return new PageImpl<>(new ZonePageFetcher(serviceOptions, cursor, optionsMap), cursor, zones); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public License getLicense(String license, LicenseOption... options) { + return getLicense(LicenseId.of(license), options); + } + + @Override + public License getLicense(LicenseId license, LicenseOption... options) { + final LicenseId completeId = license.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.License answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.License call() { + return computeRpc.getLicense(completeId.project(), completeId.license(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : License.fromPb(answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation getOperation(final OperationId operationId, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + switch (operationId.type()) { + case REGION: + RegionOperationId regionOperationId = (RegionOperationId) operationId; + return computeRpc.getRegionOperation(regionOperationId.region(), + regionOperationId.operation(), optionsMap); + case ZONE: + ZoneOperationId zoneOperationId = (ZoneOperationId) operationId; + return computeRpc.getZoneOperation(zoneOperationId.zone(), + zoneOperationId.operation(), optionsMap); + case GLOBAL: + return computeRpc.getGlobalOperation(operationId.operation(), optionsMap); + default: + throw new IllegalArgumentException("Unexpected operation identity type"); + } + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private static Function + operationFromPb(final ComputeOptions serviceOptions) { + return new Function() { + @Override + public Operation apply(com.google.api.services.compute.model.Operation operation) { + return Operation.fromPb(serviceOptions.service(), operation); + } + }; + } + + @Override + public Page listGlobalOperations(OperationListOption... 
options) { + return listGlobalOperations(options(), optionMap(options)); + } + + private static Page listGlobalOperations(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listGlobalOperations(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable operations = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), operationFromPb(serviceOptions)); + return new PageImpl<>(new GlobalOperationPageFetcher(serviceOptions, cursor, optionsMap), + cursor, operations); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listRegionOperations(String region, OperationListOption... options) { + return listRegionOperations(region, options(), optionMap(options)); + } + + private static Page listRegionOperations(final String region, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listRegionOperations(region, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable operations = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), operationFromPb(serviceOptions)); + return new PageImpl<>(new RegionOperationPageFetcher(region, serviceOptions, cursor, + optionsMap), cursor, operations); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listZoneOperations(String zone, OperationListOption... options) { + return listZoneOperations(zone, options(), optionMap(options)); + } + + private static Page listZoneOperations(final String zone, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listZoneOperations(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable operations = Iterables.transform( + result.y() == null ? 
ImmutableList.of() + : result.y(), operationFromPb(serviceOptions)); + return new PageImpl<>(new ZoneOperationPageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, operations); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public boolean deleteOperation(final OperationId operation) { + try { + return runWithRetries(new Callable() { + @Override + public Boolean call() { + switch (operation.type()) { + case REGION: + RegionOperationId regionOperationId = (RegionOperationId) operation; + return computeRpc.deleteRegionOperation(regionOperationId.region(), + regionOperationId.operation()); + case ZONE: + ZoneOperationId zoneOperationId = (ZoneOperationId) operation; + return computeRpc.deleteZoneOperation(zoneOperationId.zone(), + zoneOperationId.operation()); + case GLOBAL: + return computeRpc.deleteGlobalOperation(operation.operation()); + default: + throw new IllegalArgumentException("Unexpected operation identity type"); + } + } + }, options().retryParams(), EXCEPTION_HANDLER); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Address getAddress(final AddressId addressId, AddressOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Address answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Address call() { + switch (addressId.type()) { + case REGION: + RegionAddressId regionAddressId = (RegionAddressId) addressId; + return computeRpc.getRegionAddress(regionAddressId.region(), + regionAddressId.address(), optionsMap); + case GLOBAL: + return computeRpc.getGlobalAddress(addressId.address(), optionsMap); + default: + throw new IllegalArgumentException("Unexpected address identity type"); + } + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Address.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation create(final AddressInfo address, OperationOption... options) { + final com.google.api.services.compute.model.Address addressPb = + address.setProjectId(options().projectId()).toPb(); + final Map optionsMap = optionMap(options); + try { + return Operation.fromPb(this, + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + switch (address.addressId().type()) { + case REGION: + RegionAddressId regionAddressId = address.addressId(); + return computeRpc.createRegionAddress(regionAddressId.region(), addressPb, + optionsMap); + case GLOBAL: + return computeRpc.createGlobalAddress(addressPb, optionsMap); + default: + throw new IllegalArgumentException("Unexpected address identity type"); + } + } + }, options().retryParams(), EXCEPTION_HANDLER)); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private static Function addressFromPb( + final ComputeOptions serviceOptions) { + return new Function() { + @Override + public Address apply(com.google.api.services.compute.model.Address address) { + return Address.fromPb(serviceOptions.service(), address); + } + }; + } + + @Override + public Page
<Address> listGlobalAddresses(AddressListOption... options) {
+    return listGlobalAddresses(options(), optionMap(options));
+  }
+
+  private static Page<Address> listGlobalAddresses(final ComputeOptions serviceOptions,
+      final Map<ComputeRpc.Option, ?> optionsMap) {
+    try {
+      ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Address>> result =
+          runWithRetries(new Callable<ComputeRpc.Tuple<String,
+              Iterable<com.google.api.services.compute.model.Address>>>() {
+            @Override
+            public ComputeRpc.Tuple<String,
+                Iterable<com.google.api.services.compute.model.Address>> call() {
+              return serviceOptions.rpc().listGlobalAddresses(optionsMap);
+            }
+          }, serviceOptions.retryParams(), EXCEPTION_HANDLER);
+      String cursor = result.x();
+      Iterable<Address> operations = Iterables.transform(
+          result.y() == null ? ImmutableList.<com.google.api.services.compute.model.Address>of()
+              : result.y(), addressFromPb(serviceOptions));
+      return new PageImpl<>(new GlobalAddressPageFetcher(serviceOptions, cursor, optionsMap),
+          cursor, operations);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Page<Address> listRegionAddresses(String region, AddressListOption... options) {
+    return listRegionAddresses(region, options(), optionMap(options));
+  }
+
+  private static Page<Address> listRegionAddresses(final String region,
+      final ComputeOptions serviceOptions, final Map<ComputeRpc.Option, ?> optionsMap) {
+    try {
+      ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Address>> result =
+          runWithRetries(new Callable<ComputeRpc.Tuple<String,
+              Iterable<com.google.api.services.compute.model.Address>>>() {
+            @Override
+            public ComputeRpc.Tuple<String,
+                Iterable<com.google.api.services.compute.model.Address>> call() {
+              return serviceOptions.rpc().listRegionAddresses(region, optionsMap);
+            }
+          }, serviceOptions.retryParams(), EXCEPTION_HANDLER);
+      String cursor = result.x();
+      Iterable<Address> operations = Iterables.transform(
+          result.y() == null ? ImmutableList.<com.google.api.services.compute.model.Address>of()
+              : result.y(), addressFromPb(serviceOptions));
+      return new PageImpl<>(new RegionAddressPageFetcher(region, serviceOptions, cursor,
+          optionsMap), cursor, operations);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Page<Address> listAddresses(AddressAggregatedListOption... options) {
+    return listAddresses(options(), optionMap(options));
+  }
+
+  private static Page<Address> listAddresses(final ComputeOptions serviceOptions,
+      final Map<ComputeRpc.Option, ?> optionsMap) {
+    try {
+      ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Address>> result =
+          runWithRetries(new Callable<ComputeRpc.Tuple<String,
+              Iterable<com.google.api.services.compute.model.Address>>>() {
+            @Override
+            public ComputeRpc.Tuple<String,
+                Iterable<com.google.api.services.compute.model.Address>> call() {
+              return serviceOptions.rpc().listAddresses(optionsMap);
+            }
+          }, serviceOptions.retryParams(), EXCEPTION_HANDLER);
+      String cursor = result.x();
+      Iterable<Address>
operations = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), + new Function() { + @Override + public Address apply(com.google.api.services.compute.model.Address address) { + return Address.fromPb(serviceOptions.service(), address); + } + }); + return new PageImpl<>(new AggregatedAddressPageFetcher(serviceOptions, cursor, optionsMap), + cursor, operations); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deleteAddress(final AddressId addressId, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + switch (addressId.type()) { + case REGION: + RegionAddressId regionAddressId = (RegionAddressId) addressId; + return computeRpc.deleteRegionAddress(regionAddressId.region(), + regionAddressId.address(), optionsMap); + case GLOBAL: + return computeRpc.deleteGlobalAddress(addressId.address(), optionsMap); + default: + throw new IllegalArgumentException("Unexpected address identity type"); + } + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation create(SnapshotInfo snapshot, OperationOption... options) { + final SnapshotInfo completeSnapshot = snapshot.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.createSnapshot(completeSnapshot.sourceDisk().zone(), + completeSnapshot.sourceDisk().disk(), completeSnapshot.snapshotId().snapshot(), + completeSnapshot.description(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Snapshot getSnapshot(final String snapshot, SnapshotOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Snapshot answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Snapshot call() { + return computeRpc.getSnapshot(snapshot, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Snapshot.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listSnapshots(SnapshotListOption... options) { + return listSnapshots(options(), optionMap(options)); + } + + private static Page listSnapshots(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listSnapshots(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable snapshots = Iterables.transform( + result.y() == null ? 
ImmutableList.of() + : result.y(), + new Function() { + @Override + public Snapshot apply(com.google.api.services.compute.model.Snapshot snapshot) { + return Snapshot.fromPb(serviceOptions.service(), snapshot); + } + }); + return new PageImpl<>(new SnapshotPageFetcher(serviceOptions, cursor, optionsMap), cursor, + snapshots); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deleteSnapshot(SnapshotId snapshot, OperationOption... options) { + return deleteSnapshot(snapshot.snapshot(), options); + } + + @Override + public Operation deleteSnapshot(final String snapshot, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteSnapshot(snapshot, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation create(ImageInfo image, OperationOption... options) { + final ImageInfo completeImage = image.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.createImage(completeImage.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Image getImage(ImageId imageId, ImageOption... options) { + final ImageId completeImageId = imageId.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Image answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Image call() { + return computeRpc.getImage(completeImageId.project(), completeImageId.image(), + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Image.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listImages(String project, ImageListOption... options) { + return listImages(project, options(), optionMap(options)); + } + + @Override + public Page listImages(ImageListOption... options) { + return listImages(options().projectId(), options(), optionMap(options)); + } + + private static Page listImages(final String project, final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listImages(project, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable images = Iterables.transform( + result.y() == null ? 
ImmutableList.of() + : result.y(), + new Function() { + @Override + public Image apply(com.google.api.services.compute.model.Image image) { + return Image.fromPb(serviceOptions.service(), image); + } + }); + return new PageImpl<>(new ImagePageFetcher(project, serviceOptions, cursor, optionsMap), + cursor, images); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deleteImage(ImageId image, OperationOption... options) { + final ImageId completeId = image.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteImage(completeId.project(), completeId.image(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deprecate(ImageId image, + final DeprecationStatus deprecationStatus, OperationOption... options) { + final ImageId completeId = image.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deprecateImage(completeId.project(), completeId.image(), + deprecationStatus.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Disk getDisk(final DiskId diskId, DiskOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Disk answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Disk call() { + return computeRpc.getDisk(diskId.zone(), diskId.disk(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Disk.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation create(final DiskInfo disk, OperationOption... options) { + final com.google.api.services.compute.model.Disk diskPb = + disk.setProjectId(options().projectId()).toPb(); + final Map optionsMap = optionMap(options); + try { + return Operation.fromPb(this, + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.createDisk(disk.diskId().zone(), diskPb, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER)); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private static Function diskFromPb( + final ComputeOptions serviceOptions) { + return new Function() { + @Override + public Disk apply(com.google.api.services.compute.model.Disk disk) { + return Disk.fromPb(serviceOptions.service(), disk); + } + }; + } + + @Override + public Page listDisks(String zone, DiskListOption... 
options) { + return listDisks(zone, options(), optionMap(options)); + } + + private static Page listDisks(final String zone, final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDisks(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable disks = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), diskFromPb(serviceOptions)); + return new PageImpl<>(new DiskPageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, disks); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listDisks(DiskAggregatedListOption... options) { + return listDisks(options(), optionMap(options)); + } + + private static Page listDisks(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listDisks(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable disks = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), diskFromPb(serviceOptions)); + return new PageImpl<>(new AggregatedDiskPageFetcher(serviceOptions, cursor, optionsMap), + cursor, disks); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deleteDisk(final DiskId disk, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteDisk(disk.zone(), disk.disk(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation resize(final DiskId disk, final long sizeGb, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.resizeDisk(disk.zone(), disk.disk(), sizeGb, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + public Operation create(SubnetworkInfo subnetwork, OperationOption... options) { + final SubnetworkInfo completeSubnetwork = subnetwork.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.createSubnetwork(completeSubnetwork.subnetworkId().region(), + completeSubnetwork.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? 
null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Subnetwork getSubnetwork(final SubnetworkId subnetworkId, SubnetworkOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Subnetwork answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Subnetwork call() { + return computeRpc.getSubnetwork(subnetworkId.region(), subnetworkId.subnetwork(), + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Subnetwork.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private static Function + subnetworkFromPb(final ComputeOptions serviceOptions) { + return new Function() { + @Override + public Subnetwork apply(com.google.api.services.compute.model.Subnetwork subnetwork) { + return Subnetwork.fromPb(serviceOptions.service(), subnetwork); + } + }; + } + + @Override + public Page listSubnetworks(String region, SubnetworkListOption... options) { + return listSubnetworks(region, options(), optionMap(options)); + } + + private static Page listSubnetworks(final String region, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listSubnetworks(region, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable subnetworks = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), subnetworkFromPb(serviceOptions)); + return new PageImpl<>(new SubnetworkPageFetcher(region, serviceOptions, cursor, optionsMap), + cursor, subnetworks); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listSubnetworks(SubnetworkAggregatedListOption... options) { + return listSubnetworks(options(), optionMap(options)); + } + + private static Page listSubnetworks(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listSubnetworks(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable subnetworks = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), subnetworkFromPb(serviceOptions)); + return new PageImpl<>(new AggregatedSubnetworkPageFetcher(serviceOptions, cursor, optionsMap), + cursor, subnetworks); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deleteSubnetwork(final SubnetworkId subnetwork, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteSubnetwork(subnetwork.region(), subnetwork.subnetwork(), + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? 
null : Operation.fromPb(this, answer);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Operation create(NetworkInfo network, OperationOption... options) {
+    final NetworkInfo completeNetwork = network.setProjectId(options().projectId());
+    final Map<ComputeRpc.Option, ?> optionsMap = optionMap(options);
+    try {
+      com.google.api.services.compute.model.Operation answer =
+          runWithRetries(new Callable<com.google.api.services.compute.model.Operation>() {
+            @Override
+            public com.google.api.services.compute.model.Operation call() {
+              return computeRpc.createNetwork(completeNetwork.toPb(), optionsMap);
+            }
+          }, options().retryParams(), EXCEPTION_HANDLER);
+      return answer == null ? null : Operation.fromPb(this, answer);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Network getNetwork(final String network, NetworkOption... options) {
+    final Map<ComputeRpc.Option, ?> optionsMap = optionMap(options);
+    try {
+      com.google.api.services.compute.model.Network answer =
+          runWithRetries(new Callable<com.google.api.services.compute.model.Network>() {
+            @Override
+            public com.google.api.services.compute.model.Network call() {
+              return computeRpc.getNetwork(network, optionsMap);
+            }
+          }, options().retryParams(), EXCEPTION_HANDLER);
+      return answer == null ? null : Network.fromPb(this, answer);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Page<Network> listNetworks(NetworkListOption... options) {
+    return listNetworks(options(), optionMap(options));
+  }
+
+  private static Page<Network> listNetworks(final ComputeOptions serviceOptions,
+      final Map<ComputeRpc.Option, ?> optionsMap) {
+    try {
+      ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Network>> result =
+          runWithRetries(new Callable<ComputeRpc.Tuple<String,
+              Iterable<com.google.api.services.compute.model.Network>>>() {
+            @Override
+            public ComputeRpc.Tuple<String,
+                Iterable<com.google.api.services.compute.model.Network>> call() {
+              return serviceOptions.rpc().listNetworks(optionsMap);
+            }
+          }, serviceOptions.retryParams(), EXCEPTION_HANDLER);
+      String cursor = result.x();
+      Iterable<Network> networks = Iterables.transform(
+          result.y() == null ? ImmutableList.<com.google.api.services.compute.model.Network>of()
+              : result.y(),
+          new Function<com.google.api.services.compute.model.Network, Network>() {
+            @Override
+            public Network apply(com.google.api.services.compute.model.Network network) {
+              return Network.fromPb(serviceOptions.service(), network);
+            }
+          });
+      return new PageImpl<>(new NetworkPageFetcher(serviceOptions, cursor, optionsMap),
+          cursor, networks);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Operation deleteNetwork(final NetworkId network, OperationOption... options) {
+    final Map<ComputeRpc.Option, ?> optionsMap = optionMap(options);
+    try {
+      com.google.api.services.compute.model.Operation answer =
+          runWithRetries(new Callable<com.google.api.services.compute.model.Operation>() {
+            @Override
+            public com.google.api.services.compute.model.Operation call() {
+              return computeRpc.deleteNetwork(network.network(), optionsMap);
+            }
+          }, options().retryParams(), EXCEPTION_HANDLER);
+      return answer == null ? null : Operation.fromPb(this, answer);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  @Override
+  public Operation deleteNetwork(String network, OperationOption... options) {
+    return deleteNetwork(NetworkId.of(network), options);
+  }
+
+  @Override
+  public Operation create(InstanceInfo instance, OperationOption...
options) { + final InstanceInfo completeInstance = instance.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.createInstance(completeInstance.instanceId().zone(), + completeInstance.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Instance getInstance(final InstanceId instance, InstanceOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Instance answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Instance call() { + return computeRpc.getInstance(instance.zone(), instance.instance(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Instance.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private static Function + instanceFromPb(final ComputeOptions serviceOptions) { + return new Function() { + @Override + public Instance apply(com.google.api.services.compute.model.Instance instance) { + return Instance.fromPb(serviceOptions.service(), instance); + } + }; + } + + @Override + public Page listInstances(String zone, InstanceListOption... options) { + return listInstances(zone, options(), optionMap(options)); + } + + private static Page listInstances(final String zone, + final ComputeOptions serviceOptions, final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listInstances(zone, optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable instances = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), instanceFromPb(serviceOptions)); + return new PageImpl<>(new InstancePageFetcher(zone, serviceOptions, cursor, optionsMap), + cursor, instances); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Page listInstances(InstanceAggregatedListOption... options) { + return listInstances(options(), optionMap(options)); + } + + private static Page listInstances(final ComputeOptions serviceOptions, + final Map optionsMap) { + try { + ComputeRpc.Tuple> result = + runWithRetries(new Callable>>() { + @Override + public ComputeRpc.Tuple> call() { + return serviceOptions.rpc().listInstances(optionsMap); + } + }, serviceOptions.retryParams(), EXCEPTION_HANDLER); + String cursor = result.x(); + Iterable instances = Iterables.transform( + result.y() == null ? ImmutableList.of() + : result.y(), instanceFromPb(serviceOptions)); + return new PageImpl<>(new AggregatedInstancePageFetcher(serviceOptions, cursor, optionsMap), + cursor, instances); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation deleteInstance(final InstanceId instance, OperationOption... 
options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteInstance(instance.zone(), instance.instance(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation addAccessConfig(final InstanceId instance, final String networkInterface, + final AccessConfig accessConfig, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.addAccessConfig(instance.zone(), instance.instance(), + networkInterface, accessConfig.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + private Operation attachDisk(final InstanceId instance, AttachedDisk diskToAttach, + OperationOption... options) { + final AttachedDisk completeDisk = diskToAttach.setProjectId(options().projectId()); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.attachDisk(instance.zone(), instance.instance(), + completeDisk.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation attachDisk(InstanceId instance, PersistentDiskConfiguration configuration, + OperationOption... options) { + return attachDisk(instance, AttachedDisk.of(configuration), options); + } + + @Override + public Operation attachDisk(InstanceId instance, String deviceName, + PersistentDiskConfiguration configuration, OperationOption... options) { + return attachDisk(instance, AttachedDisk.of(deviceName, configuration), options); + } + + @Override + public Operation attachDisk(InstanceId instance, String deviceName, + PersistentDiskConfiguration configuration, int index, OperationOption... options) { + AttachedDisk attachedDisk = AttachedDisk.builder(configuration) + .deviceName(deviceName) + .index(index) + .build(); + return attachDisk(instance, attachedDisk, options); + } + + @Override + public Operation deleteAccessConfig(final InstanceId instance, final String networkInterface, + final String accessConfig, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.deleteAccessConfig(instance.zone(), instance.instance(), + networkInterface, accessConfig, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? 
null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation detachDisk(final InstanceId instance, final String deviceName, + OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.detachDisk(instance.zone(), instance.instance(), deviceName, + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public String getSerialPortOutput(final InstanceId instance, final int port) { + try { + return runWithRetries(new Callable() { + @Override + public String call() { + return computeRpc.getSerialPortOutput(instance.zone(), instance.instance(), port, + optionMap()); + } + }, options().retryParams(), EXCEPTION_HANDLER); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public String getSerialPortOutput(final InstanceId instance) { + try { + return runWithRetries(new Callable() { + @Override + public String call() { + return computeRpc.getSerialPortOutput(instance.zone(), instance.instance(), null, + optionMap()); + } + }, options().retryParams(), EXCEPTION_HANDLER); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation reset(final InstanceId instance, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.reset(instance.zone(), instance.instance(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation setDiskAutoDelete(final InstanceId instance, final String deviceName, + final boolean autoDelete, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.setDiskAutoDelete(instance.zone(), instance.instance(), deviceName, + autoDelete, optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation setMachineType(final InstanceId instance, final MachineTypeId machineType, + OperationOption... 
options) { + final String machineTypeUrl = machineType.setProjectId(options().projectId()).selfLink(); + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.setMachineType(instance.zone(), instance.instance(), machineTypeUrl, + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation setMetadata(final InstanceId instance, final Metadata metadata, + OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.setMetadata(instance.zone(), instance.instance(), metadata.toPb(), + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation setSchedulingOptions(final InstanceId instance, + final SchedulingOptions schedulingOptions, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.setScheduling(instance.zone(), instance.instance(), + schedulingOptions.toPb(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation setTags(final InstanceId instance, final Tags tags, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.setTags(instance.zone(), instance.instance(), tags.toPb(), + optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation start(final InstanceId instance, OperationOption... options) { + final Map optionsMap = optionMap(options); + try { + com.google.api.services.compute.model.Operation answer = + runWithRetries(new Callable() { + @Override + public com.google.api.services.compute.model.Operation call() { + return computeRpc.start(instance.zone(), instance.instance(), optionsMap); + } + }, options().retryParams(), EXCEPTION_HANDLER); + return answer == null ? null : Operation.fromPb(this, answer); + } catch (RetryHelper.RetryHelperException e) { + throw ComputeException.translateAndThrow(e); + } + } + + @Override + public Operation stop(final InstanceId instance, OperationOption... 
options) {
+    final Map<ComputeRpc.Option, ?> optionsMap = optionMap(options);
+    try {
+      com.google.api.services.compute.model.Operation answer =
+          runWithRetries(new Callable<com.google.api.services.compute.model.Operation>() {
+            @Override
+            public com.google.api.services.compute.model.Operation call() {
+              return computeRpc.stop(instance.zone(), instance.instance(), optionsMap);
+            }
+          }, options().retryParams(), EXCEPTION_HANDLER);
+      return answer == null ? null : Operation.fromPb(this, answer);
+    } catch (RetryHelper.RetryHelperException e) {
+      throw ComputeException.translateAndThrow(e);
+    }
+  }
+
+  private Map<ComputeRpc.Option, ?> optionMap(Option... options) {
+    Map<ComputeRpc.Option, Object> optionMap = Maps.newEnumMap(ComputeRpc.Option.class);
+    for (Option option : options) {
+      Object prev = optionMap.put(option.rpcOption(), option.value());
+      checkArgument(prev == null, "Duplicate option %s", option);
+    }
+    return optionMap;
+  }
+}
diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeOptions.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeOptions.java
new file mode 100644
index 000000000000..7e458b9363e6
--- /dev/null
+++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ComputeOptions.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.compute; + +import com.google.cloud.ServiceOptions; +import com.google.cloud.compute.spi.ComputeRpc; +import com.google.cloud.compute.spi.ComputeRpcFactory; +import com.google.cloud.compute.spi.DefaultComputeRpc; +import com.google.common.collect.ImmutableSet; + +import java.util.Set; + +public class ComputeOptions extends ServiceOptions { + + private static final String COMPUTE_SCOPE = "https://www.googleapis.com/auth/compute"; + private static final Set SCOPES = ImmutableSet.of(COMPUTE_SCOPE); + private static final long serialVersionUID = 6509557711917342058L; + + public static class DefaultComputeFactory implements ComputeFactory { + + private static final ComputeFactory INSTANCE = new DefaultComputeFactory(); + + @Override + public Compute create(ComputeOptions options) { + return new ComputeImpl(options); + } + } + + public static class DefaultComputeRpcFactory implements ComputeRpcFactory { + + private static final ComputeRpcFactory INSTANCE = new DefaultComputeRpcFactory(); + + @Override + public ComputeRpc create(ComputeOptions options) { + return new DefaultComputeRpc(options); + } + } + + public static class Builder extends + ServiceOptions.Builder { + + private Builder() { + } + + private Builder(ComputeOptions options) { + super(options); + } + + @Override + public ComputeOptions build() { + return new ComputeOptions(this); + } + } + + private ComputeOptions(Builder builder) { + super(ComputeFactory.class, ComputeRpcFactory.class, builder); + } + + @Override + protected ComputeFactory defaultServiceFactory() { + return DefaultComputeFactory.INSTANCE; + } + + @Override + protected ComputeRpcFactory defaultRpcFactory() { + return DefaultComputeRpcFactory.INSTANCE; + } + + @Override + protected Set scopes() { + return SCOPES; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public int hashCode() { + return baseHashCode(); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof ComputeOptions)) { + return false; + } + ComputeOptions other = (ComputeOptions) obj; + return baseEquals(other); + } + + public static ComputeOptions defaultInstance() { + return builder().build(); + } + + public static Builder builder() { + return new Builder(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DeprecationStatus.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DeprecationStatus.java new file mode 100644 index 000000000000..bdf7fa0a57b4 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DeprecationStatus.java @@ -0,0 +1,360 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.util.Objects; + +/** + * The deprecation status associated to a Google Compute Engine resource. + * + * @param The Google Compute Engine resource identity to which the deprecation status refers + */ +public final class DeprecationStatus implements Serializable { + + private static final long serialVersionUID = -2695077634793679794L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + private static final DateTimeFormatter TIMESTAMP_PARSER = ISODateTimeFormat.dateTimeParser(); + + private final String deleted; + private final String deprecated; + private final String obsolete; + private final T replacement; + private final Status status; + + /** + * The deprecation status of a Google Compute Engine resource. + */ + public enum Status { + /** + * Operations that create a Google Compute Engine entity using a deprecated resource will return + * successfully but with a warning indicating the deprecation and suggesting a replacement. + */ + DEPRECATED, + + /** + * Operations that create a Google Compute Engine entity using an obsolete resource will be + * rejected and result in an error. + */ + OBSOLETE, + + /** + * Operations that create a Google Compute Engine entity using a deleted resource will be + * rejected and result in an error. + */ + DELETED + } + + /** + * A builder for {@code DeprecationStatus} objects. + * + * @param The Google Compute Engine resource identity to which the deprecation status refers + */ + public static final class Builder { + + private String deleted; + private String deprecated; + private String obsolete; + private T replacement; + private Status status; + + Builder() {} + + Builder(DeprecationStatus deprecationStatus) { + this.deleted = deprecationStatus.deleted; + this.deprecated = deprecationStatus.deprecated; + this.obsolete = deprecationStatus.obsolete; + this.replacement = deprecationStatus.replacement; + this.status = deprecationStatus.status; + } + + /** + * Sets the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#DELETED}. Timestamp should be in RFC3339 format. + * + * @see RFC3339 + */ + // todo(mziccard): remove this method if #732 is closed + public Builder deleted(String deleted) { + this.deleted = deleted; + return this; + } + + /** + * Sets the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#DEPRECATED}. Timestamp should be in RFC3339 format. + * + * @see RFC3339 + */ + // todo(mziccard): remove this method if #732 is closed + public Builder deprecated(String deprecated) { + this.deprecated = deprecated; + return this; + } + + /** + * Sets the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#OBSOLETE}. Timestamp should be in RFC3339 format. + * + * @see RFC3339 + */ + // todo(mziccard): remove this method if #732 is closed + public Builder obsolete(String obsolete) { + this.obsolete = obsolete; + return this; + } + + /** + * Sets the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#DELETED}. In milliseconds since epoch. 
+ */ + public Builder deleted(long deleted) { + this.deleted = TIMESTAMP_FORMATTER.print(deleted); + return this; + } + + /** + * Sets the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#DEPRECATED}. In milliseconds since epoch. + */ + public Builder deprecated(long deprecated) { + this.deprecated = TIMESTAMP_FORMATTER.print(deprecated); + return this; + } + + /** + * Sets the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#OBSOLETE}. In milliseconds since epoch. + */ + public Builder obsolete(long obsolete) { + this.obsolete = TIMESTAMP_FORMATTER.print(obsolete); + return this; + } + + /** + * Sets the identity of the suggested replacement for a deprecated resource. The suggested + * replacement resource must be the same kind of resource as the deprecated resource. + */ + public Builder replacement(T replacement) { + this.replacement = replacement; + return this; + } + + /** + * Sets the status of the deprecated resource. + */ + public Builder status(Status status) { + this.status = checkNotNull(status); + return this; + } + + /** + * Creates a {@code DeprecationStatus} object. + */ + public DeprecationStatus build() { + return new DeprecationStatus(this); + } + } + + DeprecationStatus(Builder builder) { + this.deleted = builder.deleted; + this.deprecated = builder.deprecated; + this.obsolete = builder.obsolete; + this.replacement = builder.replacement; + this.status = checkNotNull(builder.status); + } + + /** + * Returns the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#DELETED}. Returns {@code null} if not set. This value should be in RFC3339 + * format. + * + * @see RFC3339 + */ + // todo(mziccard): remove this method if #732 is closed + public String deleted() { + return deleted; + } + + /** + * Returns the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#DEPRECATED}. Returns {@code null} if not set. This value should be in RFC3339 + * format. + * + * @see RFC3339 + */ + // todo(mziccard): remove this method if #732 is closed + public String deprecated() { + return deprecated; + } + + /** + * Returns the timestamp on or after which the deprecation state of this resource will be changed + * to {@link Status#OBSOLETE}. Returns {@code null} if not set. This value should be in RFC3339 + * format. + * + * @see RFC3339 + */ + // todo(mziccard): remove this method if #732 is closed + public String obsolete() { + return obsolete; + } + + /** + * Returns the timestamp (in milliseconds since epoch) on or after which the deprecation state of + * this resource will be changed to {@link Status#DELETED}. Returns {@code null} if not set. + * + * @throws IllegalStateException if {@link #deleted()} is not a valid date, time or datetime + */ + public Long deletedMillis() { + try { + return deleted != null ? TIMESTAMP_PARSER.parseMillis(deleted) : null; + } catch (IllegalArgumentException ex) { + throw new IllegalStateException(ex.getMessage(), ex); + } + } + + /** + * Returns the timestamp (in milliseconds since epoch) on or after which the deprecation state of + * this resource will be changed to {@link Status#DEPRECATED}. Returns {@code null} if not set. + * + * @throws IllegalStateException if {@link #deprecated()} is not a valid date, time or datetime + */ + public Long deprecatedMillis() { + try { + return deprecated != null ? 
TIMESTAMP_PARSER.parseMillis(deprecated) : null; + } catch (IllegalArgumentException ex) { + throw new IllegalStateException(ex.getMessage(), ex); + } + } + + /** + * Returns the timestamp (in milliseconds since epoch) on or after which the deprecation state of + * this resource will be changed to {@link Status#OBSOLETE}. Returns {@code null} if not set. + * + * @throws IllegalStateException if {@link #obsolete()} is not a valid date, time or datetime + */ + public Long obsoleteMillis() { + try { + return obsolete != null ? TIMESTAMP_PARSER.parseMillis(obsolete) : null; + } catch (IllegalArgumentException ex) { + throw new IllegalStateException(ex.getMessage(), ex); + } + } + + /** + * Returns the identity of the suggested replacement for a deprecated resource. The suggested + * replacement resource must be the same kind of resource as the deprecated resource. + */ + public T replacement() { + return replacement; + } + + /** + * Returns the deprecation state of this resource. + */ + public Status status() { + return status; + } + + /** + * Returns a builder for the {@code DeprecationStatus} object. + */ + public Builder toBuilder() { + return new Builder<>(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("deleted", deleted) + .add("deprecated", deprecated) + .add("obsolete", obsolete) + .add("replacement", replacement) + .add("status", status) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(deleted, deprecated, obsolete, replacement, status); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj instanceof DeprecationStatus + && Objects.equals(toPb(), ((DeprecationStatus) obj).toPb()); + } + + com.google.api.services.compute.model.DeprecationStatus toPb() { + com.google.api.services.compute.model.DeprecationStatus deprecationStatusPb = + new com.google.api.services.compute.model.DeprecationStatus(); + deprecationStatusPb.setDeleted(deleted); + deprecationStatusPb.setDeprecated(deprecated); + deprecationStatusPb.setObsolete(obsolete); + deprecationStatusPb.setReplacement(replacement.selfLink()); + deprecationStatusPb.setState(status.name()); + return deprecationStatusPb; + } + + /** + * Returns the builder for a {@code DeprecationStatus} object given the status. + */ + public static Builder builder(Status status) { + return new Builder().status(status); + } + + /** + * Returns the builder for a {@code DeprecationStatus} object given the status and replacement's + * identity. + */ + public static Builder builder(Status status, T replacement) { + return new Builder().status(status).replacement(replacement); + } + + /** + * Returns a {@code DeprecationStatus} object given the status and replacement's identity. 
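To illustrate the factory methods shown above, here is a sketch that builds a deprecation status for a hypothetical machine type, using the `builder(Status, T)` factory and the millisecond-based `deprecated` setter; `DeprecationStatus` is parameterized by the resource identity type, and the machine type names are placeholders:

```java
import com.google.cloud.compute.DeprecationStatus;
import com.google.cloud.compute.MachineTypeId;

// Hypothetical example: mark a machine type as deprecated in favor of a replacement
MachineTypeId replacement = MachineTypeId.of("us-central1-a", "n1-standard-2");
DeprecationStatus<MachineTypeId> status =
    DeprecationStatus.builder(DeprecationStatus.Status.DEPRECATED, replacement)
        .deprecated(System.currentTimeMillis())
        .build();
```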
+ */ + public static DeprecationStatus of(Status status, T replacement) { + return builder(status, replacement).build(); + } + + static DeprecationStatus fromPb( + com.google.api.services.compute.model.DeprecationStatus deprecationStatusPb, + Function fromUrl) { + Builder builder = new Builder<>(); + builder.deleted(deprecationStatusPb.getDeleted()); + builder.deprecated(deprecationStatusPb.getDeprecated()); + builder.obsolete(deprecationStatusPb.getObsolete()); + if (deprecationStatusPb.getReplacement() != null) { + builder.replacement(fromUrl.apply(deprecationStatusPb.getReplacement())); + } + if (deprecationStatusPb.getState() != null) { + builder.status(Status.valueOf(deprecationStatusPb.getState())); + } + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Disk.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Disk.java new file mode 100644 index 000000000000..f1c6cd26291c --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Disk.java @@ -0,0 +1,261 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.DiskOption; +import com.google.cloud.compute.Compute.OperationOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine persistent disk. A disk can be used as primary storage for your virtual + * machine instances. Objects of this class are immutable. To get a {@code Disk} object with the + * most recent information use {@link #reload}. {@code Disk} adds a layer of service-related + * functionality over {@link DiskInfo}. + * + * @see Block Storage + */ +public class Disk extends DiskInfo { + + private static final long serialVersionUID = 7234747955588262204L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Disk} objects. 
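A quick sketch of the service-layer helpers this class adds over `DiskInfo` (`exists`, `reload`, `delete`, defined further down); the zone and disk names are placeholders:

```java
import com.google.cloud.compute.Compute;
import com.google.cloud.compute.ComputeOptions;
import com.google.cloud.compute.Disk;
import com.google.cloud.compute.DiskId;

Compute compute = ComputeOptions.defaultInstance().service();
Disk disk = compute.getDisk(DiskId.of("us-central1-a", "disk-name"));
if (disk != null && disk.exists()) {
  // fetch the latest state of the disk from the service
  disk = disk.reload();
}
```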
+ */ + public static class Builder extends DiskInfo.Builder { + + private final Compute compute; + private final DiskInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, DiskId diskId, DiskConfiguration diskConfiguration) { + this.compute = compute; + this.infoBuilder = new DiskInfo.BuilderImpl(diskId, diskConfiguration); + } + + Builder(Disk disk) { + this.compute = disk.compute; + this.infoBuilder = new DiskInfo.BuilderImpl(disk); + } + + @Override + Builder generatedId(String generatedId) { + infoBuilder.generatedId(generatedId); + return this; + } + + @Override + public Builder configuration(DiskConfiguration configuration) { + infoBuilder.configuration(configuration); + return this; + } + + @Override + public Builder diskId(DiskId diskId) { + infoBuilder.diskId(diskId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + Builder creationStatus(CreationStatus creationStatus) { + infoBuilder.creationStatus(creationStatus); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + Builder licenses(List licenses) { + infoBuilder.licenses(licenses); + return this; + } + + @Override + Builder attachedInstances(List attachedInstances) { + infoBuilder.attachedInstances(attachedInstances); + return this; + } + + @Override + Builder lastAttachTimestamp(Long lastAttachTimestamp) { + infoBuilder.lastAttachTimestamp(lastAttachTimestamp); + return this; + } + + @Override + Builder lastDetachTimestamp(Long lastDetachTimestamp) { + infoBuilder.lastDetachTimestamp(lastDetachTimestamp); + return this; + } + + @Override + public Disk build() { + return new Disk(compute, infoBuilder); + } + } + + Disk(Compute compute, DiskInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this disk exists. + * + * @return {@code true} if this disk exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(DiskOption.fields()) != null; + } + + /** + * Fetches current disk's latest information. Returns {@code null} if the disk does not exist. + * + * @param options disk options + * @return a {@code Disk} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Disk reload(DiskOption... options) { + return compute.getDisk(diskId(), options); + } + + /** + * Deletes this disk. + * + * @return a zone operation if the delete request was successfully sent, {@code null} if the disk + * was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... options) { + return compute.deleteDisk(diskId(), options); + } + + /** + * Creates a snapshot for this disk given the snapshot's name. + * + * @return a zone operation for snapshot creation + * @throws ComputeException upon failure + */ + public Operation createSnapshot(String snapshot, OperationOption... options) { + return compute.create(SnapshotInfo.of(SnapshotId.of(snapshot), diskId()), options); + } + + /** + * Creates a snapshot for this disk given the snapshot's name and description. + * + * @return a zone operation for snapshot creation + * @throws ComputeException upon failure + */ + public Operation createSnapshot(String snapshot, String description, OperationOption... 
options) { + SnapshotInfo snapshotInfo = SnapshotInfo.builder(SnapshotId.of(snapshot), diskId()) + .description(description) + .build(); + return compute.create(snapshotInfo, options); + } + + /** + * Creates an image for this disk given the image's name. + * + * @return a global operation if the image creation was successfully requested + * @throws ComputeException upon failure + */ + public Operation createImage(String image, OperationOption... options) { + ImageInfo imageInfo = ImageInfo.of(ImageId.of(image), DiskImageConfiguration.of(diskId())); + return compute.create(imageInfo, options); + } + + /** + * Creates an image for this disk given the image's name and description. + * + * @return a global operation if the image creation was successfully requested + * @throws ComputeException upon failure + */ + public Operation createImage(String image, String description, OperationOption... options) { + ImageInfo imageInfo = ImageInfo.builder(ImageId.of(image), DiskImageConfiguration.of(diskId())) + .description(description) + .build(); + return compute.create(imageInfo, options); + } + + /** + * Resizes this disk to the requested size. The new size must be larger than the previous one. + * + * @return a zone operation if the resize request was issued correctly, {@code null} if this disk + * was not found + * @throws ComputeException upon failure or if the new disk size is smaller than the previous one + */ + public Operation resize(long sizeGb, OperationOption... options) { + return compute.resize(diskId(), sizeGb, options); + } + + /** + * Returns the disk's {@code Compute} object used to issue requests. + */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Disk.class)) { + return false; + } + Disk other = (Disk) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Disk fromPb(Compute compute, com.google.api.services.compute.model.Disk diskPb) { + return new Disk(compute, new DiskInfo.BuilderImpl(diskPb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskConfiguration.java new file mode 100644 index 000000000000..1670bcf8b56b --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskConfiguration.java @@ -0,0 +1,203 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
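As a sketch of the image-creation and resize methods of `Disk` shown above (the disk name, image name, and new size are placeholders, and the new size must be larger than the current one):

```java
import com.google.cloud.compute.Compute;
import com.google.cloud.compute.ComputeOptions;
import com.google.cloud.compute.Disk;
import com.google.cloud.compute.DiskId;
import com.google.cloud.compute.Operation;

Compute compute = ComputeOptions.defaultInstance().service();
Disk disk = compute.getDisk(DiskId.of("us-central1-a", "disk-name"));
if (disk != null) {
  // request an image of the disk, then grow the disk to 200 GB
  Operation imageOperation = disk.createImage("disk-name-image");
  Operation resizeOperation = disk.resize(200L);
}
```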
+ */ + +package com.google.cloud.compute; + +import com.google.api.services.compute.model.Disk; +import com.google.common.base.MoreObjects; +import com.google.common.base.MoreObjects.ToStringHelper; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Base class for Google Compute Engine disk configurations. A disk can be used as primary storage + * for your virtual machine instances. Use {@link StandardDiskConfiguration} to create a standard + * disk given a disk type and size. Use {@link ImageDiskConfiguration} to create a disk from a + * Compute Engine disk image. Use {@link SnapshotDiskConfiguration} to create a disk from a Compute + * Engine disk snapshot. + * + * @see Block Storage + */ +public abstract class DiskConfiguration implements Serializable { + + private static final long serialVersionUID = -1783061701255428417L; + + private final Type type; + private final Long sizeGb; + private final DiskTypeId diskType; + + /** + * Type of a Google Compute Engine disk configuration. + */ + public enum Type { + /** + * A Google Compute Engine standard disk configuration. + */ + STANDARD, + + /** + * A Google Compute Engine disk configuration that creates a disk from an image. + */ + IMAGE, + + /** + * A Google Compute Engine disk configuration that creates a disk from a snapshot. + */ + SNAPSHOT + } + + /** + * Base builder for disk configurations. + * + * @param the disk configuration type + * @param the disk configuration builder + */ + public abstract static class Builder> { + + private Type type; + private Long sizeGb; + private DiskTypeId diskType; + + Builder(Type type) { + this.type = type; + } + + Builder(DiskConfiguration diskConfiguration) { + this.type = diskConfiguration.type; + this.sizeGb = diskConfiguration.sizeGb; + this.diskType = diskConfiguration.diskType; + } + + Builder(Type type, Disk diskPb) { + this.type = type; + this.sizeGb = diskPb.getSizeGb(); + if (diskPb.getType() != null) { + this.diskType = DiskTypeId.fromUrl(diskPb.getType()); + } + } + + @SuppressWarnings("unchecked") + protected B self() { + return (B) this; + } + + B type(Type type) { + this.type = type; + return self(); + } + + /** + * Sets the size of the persistent disk, in GB. + */ + public B sizeGb(Long sizeGb) { + this.sizeGb = sizeGb; + return self(); + } + + /** + * Sets the identity of the disk type. If not set {@code pd-standard} will be used. + */ + public B diskType(DiskTypeId diskType) { + this.diskType = diskType; + return self(); + } + + /** + * Creates an object. + */ + public abstract T build(); + } + + DiskConfiguration(Builder builder) { + this.type = builder.type; + this.sizeGb = builder.sizeGb; + this.diskType = builder.diskType; + } + + /** + * Returns the disk configuration's type. This method returns {@link Type#STANDARD} for a standard + * configuration that creates a disk given its type and size. This method returns + * {@link Type#SNAPSHOT} for a configuration that creates a disk from a Google Compute Engine + * snapshot. This method returns {@link Type#IMAGE} for a configuration that creates a disk + * from a Google Compute Engine image. + */ + public Type type() { + return type; + } + + /** + * Returns the size of the persistent disk, in GB. + */ + public Long sizeGb() { + return sizeGb; + } + + /** + * Returns the identity of the disk type. + */ + public DiskTypeId diskType() { + return diskType; + } + + /** + * Returns a builder for the object. 
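To show how the three configuration types described above are typically obtained, here is a hedged sketch; it assumes that `StandardDiskConfiguration` and `ImageDiskConfiguration` (defined elsewhere in this change) expose `of(...)` factories with the signatures used below, and all names are placeholders:

```java
import com.google.cloud.compute.DiskConfiguration;
import com.google.cloud.compute.DiskTypeId;
import com.google.cloud.compute.ImageDiskConfiguration;
import com.google.cloud.compute.ImageId;
import com.google.cloud.compute.StandardDiskConfiguration;

// A 100 GB disk of type pd-ssd (assumed factory signature)
DiskConfiguration standard =
    StandardDiskConfiguration.of(DiskTypeId.of("us-central1-a", "pd-ssd"), 100L);

// A disk initialized from an existing image (assumed factory signature)
DiskConfiguration fromImage =
    ImageDiskConfiguration.of(ImageId.of("debian-cloud", "debian-8-jessie-v20160329"));
```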
+ */ + public abstract Builder toBuilder(); + + ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this) + .add("type", type) + .add("sizeGb", sizeGb) + .add("diskType", diskType); + } + + @Override + public String toString() { + return toStringHelper().toString(); + } + + final int baseHashCode() { + return Objects.hash(type, sizeGb, diskType); + } + + final boolean baseEquals(DiskConfiguration diskConfiguration) { + return diskConfiguration != null + && getClass().equals(diskConfiguration.getClass()) + && Objects.equals(toPb(), diskConfiguration.toPb()); + } + + abstract DiskConfiguration setProjectId(String projectId); + + Disk toPb() { + Disk diskPb = new Disk(); + diskPb.setSizeGb(sizeGb); + if (diskType != null) { + diskPb.setType(diskType.selfLink()); + } + return diskPb; + } + + @SuppressWarnings("unchecked") + static T fromPb(Disk diskPb) { + if (diskPb.getSourceImage() != null) { + return (T) ImageDiskConfiguration.fromPb(diskPb); + } else if (diskPb.getSourceSnapshot() != null) { + return (T) SnapshotDiskConfiguration.fromPb(diskPb); + } + return (T) StandardDiskConfiguration.fromPb(diskPb); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskId.java new file mode 100644 index 000000000000..58f55beb9a9a --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskId.java @@ -0,0 +1,162 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine disk. + */ +public final class DiskId extends ResourceId { + + private static final String REGEX = ResourceId.REGEX + "zones/([^/]+)/disks/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -8761290740495870787L; + + private final String zone; + private final String disk; + + private DiskId(String project, String zone, String disk) { + super(project); + this.zone = checkNotNull(zone); + this.disk = checkNotNull(disk); + } + + /** + * Returns the name of the zone this disk belongs to. + */ + public String zone() { + return zone; + } + + /** + * Returns the identity of the zone this disk belongs to. + */ + public ZoneId zoneId() { + return ZoneId.of(project(), zone); + } + + /** + * Returns the name of the disk. The name must be 1-63 characters long and comply with RFC1035. + * Specifically, the name must match the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} + * which means the first character must be a lowercase letter, and all following characters must + * be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. 
+ * + * @see RFC1035 + */ + public String disk() { + return disk; + } + + @Override + public String selfLink() { + return super.selfLink() + "/zones/" + zone + "/disks/" + disk; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("zone", zone).add("disk", disk); + } + + @Override + public int hashCode() { + return Objects.hash(super.baseHashCode(), zone, disk); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof DiskId)) { + return false; + } + DiskId other = (DiskId) obj; + return baseEquals(other) + && Objects.equals(zone, other.zone) + && Objects.equals(disk, other.disk); + } + + @Override + DiskId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return DiskId.of(projectId, zone, disk); + } + + /** + * Returns a disk identity given the zone identity and the disk name. The disk name must be 1-63 + * characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static DiskId of(ZoneId zoneId, String disk) { + return new DiskId(zoneId.project(), zoneId.zone(), disk); + } + + /** + * Returns a disk identity given the zone and disk names. The disk name must be 1-63 characters + * long and comply with RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public static DiskId of(String zone, String disk) { + return new DiskId(null, zone, disk); + } + + /** + * Returns a disk identity given project, zone and disks names. The disk name must be 1-63 + * characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static DiskId of(String project, String zone, String disk) { + return new DiskId(project, zone, disk); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a disk URL. Returns + * {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static DiskId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid disk URL"); + } + return DiskId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskImageConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskImageConfiguration.java new file mode 100644 index 000000000000..a31c8199ec07 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskImageConfiguration.java @@ -0,0 +1,165 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. 
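A short sketch of the `DiskId` factories just shown; the project, zone, and disk names are placeholders:

```java
import com.google.cloud.compute.DiskId;
import com.google.cloud.compute.ZoneId;

// Zone-scoped identity; the project is filled in from the service options when used
DiskId diskId = DiskId.of("us-central1-a", "disk-name");

// Fully-qualified identity with an explicit project
DiskId qualifiedId = DiskId.of("my-project-id", "us-central1-a", "disk-name");

// Or build the identity from a ZoneId
DiskId fromZone = DiskId.of(ZoneId.of("my-project-id", "us-central1-a"), "disk-name");
```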
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Image; +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * A Google Compute Engine disk image configuration. This class can be used to create images from an + * existing Google Compute Engine disk. + */ +public class DiskImageConfiguration extends ImageConfiguration { + + private static final long serialVersionUID = 2716403667042981170L; + + private final DiskId sourceDisk; + private final String sourceDiskId; + + /** + * A builder for {@code DiskImageConfiguration} objects. + */ + public static final class Builder + extends ImageConfiguration.Builder { + + private DiskId sourceDisk; + private String sourceDiskId; + + private Builder() { + super(Type.DISK); + } + + private Builder(DiskImageConfiguration imageConfiguration) { + super(imageConfiguration); + this.sourceDisk = imageConfiguration.sourceDisk; + this.sourceDiskId = imageConfiguration.sourceDiskId; + } + + private Builder(Image imagePb) { + super(Type.DISK, imagePb); + this.sourceDisk = DiskId.fromUrl(imagePb.getSourceDisk()); + this.sourceDiskId = imagePb.getSourceDiskId(); + } + + /** + * Sets the identity of the source disk used to create the image. + */ + public Builder sourceDisk(DiskId sourceDisk) { + this.sourceDisk = checkNotNull(sourceDisk); + return this; + } + + Builder sourceDiskId(String sourceDiskId) { + this.sourceDiskId = sourceDiskId; + return this; + } + + /** + * Creates a {@code DiskImageConfiguration} object. + */ + @Override + public DiskImageConfiguration build() { + return new DiskImageConfiguration(this); + } + } + + private DiskImageConfiguration(Builder builder) { + super(builder); + this.sourceDisk = checkNotNull(builder.sourceDisk); + this.sourceDiskId = builder.sourceDiskId; + } + + /** + * Returns the identity of the source disk used to create this image. + */ + public DiskId sourceDisk() { + return sourceDisk; + } + + /** + * Returns the service-generated unique id of the disk used to create this image. This value may + * be used to determine whether the image was taken from the current or a previous instance of a + * given disk name. 
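A sketch of requesting an image from an existing disk with this configuration, using the `of(DiskId)` factory defined at the end of this class together with the `ImageInfo` and `ImageId` calls that `Disk.createImage` relies on above; the disk and image names are placeholders:

```java
import com.google.cloud.compute.Compute;
import com.google.cloud.compute.ComputeOptions;
import com.google.cloud.compute.DiskId;
import com.google.cloud.compute.DiskImageConfiguration;
import com.google.cloud.compute.ImageId;
import com.google.cloud.compute.ImageInfo;
import com.google.cloud.compute.Operation;

Compute compute = ComputeOptions.defaultInstance().service();
DiskImageConfiguration configuration =
    DiskImageConfiguration.of(DiskId.of("us-central1-a", "disk-name"));
ImageInfo imageInfo = ImageInfo.of(ImageId.of("image-name"), configuration);
// the same create(ImageInfo, ...) call used by Disk.createImage
Operation operation = compute.create(imageInfo);
```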
+ */ + public String sourceDiskId() { + return sourceDiskId; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper() + .add("sourceDisk", sourceDisk) + .add("sourceDiskId", sourceDiskId); + } + + @Override + public final int hashCode() { + return Objects.hash(baseHashCode(), sourceDisk, sourceDiskId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(DiskImageConfiguration.class) + && baseEquals((DiskImageConfiguration) obj); + } + + @Override + DiskImageConfiguration setProjectId(String projectId) { + if (sourceDisk.project() != null) { + return this; + } + return toBuilder().sourceDisk(sourceDisk.setProjectId(projectId)).build(); + } + + @Override + Image toPb() { + Image imagePb = super.toPb(); + imagePb.setSourceDisk(sourceDisk.selfLink()); + imagePb.setSourceDiskId(sourceDiskId); + return imagePb; + } + + /** + * Creates a builder for a {@code DiskImageConfiguration} given the source disk identity. + */ + public static Builder builder(DiskId sourceDisk) { + return new Builder().sourceDisk(sourceDisk); + } + + /** + * Creates a {@code DiskImageConfiguration} object given the source disk identity. + */ + public static DiskImageConfiguration of(DiskId sourceId) { + return builder(sourceId).build(); + } + + @SuppressWarnings("unchecked") + static DiskImageConfiguration fromPb(Image imagePb) { + return new Builder(imagePb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskInfo.java new file mode 100644 index 000000000000..aab6d90ff345 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskInfo.java @@ -0,0 +1,443 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Disk; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine persistent disk. A disk can be used as primary storage for your virtual + * machine instances. 
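Tying `DiskInfo` to the configurations above, here is a sketch of creating a disk; it uses the `DiskInfo.of(DiskId, DiskConfiguration)` factory defined later in this class, and assumes the `Compute` service exposes a `create(DiskInfo, OperationOption...)` method analogous to the `create(SnapshotInfo, ...)` and `create(ImageInfo, ...)` calls used earlier, as well as an `ImageDiskConfiguration.of(ImageId)` factory:

```java
import com.google.cloud.compute.Compute;
import com.google.cloud.compute.ComputeOptions;
import com.google.cloud.compute.DiskId;
import com.google.cloud.compute.DiskInfo;
import com.google.cloud.compute.ImageDiskConfiguration;
import com.google.cloud.compute.ImageId;
import com.google.cloud.compute.Operation;

Compute compute = ComputeOptions.defaultInstance().service();
ImageId imageId = ImageId.of("debian-cloud", "debian-8-jessie-v20160329");
DiskInfo diskInfo =
    DiskInfo.of(DiskId.of("us-central1-a", "disk-name"), ImageDiskConfiguration.of(imageId));
// create(DiskInfo, ...) is assumed here; it returns an operation that can be polled
Operation operation = compute.create(diskInfo);
```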
+ * + * @see Block Storage + */ +public class DiskInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public DiskInfo apply(Disk pb) { + return DiskInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Disk apply(DiskInfo diskType) { + return diskType.toPb(); + } + }; + + private static final long serialVersionUID = -7173418340679279619L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final DiskId diskId; + private final DiskConfiguration configuration; + private final Long creationTimestamp; + private final CreationStatus creationStatus; + private final String description; + private final List licenses; + private final List attachedInstances; + private final Long lastAttachTimestamp; + private final Long lastDetachTimestamp; + + /** + * The status of disk creation. + */ + public enum CreationStatus { + /** + * The disk is being created. + */ + CREATING, + + /** + * Disk creation failed. + */ + FAILED, + + /** + * The disk has been created and is ready to use. + */ + READY, + + /** + * The disk is being restored. + */ + RESTORING + } + + /** + * Builder for {@code DiskInfo} objects. + */ + public abstract static class Builder { + + abstract Builder generatedId(String generatedId); + + /** + * Sets the disk configuration. + */ + public abstract Builder configuration(DiskConfiguration configuration); + + /** + * Sets the disk identity. + */ + public abstract Builder diskId(DiskId diskId); + + abstract Builder creationTimestamp(Long creationTimestamp); + + abstract Builder creationStatus(CreationStatus creationStatus); + + /** + * Sets an optional textual description of the resource. + */ + public abstract Builder description(String description); + + abstract Builder licenses(List licenses); + + abstract Builder attachedInstances(List attachedInstances); + + abstract Builder lastAttachTimestamp(Long lastAttachTimestamp); + + abstract Builder lastDetachTimestamp(Long lastDetachTimestamp); + + /** + * Creates a {@code DiskInfo} object. 
+ */ + public abstract DiskInfo build(); + } + + static final class BuilderImpl extends Builder { + + private String generatedId; + private DiskId diskId; + private DiskConfiguration configuration; + private Long creationTimestamp; + private CreationStatus creationStatus; + private String description; + private List licenses; + private List attachedInstances; + private Long lastAttachTimestamp; + private Long lastDetachTimestamp; + + BuilderImpl(DiskId diskId, DiskConfiguration configuration) { + this.diskId = checkNotNull(diskId); + this.configuration = checkNotNull(configuration); + } + + BuilderImpl(DiskInfo diskInfo) { + this.generatedId = diskInfo.generatedId; + this.configuration = diskInfo.configuration; + this.creationTimestamp = diskInfo.creationTimestamp; + this.creationStatus = diskInfo.creationStatus; + this.diskId = diskInfo.diskId; + this.description = diskInfo.description; + this.licenses = diskInfo.licenses; + this.attachedInstances = diskInfo.attachedInstances; + this.lastAttachTimestamp = diskInfo.lastAttachTimestamp; + this.lastDetachTimestamp = diskInfo.lastDetachTimestamp; + } + + BuilderImpl(Disk diskPb) { + if (diskPb.getId() != null) { + this.generatedId = diskPb.getId().toString(); + } + this.configuration = DiskConfiguration.fromPb(diskPb); + if (diskPb.getCreationTimestamp() != null) { + this.creationTimestamp = TIMESTAMP_FORMATTER.parseMillis(diskPb.getCreationTimestamp()); + } + if (diskPb.getStatus() != null) { + this.creationStatus = CreationStatus.valueOf(diskPb.getStatus()); + } + this.diskId = DiskId.fromUrl(diskPb.getSelfLink()); + this.description = diskPb.getDescription(); + if (diskPb.getLicenses() != null) { + this.licenses = Lists.transform(diskPb.getLicenses(), LicenseId.FROM_URL_FUNCTION); + } + if (diskPb.getUsers() != null) { + this.attachedInstances = Lists.transform(diskPb.getUsers(), InstanceId.FROM_URL_FUNCTION); + } + if (diskPb.getLastAttachTimestamp() != null) { + this.lastAttachTimestamp = TIMESTAMP_FORMATTER.parseMillis(diskPb.getLastAttachTimestamp()); + } + if (diskPb.getLastDetachTimestamp() != null) { + this.lastDetachTimestamp = TIMESTAMP_FORMATTER.parseMillis(diskPb.getLastDetachTimestamp()); + } + } + + @Override + BuilderImpl generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + public BuilderImpl configuration(DiskConfiguration configuration) { + this.configuration = checkNotNull(configuration); + return this; + } + + @Override + public BuilderImpl diskId(DiskId diskId) { + this.diskId = checkNotNull(diskId); + return this; + } + + @Override + BuilderImpl creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + BuilderImpl creationStatus(CreationStatus creationStatus) { + this.creationStatus = creationStatus; + return this; + } + + @Override + public BuilderImpl description(String description) { + this.description = description; + return this; + } + + @Override + BuilderImpl licenses(List licenses) { + this.licenses = licenses != null ? ImmutableList.copyOf(licenses) : null; + return this; + } + + @Override + BuilderImpl attachedInstances(List attachedInstances) { + this.attachedInstances = + attachedInstances != null ? 
ImmutableList.copyOf(attachedInstances) : null; + return this; + } + + @Override + BuilderImpl lastAttachTimestamp(Long lastAttachTimestamp) { + this.lastAttachTimestamp = lastAttachTimestamp; + return this; + } + + @Override + BuilderImpl lastDetachTimestamp(Long lastDetachTimestamp) { + this.lastDetachTimestamp = lastDetachTimestamp; + return this; + } + + @Override + public DiskInfo build() { + return new DiskInfo(this); + } + } + + DiskInfo(BuilderImpl builder) { + this.generatedId = builder.generatedId; + this.configuration = builder.configuration; + this.creationTimestamp = builder.creationTimestamp; + this.creationStatus = builder.creationStatus; + this.diskId = builder.diskId; + this.description = builder.description; + this.licenses = builder.licenses; + this.attachedInstances = builder.attachedInstances; + this.lastAttachTimestamp = builder.lastAttachTimestamp; + this.lastDetachTimestamp = builder.lastDetachTimestamp; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns the service-generated unique identifier for the disk. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the disk configuration. + */ + @SuppressWarnings("unchecked") + public T configuration() { + return (T) configuration; + } + + /** + * Returns the disk identity. + */ + public DiskId diskId() { + return diskId; + } + + /** + * Returns the creation status of the disk. + */ + public CreationStatus creationStatus() { + return creationStatus; + } + + /** + * Returns a textual description of the disk. + */ + public String description() { + return description; + } + + /** + * Returns all applicable publicly visible licenses for the disk. + */ + public List licenses() { + return licenses; + } + + /** + * Returns all the identities of the instances this disk is attached to. + */ + public List attachedInstances() { + return attachedInstances; + } + + /** + * Returns the last attach timestamp in milliseconds since epoch. + */ + public Long lastAttachTimestamp() { + return lastAttachTimestamp; + } + + /** + * Returns the last detach timestamp in milliseconds since epoch. + */ + public Long lastDetachTimestamp() { + return lastDetachTimestamp; + } + + /** + * Returns a builder for the object. + */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("diskId", diskId) + .add("configuration", configuration) + .add("creationTimestamp", creationTimestamp) + .add("creationStatus", creationStatus) + .add("description", description) + .add("licenses", licenses) + .add("attachedInstances", attachedInstances) + .add("lastAttachTimestamp", lastAttachTimestamp) + .add("lastDetachTimestamp", lastDetachTimestamp) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(diskId, configuration, creationTimestamp, creationStatus, description, + licenses, attachedInstances, lastAttachTimestamp, lastDetachTimestamp); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(DiskInfo.class) + && Objects.equals(toPb(), ((DiskInfo) obj).toPb()); + } + + /** + * Returns a builder for a {@code DiskInfo} object given its identity and configuration. Use + * {@link StandardDiskConfiguration} to create a simple disk given its type and size. 
Use + * {@link SnapshotDiskConfiguration} to create a disk from a snapshot. Use + * {@link ImageDiskConfiguration} to create a disk from a disk image. + */ + public static Builder builder(DiskId diskId, DiskConfiguration configuration) { + return new BuilderImpl(diskId, configuration); + } + + /** + * Returns a {@code DiskInfo} object given its identity and configuration. Use + * {@link StandardDiskConfiguration} to create a simple disk given its type and size. Use + * {@link SnapshotDiskConfiguration} to create a disk from a snapshot. Use + * {@link ImageDiskConfiguration} to create a disk from a disk image. + */ + public static DiskInfo of(DiskId diskId, DiskConfiguration configuration) { + return builder(diskId, configuration).build(); + } + + DiskInfo setProjectId(String projectId) { + return toBuilder() + .diskId(diskId.setProjectId(projectId)) + .configuration(configuration.setProjectId(projectId)) + .build(); + } + + Disk toPb() { + Disk diskPb = configuration.toPb(); + if (generatedId != null) { + diskPb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + diskPb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + diskPb.setZone(diskId.zoneId().selfLink()); + if (creationStatus != null) { + diskPb.setStatus(creationStatus.toString()); + } + diskPb.setName(diskId.disk()); + diskPb.setDescription(description); + diskPb.setSelfLink(diskId.selfLink()); + if (licenses != null) { + diskPb.setLicenses(Lists.transform(licenses, LicenseId.TO_URL_FUNCTION)); + } + if (attachedInstances != null) { + diskPb.setUsers(Lists.transform(attachedInstances, InstanceId.TO_URL_FUNCTION)); + } + if (lastAttachTimestamp != null) { + diskPb.setLastAttachTimestamp(TIMESTAMP_FORMATTER.print(lastAttachTimestamp)); + } + if (lastDetachTimestamp != null) { + diskPb.setLastDetachTimestamp(TIMESTAMP_FORMATTER.print(lastDetachTimestamp)); + } + return diskPb; + } + + static DiskInfo fromPb(Disk diskPb) { + return new BuilderImpl(diskPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskType.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskType.java new file mode 100644 index 000000000000..cbe6f625e764 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskType.java @@ -0,0 +1,243 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.Objects; + +/** + * A Google Compute Engine disk type. A disk type represents the type of disk to use, such as + * {@code pd-ssd} or {@code pd-standard}. 
+ * + * @see Disk Types + */ +public class DiskType implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public DiskType apply(com.google.api.services.compute.model.DiskType pb) { + return DiskType.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.DiskType apply(DiskType diskType) { + return diskType.toPb(); + } + }; + + private static final long serialVersionUID = -944042261695072026L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final DiskTypeId diskTypeId; + private final Long creationTimestamp; + private final String description; + private final String validDiskSize; + private final Long defaultDiskSizeGb; + private final DeprecationStatus deprecationStatus; + + static final class Builder { + + private String generatedId; + private DiskTypeId diskTypeId; + private Long creationTimestamp; + private String description; + private String validDiskSize; + private Long defaultDiskSizeGb; + private DeprecationStatus deprecationStatus; + + private Builder() {} + + Builder generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + Builder creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + Builder diskTypeId(DiskTypeId diskTypeId) { + this.diskTypeId = diskTypeId; + return this; + } + + Builder description(String description) { + this.description = description; + return this; + } + + Builder validDiskSize(String validDiskSize) { + this.validDiskSize = validDiskSize; + return this; + } + + Builder defaultDiskSizeGb(Long defaultDiskSizeGb) { + this.defaultDiskSizeGb = defaultDiskSizeGb; + return this; + } + + Builder deprecationStatus(DeprecationStatus deprecationStatus) { + this.deprecationStatus = deprecationStatus; + return this; + } + + DiskType build() { + return new DiskType(this); + } + } + + private DiskType(Builder builder) { + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.diskTypeId = builder.diskTypeId; + this.description = builder.description; + this.validDiskSize = builder.validDiskSize; + this.defaultDiskSizeGb = builder.defaultDiskSizeGb; + this.deprecationStatus = builder.deprecationStatus; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns the disk type's identity. + */ + public DiskTypeId diskTypeId() { + return diskTypeId; + } + + /** + * Returns the service-generated unique identifier for the disk type. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns a textual description of the disk type. + */ + public String description() { + return description; + } + + /** + * Returns an optional textual description of the valid disk size, such as "10GB-10TB". + */ + public String validDiskSize() { + return validDiskSize; + } + + /** + * Returns the service-defined default disk size in GB. + */ + public Long defaultDiskSizeGb() { + return defaultDiskSizeGb; + } + + /** + * Returns the deprecation status of the disk type. If {@link DeprecationStatus#status()} is + * either {@link DeprecationStatus.Status#DELETED} or {@link DeprecationStatus.Status#OBSOLETE} + * the disk type should not be used. Returns {@code null} if the disk type is not deprecated. 
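As a sketch of the deprecation check described above, assuming the `Compute` service exposes a `getDiskType(String zone, String diskType)` getter (the zone and type names are placeholders):

```java
import com.google.cloud.compute.Compute;
import com.google.cloud.compute.ComputeOptions;
import com.google.cloud.compute.DeprecationStatus;
import com.google.cloud.compute.DiskType;
import com.google.cloud.compute.DiskTypeId;

Compute compute = ComputeOptions.defaultInstance().service();
// getDiskType(zone, diskType) is an assumed getter on the Compute interface
DiskType diskType = compute.getDiskType("us-central1-a", "pd-standard");
DeprecationStatus<DiskTypeId> deprecation = diskType.deprecationStatus();
if (deprecation == null
    || deprecation.status() == DeprecationStatus.Status.DEPRECATED) {
  // not deprecated, or only soft-deprecated: the disk type can still be used
  Long defaultSizeGb = diskType.defaultDiskSizeGb();
}
```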
+ */ + public DeprecationStatus deprecationStatus() { + return deprecationStatus; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("description", description) + .add("validDiskSize", validDiskSize) + .add("defaultDiskSizeGb", defaultDiskSizeGb) + .add("deprecationStatus", deprecationStatus) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(diskTypeId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(DiskType.class) + && Objects.equals(toPb(), ((DiskType) obj).toPb()); + } + + com.google.api.services.compute.model.DiskType toPb() { + com.google.api.services.compute.model.DiskType diskTypePb = + new com.google.api.services.compute.model.DiskType(); + if (generatedId != null) { + diskTypePb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + diskTypePb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + diskTypePb.setDescription(description); + diskTypePb.setValidDiskSize(validDiskSize); + diskTypePb.setSelfLink(diskTypeId.selfLink()); + diskTypePb.setDefaultDiskSizeGb(defaultDiskSizeGb); + diskTypePb.setZone(diskTypeId.zoneId().selfLink()); + if (deprecationStatus != null) { + diskTypePb.setDeprecated(deprecationStatus.toPb()); + } + return diskTypePb; + } + + static Builder builder() { + return new Builder(); + } + + static DiskType fromPb(com.google.api.services.compute.model.DiskType diskTypePb) { + Builder builder = builder(); + if (diskTypePb.getId() != null) { + builder.generatedId(diskTypePb.getId().toString()); + } + if (diskTypePb.getCreationTimestamp() != null) { + builder.creationTimestamp(TIMESTAMP_FORMATTER.parseMillis(diskTypePb.getCreationTimestamp())); + } + builder.diskTypeId(DiskTypeId.fromUrl(diskTypePb.getSelfLink())); + builder.description(diskTypePb.getDescription()); + builder.validDiskSize(diskTypePb.getValidDiskSize()); + builder.defaultDiskSizeGb(diskTypePb.getDefaultDiskSizeGb()); + if (diskTypePb.getDeprecated() != null) { + builder.deprecationStatus( + DeprecationStatus.fromPb(diskTypePb.getDeprecated(), DiskTypeId.FROM_URL_FUNCTION)); + } + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskTypeId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskTypeId.java new file mode 100644 index 000000000000..1ed0cfdea472 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/DiskTypeId.java @@ -0,0 +1,153 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine disk type. + */ +public final class DiskTypeId extends ResourceId { + + static final Function FROM_URL_FUNCTION = new Function() { + @Override + public DiskTypeId apply(String pb) { + return DiskTypeId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = new Function() { + @Override + public String apply(DiskTypeId diskTypeId) { + return diskTypeId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "zones/([^/]+)/diskTypes/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 7337881474103686219L; + + private final String zone; + private final String type; + + private DiskTypeId(String project, String zone, String type) { + super(project); + this.zone = checkNotNull(zone); + this.type = checkNotNull(type); + } + + /** + * Returns the name of the disk type. + */ + public String type() { + return type; + } + + /** + * Returns the name of the zone this disk type belongs to. + */ + public String zone() { + return zone; + } + + /** + * Returns the identity of the zone this disk type belongs to. + */ + public ZoneId zoneId() { + return ZoneId.of(project(), zone); + } + + @Override + public String selfLink() { + return super.selfLink() + "/zones/" + zone + "/diskTypes/" + type; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("zone", zone).add("type", type); + } + + @Override + public int hashCode() { + return Objects.hash(super.baseHashCode(), zone, type); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof DiskTypeId)) { + return false; + } + DiskTypeId other = (DiskTypeId) obj; + return baseEquals(other) + && Objects.equals(zone, other.zone) + && Objects.equals(type, other.type); + } + + @Override + DiskTypeId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return DiskTypeId.of(projectId, zone, type); + } + + /** + * Returns a disk type identity given the zone identity and the disk type name. + */ + public static DiskTypeId of(ZoneId zoneId, String type) { + return new DiskTypeId(zoneId.project(), zoneId.zone(), type); + } + + /** + * Returns a disk type identity given the zone and disk type names. + */ + public static DiskTypeId of(String zone, String type) { + return of(ZoneId.of(null, zone), type); + } + + /** + * Returns a disk type identity given project disk, zone and disk type names. + */ + public static DiskTypeId of(String project, String zone, String type) { + return of(ZoneId.of(project, zone), type); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a disk type URL. + * Returns {@code false} otherwise. 
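A short sketch of the `DiskTypeId` factories defined above; the project, zone, and type names are placeholders:

```java
import com.google.cloud.compute.DiskTypeId;
import com.google.cloud.compute.ZoneId;

// Zone-scoped disk type identity
DiskTypeId diskTypeId = DiskTypeId.of("us-central1-a", "pd-ssd");

// Fully-qualified identity with an explicit project
DiskTypeId qualifiedId = DiskTypeId.of("my-project-id", "us-central1-a", "pd-ssd");

// The identity of the zone the disk type belongs to
ZoneId zoneId = qualifiedId.zoneId();
```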
+ */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static DiskTypeId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid disk type URL"); + } + return DiskTypeId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ForwardingRuleId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ForwardingRuleId.java new file mode 100644 index 000000000000..09d447cb8072 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ForwardingRuleId.java @@ -0,0 +1,93 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * Base class for Google Compute Engine forwarding rule identities. + */ +public abstract class ForwardingRuleId extends ResourceId { + + private static final long serialVersionUID = -4352410760458355391L; + + private final String rule; + + ForwardingRuleId(String project, String rule) { + super(project); + this.rule = checkNotNull(rule); + } + + /** + * Possible types for a Google Compute Engine forwarding rule identity. + */ + enum Type { + /** + * Global forwarding rules are used to forward traffic to the correct load balancer for HTTP(S) + * load balancing. + */ + GLOBAL, + + /** + * Region forwarding rules are used to forward traffic to the correct pool of target virtual + * machines. + */ + REGION + } + + /** + * Returns the type of this forwarding rule identity. + */ + public abstract Type type(); + + /** + * Returns the name of the forwarding rule. The forwarding rule name must be 1-63 characters long + * and comply with RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. 
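+ * <p>For example, {@code my-rule} and {@code rule-1} would be acceptable names, while
+ * {@code -rule}, {@code Rule1} and {@code my-rule-} would not (all names here are illustrative).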
+ * + * @see RFC1035 + */ + public String rule() { + return rule; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("rule", rule); + } + + @Override + final int baseHashCode() { + return Objects.hash(super.baseHashCode(), rule); + } + + @Override + final boolean baseEquals(ResourceId resourceId) { + return resourceId instanceof ForwardingRuleId + && super.baseEquals(resourceId) + && Objects.equals(rule, ((ForwardingRuleId) resourceId).rule); + } + + @Override + abstract ForwardingRuleId setProjectId(String projectId); +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalAddressId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalAddressId.java new file mode 100644 index 000000000000..45232edeff48 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalAddressId.java @@ -0,0 +1,104 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine global address. + */ +public final class GlobalAddressId extends AddressId { + + private static final String REGEX = ResourceId.REGEX + "global/addresses/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -2950815290049218593L; + + private GlobalAddressId(String project, String address) { + super(project, address); + } + + @Override + public Type type() { + return Type.GLOBAL; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/addresses/" + address(); + } + + @Override + public int hashCode() { + return baseHashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj == this || obj instanceof GlobalAddressId && baseEquals((GlobalAddressId) obj); + } + + @Override + GlobalAddressId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return GlobalAddressId.of(projectId, address()); + } + + /** + * Returns an address identity given the address name. The address name must be 1-63 characters + * long and comply with RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public static GlobalAddressId of(String address) { + return new GlobalAddressId(null, address); + } + + /** + * Returns an address identity given project and address names. The address name must be 1-63 + * characters long and comply with RFC1035. 
Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static GlobalAddressId of(String project, String address) { + return new GlobalAddressId(project, address); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a global address + * URL. Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static GlobalAddressId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid global address URL"); + } + return GlobalAddressId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalForwardingRuleId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalForwardingRuleId.java new file mode 100644 index 000000000000..b9acfa989b85 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalForwardingRuleId.java @@ -0,0 +1,123 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.common.base.Function; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine global forwarding rule. 
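+ * <p>A minimal usage sketch; the project and rule names are illustrative, not real resources:
+ * <pre> {@code
+ * GlobalForwardingRuleId ruleId = GlobalForwardingRuleId.of("my-project", "my-rule");
+ * String selfLink = ruleId.selfLink();
+ * }</pre>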
+ */ +public final class GlobalForwardingRuleId extends ForwardingRuleId { + + static final Function FROM_URL_FUNCTION = + new Function() { + @Override + public GlobalForwardingRuleId apply(String pb) { + return GlobalForwardingRuleId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = + new Function() { + @Override + public String apply(GlobalForwardingRuleId forwardingRuleId) { + return forwardingRuleId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "global/forwardingRules/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -2648031793037534254L; + + private GlobalForwardingRuleId(String project, String rule) { + super(project, rule); + } + + @Override + public Type type() { + return Type.GLOBAL; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/forwardingRules/" + rule(); + } + + @Override + public int hashCode() { + return baseHashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj instanceof GlobalForwardingRuleId + && baseEquals((GlobalForwardingRuleId) obj); + } + + @Override + GlobalForwardingRuleId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return GlobalForwardingRuleId.of(projectId, rule()); + } + + /** + * Returns a forwarding rule identity given the rule name. The forwarding rule name must be 1-63 + * characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static GlobalForwardingRuleId of(String rule) { + return new GlobalForwardingRuleId(null, rule); + } + + /** + * Returns a forwarding rule identity given the project rule names. The forwarding rule name must + * be 1-63 characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static GlobalForwardingRuleId of(String project, String rule) { + return new GlobalForwardingRuleId(project, rule); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a global forwarding + * rule URL. Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static GlobalForwardingRuleId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid global forwarding rule URL"); + } + return GlobalForwardingRuleId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalOperationId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalOperationId.java new file mode 100644 index 000000000000..ee3e4fc2d40e --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/GlobalOperationId.java @@ -0,0 +1,92 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine global operation. + */ +public final class GlobalOperationId extends OperationId { + + private static final String REGEX = ResourceId.REGEX + "global/operations/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 3945756772641577962L; + + private GlobalOperationId(String project, String operation) { + super(project, operation); + } + + @Override + public Type type() { + return Type.GLOBAL; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/operations/" + operation(); + } + + @Override + public int hashCode() { + return baseHashCode(); + } + + @Override + public boolean equals(Object obj) { + return obj == this || obj instanceof GlobalOperationId && baseEquals((GlobalOperationId) obj); + } + + @Override + GlobalOperationId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return GlobalOperationId.of(projectId, operation()); + } + + /** + * Returns a global operation identity given the operation name. + */ + public static GlobalOperationId of(String operation) { + return new GlobalOperationId(null, operation); + } + + /** + * Returns a global operation identity given project and operation names. + */ + public static GlobalOperationId of(String project, String operation) { + return new GlobalOperationId(project, operation); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a global operation + * URL. Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static GlobalOperationId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid global operation URL"); + } + return GlobalOperationId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Image.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Image.java new file mode 100644 index 000000000000..e457ee71da13 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Image.java @@ -0,0 +1,214 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.ImageOption; +import com.google.cloud.compute.Compute.OperationOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine Image. An image contains a boot loader, an operating system and a root + * file system that is necessary for starting an instance. Compute Engine offers publicly-available + * images of certain operating systems that you can use, or you can create a custom image. A custom + * image is an image created from one of your virtual machine instances that contains your specific + * instance configurations. To get an {@code Image} object with the most recent information use + * {@link #reload}. {@code Image} adds a layer of service-related functionality + * over {@link ImageInfo}. + * + * @see Images + */ +public class Image extends ImageInfo { + + private static final long serialVersionUID = 4623766590317494020L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Image} objects. + */ + public static class Builder extends ImageInfo.Builder { + + private final Compute compute; + private final ImageInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, ImageId imageId, ImageConfiguration configuration) { + this.compute = compute; + this.infoBuilder = new ImageInfo.BuilderImpl(); + this.infoBuilder.imageId(imageId); + this.infoBuilder.configuration(configuration); + } + + Builder(Image image) { + this.compute = image.compute; + this.infoBuilder = new ImageInfo.BuilderImpl(image); + } + + @Override + Builder generatedId(String generatedId) { + infoBuilder.generatedId(generatedId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + public Builder imageId(ImageId imageId) { + infoBuilder.imageId(imageId); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + public Builder configuration(ImageConfiguration configuration) { + infoBuilder.configuration(configuration); + return this; + } + + @Override + Builder status(Status status) { + infoBuilder.status(status); + return this; + } + + @Override + Builder diskSizeGb(Long diskSizeGb) { + infoBuilder.diskSizeGb(diskSizeGb); + return this; + } + + @Override + Builder licenses(List licenses) { + infoBuilder.licenses(licenses); + return this; + } + + @Override + Builder deprecationStatus(DeprecationStatus deprecationStatus) { + infoBuilder.deprecationStatus(deprecationStatus); + return this; + } + + @Override + public Image build() { + return new Image(compute, infoBuilder); + } + } + + Image(Compute compute, ImageInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this image exists. + * + * @return {@code true} if this image exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(ImageOption.fields()) != null; + } + + /** + * Fetches current image' latest information. Returns {@code null} if the image does not exist. 
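+ * <p>A hypothetical example, assuming {@code image} is an {@code Image} previously returned by
+ * the service:
+ * <pre> {@code
+ * Image latest = image.reload();
+ * if (latest == null) {
+ *   // the image was deleted since this object was fetched
+ * }
+ * }</pre>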
+ * + * @param options image options + * @return an {@code Image} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Image reload(ImageOption... options) { + return compute.getImage(imageId(), options); + } + + /** + * Deletes this image. + * + * @return a global operation if the delete request was successfully sent, {@code null} if the + * image was not found + * @throws ComputeException upon failure or if this image is a publicly-available image + */ + public Operation delete(OperationOption... options) { + return compute.deleteImage(imageId(), options); + } + + /** + * Deprecates this image. + * + * @return a global operation if the deprecation request was successfully sent, {@code null} if + * the image was not found + * @throws ComputeException upon failure or if this image is a publicly-available image + */ + public Operation deprecate(DeprecationStatus deprecationStatus, + OperationOption... options) { + return compute.deprecate(imageId(), deprecationStatus, options); + } + + /** + * Returns the image's {@code Compute} object used to issue requests. + */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Image.class)) { + return false; + } + Image other = (Image) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Image fromPb(Compute compute, com.google.api.services.compute.model.Image imagePb) { + return new Image(compute, new ImageInfo.BuilderImpl(imagePb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageConfiguration.java new file mode 100644 index 000000000000..34b12dab16af --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageConfiguration.java @@ -0,0 +1,190 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.api.services.compute.model.Image; +import com.google.common.base.MoreObjects; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Base class for Google Compute Engine image configuration. Use {@link DiskImageConfiguration} to + * create an image from an existing Google Compute Engine disk. Use + * {@link StorageImageConfiguration} to create an image from a file stored in Google Cloud Storage. 
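+ * <p>A sketch of selecting a configuration, assuming {@code diskId} identifies an existing disk;
+ * the {@code of(DiskId)} factory shown here is assumed from the subclass API and is illustrative:
+ * <pre> {@code
+ * ImageConfiguration configuration = DiskImageConfiguration.of(diskId);
+ * }</pre>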
+ */ +public abstract class ImageConfiguration implements Serializable { + + private static final long serialVersionUID = -9154332316597745316L; + + private final Type type; + private final SourceType sourceType; + private final Long archiveSizeBytes; + + /** + * Type of a Google Compute Engine image. + */ + public enum Type { + /** + * A Google Compute Engine image created from a Google Compute Engine disk. + */ + DISK, + + /** + * A Google Compute Engine image created from a file saved in Google Cloud Storage. + */ + STORAGE + } + + /** + * Image source type. The only admissible value is {@code RAW}. + */ + public enum SourceType { + RAW + } + + /** + * Base builder for image configurations. + * + * @param the image configuration class + * @param the image configuration builder + */ + public abstract static class Builder> { + + private Type type; + private SourceType sourceType; + private Long archiveSizeBytes; + + Builder(Type type) { + this.type = type; + } + + Builder(ImageConfiguration imageConfiguration) { + this.type = imageConfiguration.type; + this.sourceType = imageConfiguration.sourceType; + this.archiveSizeBytes = imageConfiguration.archiveSizeBytes; + } + + Builder(Type type, Image imagePb) { + this.type = type; + if (imagePb.getSourceType() != null) { + this.sourceType = SourceType.valueOf(imagePb.getSourceType()); + } + this.archiveSizeBytes = imagePb.getArchiveSizeBytes(); + } + + @SuppressWarnings("unchecked") + B self() { + return (B) this; + } + + B type(Type type) { + this.type = type; + return self(); + } + + B sourceType(SourceType sourceType) { + this.sourceType = sourceType; + return self(); + } + + B archiveSizeBytes(Long archiveSizeBytes) { + this.archiveSizeBytes = archiveSizeBytes; + return self(); + } + + /** + * Creates a configuration object. + */ + public abstract T build(); + } + + ImageConfiguration(Builder builder) { + this.type = builder.type; + this.sourceType = builder.sourceType; + this.archiveSizeBytes = builder.archiveSizeBytes; + } + + /** + * Returns the image's type. This method returns {@link Type#DISK} if this image was created from + * an existing disk. This method returns {@link Type#STORAGE} if this image was created from a + * file in Google Cloud Storage. + */ + public Type type() { + return type; + } + + /** + * Returns the source type of the disk. The default and only value is {@link SourceType#RAW}. + */ + public SourceType sourceType() { + return sourceType; + } + + /** + * Returns the size of the image archive stored in Google Cloud Storage (in bytes). + */ + public Long archiveSizeBytes() { + return archiveSizeBytes; + } + + /** + * Returns a builder for the object. 
+ */ + public abstract Builder toBuilder(); + + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this) + .add("type", type) + .add("sourceType", sourceType) + .add("archiveSizeBytes", archiveSizeBytes); + } + + @Override + public String toString() { + return toStringHelper().toString(); + } + + final int baseHashCode() { + return Objects.hash(type, sourceType, archiveSizeBytes); + } + + final boolean baseEquals(ImageConfiguration imageConfiguration) { + return imageConfiguration != null + && getClass().equals(imageConfiguration.getClass()) + && Objects.equals(toPb(), imageConfiguration.toPb()); + } + + abstract ImageConfiguration setProjectId(String projectId); + + Image toPb() { + Image imagePb = new Image(); + if (sourceType != null) { + imagePb.setSourceType(sourceType.name()); + } + imagePb.setArchiveSizeBytes(archiveSizeBytes); + return imagePb; + } + + @SuppressWarnings("unchecked") + static T fromPb(Image imagePb) { + if (imagePb.getSourceDisk() != null) { + return (T) DiskImageConfiguration.fromPb(imagePb); + } + return (T) StorageImageConfiguration.fromPb(imagePb); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageDiskConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageDiskConfiguration.java new file mode 100644 index 000000000000..cf8ede2f061a --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageDiskConfiguration.java @@ -0,0 +1,178 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Disk; +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * A Google Compute Engine disk configuration to create a disk from a Google Compute Engine image. + * + * @see Block Storage + */ +public class ImageDiskConfiguration extends DiskConfiguration { + + private static final long serialVersionUID = 6469117882950722812L; + + private final ImageId sourceImage; + private final String sourceImageId; + + /** + * A builder for {@code ImageDiskConfiguration} objects. + */ + public static class Builder + extends DiskConfiguration.Builder { + + private ImageId sourceImage; + private String sourceImageId; + + private Builder(ImageId sourceImage) { + super(Type.IMAGE); + this.sourceImage = checkNotNull(sourceImage); + } + + private Builder(ImageDiskConfiguration configuration) { + super(configuration); + this.sourceImage = configuration.sourceImage; + this.sourceImageId = configuration.sourceImageId; + } + + private Builder(Disk diskPb) { + super(Type.IMAGE, diskPb); + this.sourceImage = ImageId.fromUrl(diskPb.getSourceImage()); + this.sourceImageId = diskPb.getSourceImageId(); + } + + /** + * Sets the size of the persistent disk, in GB. If not set the disk will have the size of the + * image. 
This value can be larger than the image's size. If the provided size is smaller than + * the image's size then disk creation will fail. + */ + @Override + public Builder sizeGb(Long sizeGb) { + super.sizeGb(sizeGb); + return this; + } + + /** + * Sets the identity of the source image used to create the disk. + */ + public Builder sourceImage(ImageId sourceImage) { + this.sourceImage = checkNotNull(sourceImage); + return this; + } + + Builder sourceImageId(String sourceImageId) { + this.sourceImageId = sourceImageId; + return this; + } + + /** + * Creates an {@code ImageDiskConfiguration} object. + */ + @Override + public ImageDiskConfiguration build() { + return new ImageDiskConfiguration(this); + } + } + + private ImageDiskConfiguration(Builder builder) { + super(builder); + this.sourceImage = builder.sourceImage; + this.sourceImageId = builder.sourceImageId; + } + + /** + * Returns the identity of the source image used to create the disk. + */ + public ImageId sourceImage() { + return sourceImage; + } + + /** + * Returns the service-generated unique id of the image used to create this disk. This value + * identifies the exact image that was used to create this persistent disk. For example, if you + * created the persistent disk from an image that was later deleted and recreated under the same + * name, the source image service-generated id would identify the exact version of the image that + * was used. + */ + public String sourceImageId() { + return sourceImageId; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper() + .add("sourceImage", sourceImage) + .add("sourceImageId", sourceImageId); + } + + @Override + public final int hashCode() { + return Objects.hash(baseHashCode(), sourceImage, sourceImageId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(ImageDiskConfiguration.class) + && baseEquals((ImageDiskConfiguration) obj); + } + + @Override + ImageDiskConfiguration setProjectId(String projectId) { + Builder builder = toBuilder().sourceImage(sourceImage.setProjectId(projectId)); + if (diskType() != null) { + builder.diskType(diskType().setProjectId(projectId)); + } + return builder.build(); + } + + @Override + Disk toPb() { + return super.toPb().setSourceImage(sourceImage.selfLink()).setSourceImageId(sourceImageId); + } + + /** + * Returns a builder for an {@code ImageDiskConfiguration} object given the image identity. + */ + public static Builder builder(ImageId imageId) { + return new Builder(imageId); + } + + /** + * Returns an {@code ImageDiskConfiguration} object given the image identity. + */ + public static ImageDiskConfiguration of(ImageId imageId) { + return builder(imageId).build(); + } + + @SuppressWarnings("unchecked") + static ImageDiskConfiguration fromPb(Disk diskPb) { + return new Builder(diskPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageId.java new file mode 100644 index 000000000000..51a252afd6cc --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageId.java @@ -0,0 +1,145 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine image. + */ +public final class ImageId extends ResourceId { + + static final Function FROM_URL_FUNCTION = new Function() { + @Override + public ImageId apply(String pb) { + return ImageId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = new Function() { + @Override + public String apply(ImageId imageId) { + return imageId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "global/images/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 6434553917859414341L; + + private final String image; + + private ImageId(String project, String image) { + super(project); + this.image = checkNotNull(image); + } + + /** + * Returns the name of the image. The name must be 1-63 characters long and comply with RFC1035. + * Specifically, the name must match the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} + * which means the first character must be a lowercase letter, and all following characters must + * be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public String image() { + return image; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/images/" + image; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("image", image); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), image); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ImageId)) { + return false; + } + ImageId other = (ImageId) obj; + return baseEquals(other) && Objects.equals(image, other.image); + } + + @Override + ImageId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return ImageId.of(projectId, image); + } + + /** + * Returns an image identity given the image name. The image name must be 1-63 characters long and + * comply with RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public static ImageId of(String image) { + return new ImageId(null, image); + } + + /** + * Returns an image identity given project and image names. The image name must be 1-63 characters + * long and comply with RFC1035. 
Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public static ImageId of(String project, String image) { + return new ImageId(project, image); + } + + /** + * Returns {@code true} if the provided string matches the expected format of an image URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static ImageId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid image URL"); + } + return ImageId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageInfo.java new file mode 100644 index 000000000000..102e2c742d71 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ImageInfo.java @@ -0,0 +1,417 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Image; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine Image. An image contains a boot loader, an operating system and a root + * file system that is necessary for starting an instance. Compute Engine offers publicly-available + * images of certain operating systems that you can use, or you can create a custom image. A custom + * image is an image created from one of your virtual machine instances that contains your specific + * instance configurations. Use {@link DiskImageConfiguration} to create an image from an existing + * disk. Use {@link StorageImageConfiguration} to create an image from a file stored in Google + * Cloud Storage. 
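+ * <p>A minimal creation sketch, assuming {@code configuration} is a previously built
+ * {@code ImageConfiguration}; the image name is illustrative:
+ * <pre> {@code
+ * ImageInfo imageInfo = ImageInfo.of(ImageId.of("my-image"), configuration);
+ * }</pre>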
+ * + * @see Images + */ +public class ImageInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public ImageInfo apply(Image pb) { + return ImageInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Image apply(ImageInfo image) { + return image.toPb(); + } + }; + + private static final long serialVersionUID = -1061916352807358977L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final ImageId imageId; + private final Long creationTimestamp; + private final String description; + private final ImageConfiguration configuration; + private final Status status; + private final Long diskSizeGb; + private final List licenses; + private final DeprecationStatus deprecationStatus; + + /** + * The status of a Google Compute Engine Image. An image can be used to create other disks only + * after it has been successfully created and its status is set to {@code READY}. + */ + public enum Status { + /** + * Image creation failed. The image can not be used. + */ + FAILED, + + /** + * Image creation is pending. The image is not ready to be used yet. + */ + PENDING, + + /** + * Image has been created and is ready for use. + */ + READY + } + + /** + * A builder for {@code ImageInfo} objects. + */ + public abstract static class Builder { + + abstract Builder generatedId(String generatedId); + + abstract Builder creationTimestamp(Long creationTimestamp); + + /** + * Sets the image identity. + */ + public abstract Builder imageId(ImageId imageId); + + /** + * Sets an optional textual description of the image. + */ + public abstract Builder description(String description); + + /** + * Sets the image configuration. Use {@link DiskImageConfiguration} to create an image from an + * existing disk. Use {@link StorageImageConfiguration} to create an image from a file stored in + * Google Cloud Storage. + */ + public abstract Builder configuration(ImageConfiguration configuration); + + abstract Builder status(Status status); + + abstract Builder diskSizeGb(Long diskSizeGb); + + abstract Builder licenses(List licenses); + + abstract Builder deprecationStatus(DeprecationStatus deprecationStatus); + + /** + * Creates a {@code ImageInfo} object. 
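+ * <p>Typically called at the end of a chain of builder calls, for example (the identity,
+ * configuration and description are illustrative):
+ * <pre> {@code
+ * ImageInfo imageInfo = ImageInfo.builder(imageId, configuration)
+ *     .description("my description")
+ *     .build();
+ * }</pre>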
+ */ + public abstract ImageInfo build(); + } + + static final class BuilderImpl extends Builder { + + private String generatedId; + private Long creationTimestamp; + private ImageId imageId; + private String description; + private ImageConfiguration configuration; + private Status status; + private Long diskSizeGb; + private List licenses; + private DeprecationStatus deprecationStatus; + + BuilderImpl() {} + + BuilderImpl(ImageInfo imageInfo) { + this.generatedId = imageInfo.generatedId; + this.creationTimestamp = imageInfo.creationTimestamp; + this.imageId = imageInfo.imageId; + this.description = imageInfo.description; + this.configuration = imageInfo.configuration; + this.status = imageInfo.status; + this.diskSizeGb = imageInfo.diskSizeGb; + this.licenses = imageInfo.licenses; + this.deprecationStatus = imageInfo.deprecationStatus; + } + + BuilderImpl(Image imagePb) { + if (imagePb.getId() != null) { + this.generatedId = imagePb.getId().toString(); + } + if (imagePb.getCreationTimestamp() != null) { + this.creationTimestamp = TIMESTAMP_FORMATTER.parseMillis(imagePb.getCreationTimestamp()); + } + this.imageId = ImageId.fromUrl(imagePb.getSelfLink()); + this.description = imagePb.getDescription(); + this.configuration = ImageConfiguration.fromPb(imagePb); + if (imagePb.getStatus() != null) { + this.status = Status.valueOf(imagePb.getStatus()); + } + this.diskSizeGb = imagePb.getDiskSizeGb(); + if (imagePb.getLicenses() != null) { + this.licenses = Lists.transform(imagePb.getLicenses(), LicenseId.FROM_URL_FUNCTION); + } + if (imagePb.getDeprecated() != null) { + this.deprecationStatus = + DeprecationStatus.fromPb(imagePb.getDeprecated(), ImageId.FROM_URL_FUNCTION); + } + } + + @Override + BuilderImpl generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + BuilderImpl creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + public BuilderImpl imageId(ImageId imageId) { + this.imageId = checkNotNull(imageId); + return this; + } + + @Override + public BuilderImpl description(String description) { + this.description = description; + return this; + } + + @Override + public BuilderImpl configuration(ImageConfiguration configuration) { + this.configuration = checkNotNull(configuration); + return this; + } + + @Override + BuilderImpl status(Status status) { + this.status = status; + return this; + } + + @Override + BuilderImpl diskSizeGb(Long diskSizeGb) { + this.diskSizeGb = diskSizeGb; + return this; + } + + @Override + BuilderImpl licenses(List licenses) { + this.licenses = licenses != null ? ImmutableList.copyOf(licenses) : null; + return this; + } + + @Override + BuilderImpl deprecationStatus(DeprecationStatus deprecationStatus) { + this.deprecationStatus = deprecationStatus; + return this; + } + + @Override + public ImageInfo build() { + return new ImageInfo(this); + } + } + + ImageInfo(BuilderImpl builder) { + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.imageId = checkNotNull(builder.imageId); + this.description = builder.description; + this.configuration = checkNotNull(builder.configuration); + this.status = builder.status; + this.diskSizeGb = builder.diskSizeGb; + this.licenses = builder.licenses; + this.deprecationStatus = builder.deprecationStatus; + } + + /** + * Returns the service-generated unique identifier for the image. 
+ */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns the image identity. + */ + public ImageId imageId() { + return imageId; + } + + /** + * Returns a textual description of the image. + */ + public String description() { + return description; + } + + /** + * Returns the image configuration. This method returns an instance of + * {@link DiskImageConfiguration} if the image was created from a Google Compute Engine disk. + * This method returns an instance of {@link StorageImageConfiguration} if the image was created + * from a file stored in Google Cloud Storage. + */ + @SuppressWarnings("unchecked") + public <T extends ImageConfiguration> T configuration() { + return (T) configuration; + } + + /** + * Returns all applicable publicly visible licenses. + */ + public List<LicenseId> licenses() { + return licenses; + } + + /** + * Returns the status of the image. An image can be used to create other disks only after it has + * been successfully created and its status is set to {@link Status#READY}. + */ + public Status status() { + return status; + } + + /** + * Returns the size of the image when restored onto a persistent disk (in GB). + */ + public Long diskSizeGb() { + return diskSizeGb; + } + + /** + * Returns the deprecation status of the image. If {@link DeprecationStatus#status()} is either + * {@link DeprecationStatus.Status#DELETED} or {@link DeprecationStatus.Status#OBSOLETE}, the + * image must not be used. Returns {@code null} if the image is not deprecated. + */ + public DeprecationStatus deprecationStatus() { + return deprecationStatus; + } + + /** + * Returns a builder for the current image. + */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("imageId", imageId) + .add("description", description) + .add("configuration", configuration) + .add("status", status) + .add("diskSizeGb", diskSizeGb) + .add("licenses", licenses) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(generatedId, creationTimestamp, imageId, description, configuration, status, + diskSizeGb, licenses); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(ImageInfo.class) + && Objects.equals(toPb(), ((ImageInfo) obj).toPb()); + } + + ImageInfo setProjectId(String projectId) { + return toBuilder() + .imageId(imageId.setProjectId(projectId)) + .configuration(configuration.setProjectId(projectId)) + .build(); + } + + Image toPb() { + Image imagePb = configuration.toPb(); + if (generatedId != null) { + imagePb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + imagePb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + imagePb.setName(imageId.image()); + imagePb.setDescription(description); + imagePb.setSelfLink(imageId.selfLink()); + if (status != null) { + imagePb.setStatus(status.name()); + } + imagePb.setDiskSizeGb(diskSizeGb); + if (licenses != null) { + imagePb.setLicenses(Lists.transform(licenses, LicenseId.TO_URL_FUNCTION)); + } + if (deprecationStatus != null) { + imagePb.setDeprecated(deprecationStatus.toPb()); + } + return imagePb; + } + + /** + * Returns a builder for an {@code ImageInfo} object given the image identity and an image
configuration. Use {@link DiskImageConfiguration} to create an image from an existing disk. Use + * {@link StorageImageConfiguration} to create an image from a file stored in Google Cloud + * Storage. + */ + public static Builder builder(ImageId imageId, ImageConfiguration configuration) { + return new BuilderImpl().imageId(imageId).configuration(configuration); + } + + /** + * Returns an {@code ImageInfo} object given the image identity and an image configuration. Use + * {@link DiskImageConfiguration} to create an image from an existing disk. Use + * {@link StorageImageConfiguration} to create an image from a file stored in Google Cloud + * Storage. + */ + public static ImageInfo of(ImageId imageId, ImageConfiguration configuration) { + return builder(imageId, configuration).build(); + } + + static ImageInfo fromPb(Image imagePb) { + return new BuilderImpl(imagePb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Instance.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Instance.java new file mode 100644 index 000000000000..e1ee91f4e58d --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Instance.java @@ -0,0 +1,465 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.Compute.InstanceOption; +import com.google.cloud.compute.Compute.OperationOption; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.common.collect.ImmutableList; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * A Google Compute Engine VM Instance. An instance is a virtual machine (VM) hosted on Google's + * infrastructure. Instances can run Linux and Windows Server images provided by Google, or any + * customized versions of these images. You can also build and run images of other operating + * systems. Objects of this class are immutable. To get an {@code Instance} object with the most + * recent information use {@link #reload}. {@code Instance} adds a layer of service-related + * functionality over {@link InstanceInfo}. + * + * @see Virtual Machine Instances + */ +public class Instance extends InstanceInfo { + + private static final long serialVersionUID = 3072508155558980677L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Instance} objects. 
+ */ + public static class Builder extends InstanceInfo.Builder { + + private final Compute compute; + private final InstanceInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, InstanceId instanceId, MachineTypeId machineType, + AttachedDisk attachedDisk, NetworkInterface networkInterface) { + this.compute = compute; + this.infoBuilder = new InstanceInfo.BuilderImpl(instanceId); + this.infoBuilder.machineType(machineType); + this.infoBuilder.attachedDisks(ImmutableList.of(attachedDisk)); + this.infoBuilder.networkInterfaces(ImmutableList.of(networkInterface)); + } + + Builder(Instance instance) { + this.compute = instance.compute; + this.infoBuilder = new InstanceInfo.BuilderImpl(instance); + } + + @Override + Builder generatedId(String generatedId) { + this.infoBuilder.generatedId(generatedId); + return this; + } + + @Override + public Builder instanceId(InstanceId instanceId) { + this.infoBuilder.instanceId(instanceId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + this.infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + public Builder description(String description) { + this.infoBuilder.description(description); + return this; + } + + @Override + Builder status(Status status) { + this.infoBuilder.status(status); + return this; + } + + @Override + Builder statusMessage(String statusMessage) { + this.infoBuilder.statusMessage(statusMessage); + return this; + } + + @Override + public Builder tags(Tags tags) { + this.infoBuilder.tags(tags); + return this; + } + + @Override + public Builder machineType(MachineTypeId machineType) { + this.infoBuilder.machineType(machineType); + return this; + } + + @Override + public Builder canIpForward(Boolean canIpForward) { + this.infoBuilder.canIpForward(canIpForward); + return this; + } + + @Override + public Builder networkInterfaces(List networkInterfaces) { + this.infoBuilder.networkInterfaces(networkInterfaces); + return this; + } + + @Override + public Builder networkInterfaces(NetworkInterface... networkInterfaces) { + this.infoBuilder.networkInterfaces(networkInterfaces); + return this; + } + + @Override + public Builder attachedDisks(List attachedDisks) { + this.infoBuilder.attachedDisks(attachedDisks); + return this; + } + + @Override + public Builder attachedDisks(AttachedDisk... attachedDisks) { + this.infoBuilder.attachedDisks(attachedDisks); + return this; + } + + @Override + public Builder metadata(Metadata metadata) { + this.infoBuilder.metadata(metadata); + return this; + } + + @Override + public Builder serviceAccounts(List serviceAccounts) { + this.infoBuilder.serviceAccounts(serviceAccounts); + return this; + } + + @Override + public Builder schedulingOptions(SchedulingOptions schedulingOptions) { + this.infoBuilder.schedulingOptions(schedulingOptions); + return this; + } + + @Override + Builder cpuPlatform(String cpuPlatform) { + this.infoBuilder.cpuPlatform(cpuPlatform); + return this; + } + + @Override + public Instance build() { + return new Instance(compute, infoBuilder); + } + } + + Instance(Compute compute, Instance.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this instance exists. + * + * @return {@code true} if this instance exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(InstanceOption.fields()) != null; + } + + /** + * Fetches current instance's latest information. 
Returns {@code null} if the instance does not + * exist. + * + * @param options instance options + * @return a {@code Instance} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Instance reload(InstanceOption... options) { + return compute.getInstance(instanceId(), options); + } + + /** + * Deletes this instance. + * + * @return a zone operation if delete request was successfully sent, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... options) { + return compute.deleteInstance(instanceId(), options); + } + + /** + * Adds an access configuration to the provided network interface for this instance. + * + * @return a zone operation if the add request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + public Operation addAccessConfig(String networkInterface, AccessConfig accessConfig, + OperationOption... options) { + return compute.addAccessConfig(instanceId(), networkInterface, accessConfig, options); + } + + /** + * Attaches a persistent disk to this instance given its configuration. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + public Operation attachDisk(PersistentDiskConfiguration configuration, + OperationOption... options) { + return compute.attachDisk(instanceId(), configuration, options); + } + + /** + * Attaches a persistent disk to this instance given the device name and its configuration. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + public Operation attachDisk(String deviceName, PersistentDiskConfiguration configuration, + OperationOption... options) { + return compute.attachDisk(instanceId(), deviceName, configuration, options); + } + + /** + * Attaches a persistent disk to this instance given the device name, its configuration and the + * device index. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + public Operation attachDisk(String deviceName, PersistentDiskConfiguration configuration, + int index, OperationOption... options) { + return compute.attachDisk(instanceId(), deviceName, configuration, index, options); + } + + /** + * Deletes an access configuration from the provided network interface for this instance. + * + * @return a zone operation if the delete request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + public Operation deleteAccessConfig(String networkInterface, String accessConfig, + OperationOption... options) { + return compute.deleteAccessConfig(instanceId(), networkInterface, accessConfig, options); + } + + /** + * Detaches a disk from this instance. + * + * @return a zone operation if the detach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + public Operation detachDisk(String deviceName, OperationOption... options) { + return compute.detachDisk(instanceId(), deviceName, options); + } + + /** + * Returns the serial port output for this instance and port number. {@code port} must be between + * 1 and 4 (inclusive). 
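+ * <p>For example, assuming {@code instance} refers to an existing instance:
+ * <pre> {@code
+ * String console = instance.getSerialPortOutput(1);
+ * }</pre>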
+   *
+   * @return the serial port output or {@code null} if the instance was not found
+   * @throws ComputeException upon failure
+   */
+  public String getSerialPortOutput(int port) {
+    return compute.getSerialPortOutput(instanceId(), port);
+  }
+
+  /**
+   * Returns the default serial port output for this instance. The default serial port corresponds
+   * to port number 1.
+   *
+   * @return the serial port output or {@code null} if the instance was not found
+   * @throws ComputeException upon failure
+   */
+  public String getSerialPortOutput() {
+    return compute.getSerialPortOutput(instanceId());
+  }
+
+  /**
+   * Resets this instance.
+   *
+   * @return a zone operation if the reset request was issued correctly, {@code null} if the
+   *     instance was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation reset(OperationOption... options) {
+    return compute.reset(instanceId(), options);
+  }
+
+  /**
+   * Sets the auto-delete flag for a disk attached to this instance.
+   *
+   * @return a zone operation if the flag setting request was issued correctly, {@code null} if the
+   *     instance was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setDiskAutoDelete(String deviceName, boolean autoDelete,
+      OperationOption... options) {
+    return compute.setDiskAutoDelete(instanceId(), deviceName, autoDelete, options);
+  }
+
+  /**
+   * Sets the machine type for this instance. The instance must be in
+   * {@link InstanceInfo.Status#TERMINATED} state to be able to set its machine type.
+   *
+   * @return a zone operation if the set request was issued correctly, {@code null} if the instance
+   *     was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setMachineType(MachineTypeId machineType, OperationOption... options) {
+    return compute.setMachineType(instanceId(), machineType, options);
+  }
+
+  /**
+   * Sets the metadata for this instance.
+   *
+   * @return a zone operation if the set request was issued correctly, {@code null} if the instance
+   *     was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setMetadata(Metadata metadata, OperationOption... options) {
+    return compute.setMetadata(instanceId(), metadata, options);
+  }
+
+  /**
+   * Sets the metadata for this instance. The fingerprint value is taken from this instance's
+   * {@code metadata().fingerprint()}.
+   *
+   * @return a zone operation if the set request was issued correctly, {@code null} if the instance
+   *     was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setMetadata(Map<String, String> metadata, OperationOption... options) {
+    return setMetadata(metadata().toBuilder().values(metadata).build(), options);
+  }
+
+  /**
+   * Sets the scheduling options for this instance.
+   *
+   * @return a zone operation if the set request was issued correctly, {@code null} if the instance
+   *     was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setSchedulingOptions(SchedulingOptions scheduling, OperationOption... options) {
+    return compute.setSchedulingOptions(instanceId(), scheduling, options);
+  }
+
+  /**
+   * Sets the tags for this instance.
+   *
+   * @return a zone operation if the set request was issued correctly, {@code null} if the instance
+   *     was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setTags(Tags tags, OperationOption... options) {
+    return compute.setTags(instanceId(), tags, options);
+  }
+
+  /**
+   * Sets the tags for this instance. The fingerprint value is taken from this instance's
+   * {@code tags().fingerprint()}.
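+   *
+   * <p>A usage sketch; {@code instance} is assumed to be an existing {@code Instance} and the tag
+   * values are only illustrative:
+   * <pre> {@code
+   * Operation operation = instance.setTags(ImmutableList.of("http-server", "https-server"));
+   * if (operation != null) {
+   *   // poll operation.isDone() and reload() the instance to see the new tags
+   * }
+   * }</pre>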
+   *
+   * @return a zone operation if the set request was issued correctly, {@code null} if the instance
+   *     was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation setTags(Iterable<String> tags, OperationOption... options) {
+    return setTags(tags().toBuilder().values(tags).build(), options);
+  }
+
+  /**
+   * Starts this instance.
+   *
+   * @return a zone operation if the start request was issued correctly, {@code null} if the
+   *     instance was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation start(OperationOption... options) {
+    return compute.start(instanceId(), options);
+  }
+
+  /**
+   * Stops this instance.
+   *
+   * @return a zone operation if the stop request was issued correctly, {@code null} if the
+   *     instance was not found
+   * @throws ComputeException upon failure
+   */
+  public Operation stop(OperationOption... options) {
+    return compute.stop(instanceId(), options);
+  }
+
+  /**
+   * Returns the instance's {@code Compute} object used to issue requests.
+   */
+  public Compute compute() {
+    return compute;
+  }
+
+  @Override
+  public Builder toBuilder() {
+    return new Builder(this);
+  }
+
+  @Override
+  public final boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    }
+    if (obj == null || !obj.getClass().equals(Instance.class)) {
+      return false;
+    }
+    Instance other = (Instance) obj;
+    return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options);
+  }
+
+  @Override
+  public final int hashCode() {
+    return Objects.hash(super.hashCode(), options);
+  }
+
+  private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException {
+    input.defaultReadObject();
+    this.compute = options.service();
+  }
+
+  static Instance fromPb(Compute compute,
+      com.google.api.services.compute.model.Instance instancePb) {
+    return new Instance(compute, new InstanceInfo.BuilderImpl(instancePb));
+  }
+}
diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/InstanceId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/InstanceId.java
new file mode 100644
index 000000000000..8fde843020f3
--- /dev/null
+++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/InstanceId.java
@@ -0,0 +1,177 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.compute;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import com.google.common.base.Function;
+import com.google.common.base.MoreObjects;
+
+import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * Identity for a Google Compute Engine virtual machine instance.
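+ *
+ * <p>For example, where the project, zone and instance names are placeholders:
+ * <pre> {@code
+ * InstanceId instanceId = InstanceId.of("my-project", "us-central1-a", "instance-name");
+ * String selfLink = instanceId.selfLink();
+ * }</pre>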
+ */ +public final class InstanceId extends ResourceId { + + static final Function FROM_URL_FUNCTION = new Function() { + @Override + public InstanceId apply(String pb) { + return InstanceId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = new Function() { + @Override + public String apply(InstanceId instanceId) { + return instanceId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "zones/([^/]+)/instances/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -2787043125223159922L; + + private final String zone; + private final String instance; + + private InstanceId(String project, String zone, String instance) { + super(project); + this.zone = checkNotNull(zone); + this.instance = checkNotNull(instance); + } + + /** + * Returns the name of the instance. The name must be 1-63 characters long and comply with + * RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public String instance() { + return instance; + } + + /** + * Returns the name of the zone this instance belongs to. + */ + public String zone() { + return zone; + } + + /** + * Returns the identity of the zone this instance belongs to. + */ + public ZoneId zoneId() { + return ZoneId.of(project(), zone); + } + + @Override + public String selfLink() { + return super.selfLink() + "/zones/" + zone + "/instances/" + instance; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("zone", zone).add("instance", instance); + } + + @Override + public int hashCode() { + return Objects.hash(super.baseHashCode(), zone, instance); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof InstanceId)) { + return false; + } + InstanceId other = (InstanceId) obj; + return baseEquals(other) + && Objects.equals(zone, other.zone) + && Objects.equals(instance, other.instance); + } + + @Override + InstanceId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return InstanceId.of(projectId, zone, instance); + } + + /** + * Returns an instance identity given the zone identity and the instance name. The instance name + * must be 1-63 characters long and comply with RFC1035. Specifically, the name must match the + * regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static InstanceId of(ZoneId zoneId, String instance) { + return new InstanceId(zoneId.project(), zoneId.zone(), instance); + } + + /** + * Returns an instance identity given the zone and instance names. The instance name must be 1-63 + * characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. 
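+   *
+   * <p>A short sketch, using placeholder zone and instance names:
+   * <pre> {@code
+   * InstanceId instanceId = InstanceId.of("us-central1-a", "instance-name");
+   * }</pre>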
+ * + * @see RFC1035 + */ + public static InstanceId of(String zone, String instance) { + return new InstanceId(null, zone, instance); + } + + /** + * Returns an instance identity given project, zone and instance names. The instance name must be + * 1-63 characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static InstanceId of(String project, String zone, String instance) { + return new InstanceId(project, zone, instance); + } + + /** + * Returns {@code true} if the provided string matches the expected format of an instance URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static InstanceId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid instance URL"); + } + return InstanceId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/InstanceInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/InstanceInfo.java new file mode 100644 index 000000000000..7f85985afd2d --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/InstanceInfo.java @@ -0,0 +1,676 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Instance; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine VM Instance. An instance is a virtual machine (VM) hosted on Google's + * infrastructure. Instances can run Linux and Windows Server images provided by Google, or any + * customized versions of these images. You can also build and run images of other operating + * systems. Google Compute Engine also lets you choose the machine properties of your instances, + * such as the number of virtual CPUs and the amount of memory + * + *

By default, each Compute Engine instance has a small root persistent disk that contains the + * operating system. When your applications require additional storage space, you can add one or + * more additional disks to your instance. + * + *

Instances communicate with other instances in the same network through a local area network. + * Instances communicate with the rest of the world through the Internet. A network lives in a + * project and is isolated from other networks in the project. A project can have up to five + * different networks. + * + * @see Virtual Machine Instances + */ +public class InstanceInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public InstanceInfo apply(Instance pb) { + return InstanceInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Instance apply(InstanceInfo instance) { + return instance.toPb(); + } + }; + + private static final long serialVersionUID = -6601223112628977168L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final InstanceId instanceId; + private final Long creationTimestamp; + private final String description; + private final Status status; + private final String statusMessage; + private final Tags tags; + private final MachineTypeId machineType; + private final Boolean canIpForward; + private final List networkInterfaces; + private final List attachedDisks; + private final Metadata metadata; + private final List serviceAccounts; + private final SchedulingOptions schedulingOptions; + private final String cpuPlatform; + + /** + * The status of the instance. + */ + public enum Status { + /** + * Indicates that resources are being reserved for the instance. The instance isn't running yet. + */ + PROVISIONING, + + /** + * Indicates that resources have been acquired and the instance is being prepared for launch. + */ + STAGING, + + /** + * Indicates that the instance is booting up or running. You should be able to {@code ssh} into + * the instance soon, though not immediately, after it enters this state. + */ + RUNNING, + + /** + * Indicates that the instance is being stopped either due to a failure, or the instance being + * shut down. This is a temporary status and the instance will move to {@code TERMINATED}. + */ + STOPPING, + + /** + * Indicates that the instance was shut down or encountered a failure, either through the API or + * from inside the guest. You can choose to restart the instance or delete it. + */ + TERMINATED + } + + /** + * A builder for {@code InstanceInfo} objects. + */ + public abstract static class Builder { + + abstract Builder generatedId(String generatedId); + + /** + * Sets the identity of the virtual machine instance. + */ + public abstract Builder instanceId(InstanceId instanceId); + + abstract Builder creationTimestamp(Long creationTimestamp); + + /** + * Sets an optional description of this Google Compute Engine instance. + */ + public abstract Builder description(String description); + + abstract Builder status(Status status); + + abstract Builder statusMessage(String statusMessage); + + /** + * Sets the tags to apply to this instance. Tags are used to identify valid sources or targets + * for network firewalls. + */ + public abstract Builder tags(Tags tags); + + /** + * Sets the machine type identity. + */ + public abstract Builder machineType(MachineTypeId machineType); + + /** + * Sets whether to allow this instance to send and receive packets with non-matching destination + * or source IPs. This is required if you plan to use this instance to forward routes. 
+ * + * @see Enabling IP + * Forwarding + */ + public abstract Builder canIpForward(Boolean canIpForward); + + /** + * Sets a list of network interfaces. This specifies how this instance is configured to interact + * with other network services, such as connecting to the internet. At the moment, instances + * only support one network interface. + */ + public abstract Builder networkInterfaces(List networkInterfaces); + + /** + * Sets a list of network interfaces. This specifies how this instance is configured to interact + * with other network services, such as connecting to the internet. At the moment, instances + * only support one network interface. + */ + public abstract Builder networkInterfaces(NetworkInterface... networkInterfaces); + + /** + * Sets a list of disks to attach to the instance. One boot disk must be provided (i.e. an + * attached disk such that {@link AttachedDisk.AttachedDiskConfiguration#boot()} returns + * {@code true}). + */ + public abstract Builder attachedDisks(List attachedDisks); + + /** + * Sets a list of disks to attach to the instance. One boot disk must be provided. + */ + public abstract Builder attachedDisks(AttachedDisk... attachedDisks); + + /** + * Sets the instance metadata. + */ + public abstract Builder metadata(Metadata metadata); + + /** + * Sets a list of service accounts, with their specified scopes, authorized for this instance. + * Service accounts generate access tokens that can be accessed through the metadata server and + * used to authenticate applications on the instance. + * + * @see Authenticating from + * Google Compute Engine + */ + public abstract Builder serviceAccounts(List serviceAccounts); + + /** + * Sets the scheduling options for the instance. + */ + public abstract Builder schedulingOptions(SchedulingOptions schedulingOptions); + + abstract Builder cpuPlatform(String cpuPlatform); + + /** + * Creates an {@code InstanceInfo} object. 
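+     *
+     * <p>A sketch of a typical build; the zone, machine type, image and network names below are
+     * illustrative only:
+     * <pre> {@code
+     * InstanceId instanceId = InstanceId.of("us-central1-a", "instance-name");
+     * MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1");
+     * NetworkInterface networkInterface = NetworkInterface.of(NetworkId.of("default"));
+     * AttachedDisk disk =
+     *     AttachedDisk.of(AttachedDisk.CreateDiskConfiguration.of(
+     *         ImageId.of("debian-cloud", "debian-8-jessie-v20160329")));
+     * InstanceInfo instanceInfo = InstanceInfo.builder(instanceId, machineTypeId)
+     *     .attachedDisks(disk)
+     *     .networkInterfaces(networkInterface)
+     *     .build();
+     * }</pre>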
+ */ + public abstract InstanceInfo build(); + } + + public static final class BuilderImpl extends Builder { + + private String generatedId; + private InstanceId instanceId; + private Long creationTimestamp; + private String description; + private Status status; + private String statusMessage; + private Tags tags; + private MachineTypeId machineType; + private Boolean canIpForward; + private List networkInterfaces; + private List attachedDisks; + private Metadata metadata; + private List serviceAccounts; + private SchedulingOptions schedulingOptions; + private String cpuPlatform; + + BuilderImpl(InstanceId instanceId) { + this.instanceId = checkNotNull(instanceId); + } + + BuilderImpl(InstanceInfo instance) { + this.generatedId = instance.generatedId; + this.instanceId = instance.instanceId; + this.creationTimestamp = instance.creationTimestamp; + this.description = instance.description; + this.status = instance.status; + this.statusMessage = instance.statusMessage; + this.tags = instance.tags; + this.machineType = instance.machineType; + this.canIpForward = instance.canIpForward; + this.networkInterfaces = instance.networkInterfaces; + this.attachedDisks = instance.attachedDisks; + this.metadata = instance.metadata; + this.serviceAccounts = instance.serviceAccounts; + this.schedulingOptions = instance.schedulingOptions; + this.cpuPlatform = instance.cpuPlatform; + } + + BuilderImpl(Instance instancePb) { + if (instancePb.getId() != null) { + this.generatedId = instancePb.getId().toString(); + } + this.instanceId = InstanceId.fromUrl(instancePb.getSelfLink()); + if (instancePb.getCreationTimestamp() != null) { + this.creationTimestamp = TIMESTAMP_FORMATTER.parseMillis(instancePb.getCreationTimestamp()); + } + this.description = instancePb.getDescription(); + if (instancePb.getStatus() != null) { + this.status = Status.valueOf(instancePb.getStatus()); + } + this.statusMessage = instancePb.getStatusMessage(); + if (instancePb.getTags() != null) { + this.tags = Tags.fromPb(instancePb.getTags()); + } + if (instancePb.getMachineType() != null) { + this.machineType = MachineTypeId.fromUrl(instancePb.getMachineType()); + } + this.canIpForward = instancePb.getCanIpForward(); + if (instancePb.getNetworkInterfaces() != null) { + this.networkInterfaces = + Lists.transform(instancePb.getNetworkInterfaces(), NetworkInterface.FROM_PB_FUNCTION); + } + if (instancePb.getDisks() != null) { + this.attachedDisks = Lists.transform(instancePb.getDisks(), AttachedDisk.FROM_PB_FUNCTION); + } + if (instancePb.getMetadata() != null) { + this.metadata = Metadata.fromPb(instancePb.getMetadata()); + } + if (instancePb.getServiceAccounts() != null) { + this.serviceAccounts = + Lists.transform(instancePb.getServiceAccounts(), ServiceAccount.FROM_PB_FUNCTION); + } + if (instancePb.getScheduling() != null) { + this.schedulingOptions = SchedulingOptions.fromPb(instancePb.getScheduling()); + } + this.cpuPlatform = instancePb.getCpuPlatform(); + } + + @Override + Builder generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + public Builder instanceId(InstanceId instanceId) { + this.instanceId = checkNotNull(instanceId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + public Builder description(String description) { + this.description = description; + return this; + } + + @Override + Builder status(Status status) { + this.status = status; + return this; + } + 
+ @Override + Builder statusMessage(String statusMessage) { + this.statusMessage = statusMessage; + return this; + } + + @Override + public Builder tags(Tags tags) { + this.tags = tags; + return this; + } + + @Override + public Builder machineType(MachineTypeId machineType) { + this.machineType = checkNotNull(machineType); + return this; + } + + @Override + public Builder canIpForward(Boolean canIpForward) { + this.canIpForward = canIpForward; + return this; + } + + @Override + public Builder networkInterfaces(List networkInterfaces) { + this.networkInterfaces = ImmutableList.copyOf(checkNotNull(networkInterfaces)); + return this; + } + + @Override + public Builder networkInterfaces(NetworkInterface... networkInterfaces) { + this.networkInterfaces = Arrays.asList(networkInterfaces); + return this; + } + + @Override + public Builder attachedDisks(List attachedDisks) { + this.attachedDisks = ImmutableList.copyOf(checkNotNull(attachedDisks)); + return this; + } + + @Override + public Builder attachedDisks(AttachedDisk... attachedDisks) { + this.attachedDisks = Arrays.asList(attachedDisks); + return this; + } + + @Override + public Builder metadata(Metadata metadata) { + this.metadata = metadata; + return this; + } + + @Override + public Builder serviceAccounts(List serviceAccounts) { + this.serviceAccounts = ImmutableList.copyOf(checkNotNull(serviceAccounts)); + return this; + } + + @Override + public Builder schedulingOptions(SchedulingOptions schedulingOptions) { + this.schedulingOptions = schedulingOptions; + return this; + } + + @Override + Builder cpuPlatform(String cpuPlatform) { + this.cpuPlatform = cpuPlatform; + return this; + } + + @Override + public InstanceInfo build() { + checkNotNull(attachedDisks); + checkNotNull(networkInterfaces); + return new InstanceInfo(this); + } + } + + InstanceInfo(BuilderImpl builder) { + this.generatedId = builder.generatedId; + this.instanceId = builder.instanceId; + this.creationTimestamp = builder.creationTimestamp; + this.description = builder.description; + this.status = builder.status; + this.statusMessage = builder.statusMessage; + this.tags = builder.tags; + this.machineType = builder.machineType; + this.canIpForward = builder.canIpForward; + this.networkInterfaces = builder.networkInterfaces; + this.attachedDisks = builder.attachedDisks; + this.metadata = builder.metadata; + this.serviceAccounts = builder.serviceAccounts; + this.schedulingOptions = builder.schedulingOptions; + this.cpuPlatform = builder.cpuPlatform; + } + + /** + * Returns the service-generated unique identifier for the instance. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the instance identity. + */ + public InstanceId instanceId() { + return instanceId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns a textual description of the instance. + */ + public String description() { + return description; + } + + /** + * Returns the status of the instance. + */ + public Status status() { + return status; + } + + /** + * Returns an optional, human-readable explanation of the status. + */ + public String statusMessage() { + return statusMessage; + } + + /** + * Returns the tags of this instance. Tags are used to identify valid sources or targets for + * network firewalls. + */ + public Tags tags() { + return tags; + } + + /** + * Returns the machine type identity. 
+ */ + public MachineTypeId machineType() { + return machineType; + } + + /** + * Returns whether to allow this instance to send and receive packets with non-matching + * destination or source IPs. This is required if you plan to use this instance to forward routes. + * + * @see Enabling IP + * Forwarding + */ + public Boolean canIpForward() { + return canIpForward; + } + + /** + * Returns a list of network interfaces. This specifies how this instance is configured to + * interact with other network services, such as connecting to the internet. + */ + public List networkInterfaces() { + return networkInterfaces; + } + + /** + * Returns a list of disks attached to the instance. + */ + public List attachedDisks() { + return attachedDisks; + } + + /** + * Returns the instance metadata. + */ + public Metadata metadata() { + return metadata; + } + + /** + * Returns a list of service accounts, with their specified scopes, authorized for this instance. + * Service accounts generate access tokens that can be accessed through the metadata server and + * used to authenticate applications on the instance. + * + * @see Authenticating from + * Google Compute Engine + */ + public List serviceAccounts() { + return serviceAccounts; + } + + /** + * Returns the scheduling options for the instance. + */ + public SchedulingOptions schedulingOptions() { + return schedulingOptions; + } + + /** + * Returns the CPU platform used by this instance. + */ + public String cpuPlatform() { + return cpuPlatform; + } + + /** + * Returns a builder for the current instance. + */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("instanceId", instanceId) + .add("creationTimestamp", creationTimestamp) + .add("description", description) + .add("status", status) + .add("statusMessage", statusMessage) + .add("tags", tags) + .add("machineType", machineType) + .add("canIpForward", canIpForward) + .add("networkInterfaces", networkInterfaces) + .add("attachedDisks", attachedDisks) + .add("metadata", metadata) + .add("serviceAccounts", serviceAccounts) + .add("schedulingOptions", schedulingOptions) + .add("cpuPlatform", cpuPlatform) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(generatedId, instanceId, creationTimestamp, description, status, + statusMessage, tags, machineType, canIpForward, networkInterfaces, attachedDisks, metadata, + serviceAccounts, schedulingOptions, cpuPlatform); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(InstanceInfo.class) + && Objects.equals(toPb(), ((InstanceInfo) obj).toPb()); + } + + InstanceInfo setProjectId(final String projectId) { + Builder builder = toBuilder(); + builder.networkInterfaces(Lists.transform(networkInterfaces, + new Function() { + @Override + public NetworkInterface apply(NetworkInterface networkInterface) { + return networkInterface.setProjectId(projectId); + } + })); + builder.attachedDisks(Lists.transform(attachedDisks, + new Function() { + @Override + public AttachedDisk apply(AttachedDisk attachedDisk) { + return attachedDisk.setProjectId(projectId); + } + })); + return builder.instanceId(instanceId.setProjectId(projectId)) + .machineType(machineType.setProjectId(projectId)) + .build(); + } + + Instance toPb() { + Instance instancePb = new Instance(); + if (generatedId != null) { + instancePb.setId(new BigInteger(generatedId)); + 
} + if (creationTimestamp != null) { + instancePb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + instancePb.setName(instanceId.instance()); + instancePb.setDescription(description); + instancePb.setSelfLink(instanceId.selfLink()); + instancePb.setZone(instanceId.zoneId().selfLink()); + if (status != null) { + instancePb.setStatus(status.name()); + } + instancePb.setStatusMessage(statusMessage); + if (tags != null) { + instancePb.setTags(tags.toPb()); + } + if (machineType != null) { + instancePb.setMachineType(machineType.selfLink()); + } + instancePb.setCanIpForward(canIpForward); + if (networkInterfaces != null) { + instancePb.setNetworkInterfaces( + Lists.transform(networkInterfaces, NetworkInterface.TO_PB_FUNCTION)); + } + if (attachedDisks != null) { + instancePb.setDisks(Lists.transform(attachedDisks, AttachedDisk.TO_PB_FUNCTION)); + } + if (metadata != null) { + instancePb.setMetadata(metadata.toPb()); + } + if (serviceAccounts != null) { + instancePb.setServiceAccounts( + Lists.transform(serviceAccounts, ServiceAccount.TO_PB_FUNCTION)); + } + if (schedulingOptions != null) { + instancePb.setScheduling(schedulingOptions.toPb()); + } + instancePb.setCpuPlatform(cpuPlatform); + return instancePb; + } + + /** + * Returns a builder for an {@code InstanceInfo} object given the instance identity and the + * machine type. + */ + public static Builder builder(InstanceId instanceId, MachineTypeId machineType) { + return new BuilderImpl(instanceId).machineType(machineType); + } + + /** + * Returns an {@code InstanceInfo} object given the instance identity, the machine type, a disk + * to attach to the instance and a network interface. {@code disk} must be a boot disk (i.e. + * {@link AttachedDisk.AttachedDiskConfiguration#boot()} returns {@code true}). + */ + public static InstanceInfo of(InstanceId instanceId, MachineTypeId machineType, AttachedDisk disk, + NetworkInterface networkInterface) { + return builder(instanceId, machineType) + .attachedDisks(ImmutableList.of(disk)) + .networkInterfaces(ImmutableList.of(networkInterface)) + .build(); + } + + static InstanceInfo fromPb(Instance instancePb) { + return new BuilderImpl(instancePb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/License.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/License.java new file mode 100644 index 000000000000..dc0e49bace31 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/License.java @@ -0,0 +1,92 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.io.Serializable; +import java.util.Objects; + +/** + * A Google Compute Engine License. A License represents a software license. 
Licenses are used to + * track software usage in images, persistent disks, snapshots, and virtual machine instances. + * + * @see Licenses + */ +public class License implements Serializable { + + private static final long serialVersionUID = 6907923910319640363L; + + private final LicenseId licenseId; + private final Boolean chargesUseFee; + + License(LicenseId licenseId, Boolean chargesUseFee) { + this.licenseId = checkNotNull(licenseId); + this.chargesUseFee = chargesUseFee; + } + + /** + * Returns the identity of the license. + */ + public LicenseId licenseId() { + return licenseId; + } + + /** + * Returns {@code true} if the customer will be charged a license fee for running software that + * contains this license on an instance. + */ + public Boolean chargesUseFee() { + return chargesUseFee; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("licenseId", licenseId) + .add("chargesUseFee", chargesUseFee) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(licenseId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(License.class) + && Objects.equals(toPb(), ((License) obj).toPb()); + } + + com.google.api.services.compute.model.License toPb() { + com.google.api.services.compute.model.License licensePb = + new com.google.api.services.compute.model.License(); + licensePb.setName(licenseId.license()); + licensePb.setChargesUseFee(chargesUseFee); + licensePb.setSelfLink(licenseId.selfLink()); + return licensePb; + } + + static License fromPb(com.google.api.services.compute.model.License licensePb) { + return new License(LicenseId.fromUrl(licensePb.getSelfLink()), licensePb.getChargesUseFee()); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/LicenseId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/LicenseId.java new file mode 100644 index 000000000000..284572ba58bb --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/LicenseId.java @@ -0,0 +1,128 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine license. 
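+ *
+ * <p>For example, where both names are placeholders:
+ * <pre> {@code
+ * LicenseId licenseId = LicenseId.of("my-project", "my-license");
+ * }</pre>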
+ */ +public final class LicenseId extends ResourceId { + + static final Function FROM_URL_FUNCTION = new Function() { + @Override + public LicenseId apply(String pb) { + return LicenseId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = new Function() { + @Override + public String apply(LicenseId licenseId) { + return licenseId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "global/licenses/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -2239484554024469651L; + + private final String license; + + private LicenseId(String project, String license) { + super(project); + this.license = checkNotNull(license); + } + + /** + * Returns the name of the license. + */ + public String license() { + return license; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/licenses/" + license; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("license", license); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), license); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof LicenseId)) { + return false; + } + LicenseId other = (LicenseId) obj; + return baseEquals(other) && Objects.equals(license, other.license); + } + + @Override + LicenseId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return LicenseId.of(projectId, license); + } + + /** + * Returns a license identity given the license name. + */ + public static LicenseId of(String license) { + return new LicenseId(null, license); + } + + /** + * Returns a license identity given project and license names. + */ + public static LicenseId of(String project, String license) { + return new LicenseId(project, license); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a license URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static LicenseId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid license URL"); + } + return LicenseId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/MachineType.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/MachineType.java new file mode 100644 index 000000000000..c850b351c946 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/MachineType.java @@ -0,0 +1,323 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import com.google.api.services.compute.model.MachineType.ScratchDisks; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine machine type. A machine type determine the virtualized hardware + * specifications of your virtual machine instances, such as the amount of memory or number of + * virtual CPUs. + * + * @see Machine Types + */ +public class MachineType implements Serializable { + + static final Function + FROM_PB_FUNCTION = + new Function() { + @Override + public MachineType apply(com.google.api.services.compute.model.MachineType pb) { + return MachineType.fromPb(pb); + } + }; + static final Function + TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.MachineType apply(MachineType type) { + return type.toPb(); + } + }; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private static final long serialVersionUID = -4210962597502860450L; + + private final MachineTypeId machineTypeId; + private final String generatedId; + private final Long creationTimestamp; + private final String description; + private final Integer cpus; + private final Integer memoryMb; + private final List scratchDisksSizeGb; + private final Integer maximumPersistentDisks; + private final Long maximumPersistentDisksSizeGb; + private final DeprecationStatus deprecationStatus; + + static final class Builder { + + private MachineTypeId machineTypeId; + private String generatedId; + private Long creationTimestamp; + private String description; + private Integer cpus; + private Integer memoryMb; + private List scratchDisksSizeGb; + private Integer maximumPersistentDisks; + private Long maximumPersistentDisksSizeGb; + private DeprecationStatus deprecationStatus; + + private Builder() {} + + Builder machineTypeId(MachineTypeId machineTypeId) { + this.machineTypeId = machineTypeId; + return this; + } + + Builder generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + Builder creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + Builder description(String description) { + this.description = description; + return this; + } + + Builder cpus(Integer cpus) { + this.cpus = cpus; + return this; + } + + Builder memoryMb(Integer memoryMb) { + this.memoryMb = memoryMb; + return this; + } + + Builder scratchDisksSizeGb(List scratchDisksSizeGb) { + this.scratchDisksSizeGb = scratchDisksSizeGb; + return this; + } + + Builder maximumPersistentDisks(Integer maximumPersistentDisks) { + this.maximumPersistentDisks = maximumPersistentDisks; + return this; + } + + Builder maximumPersistentDisksSizeGb(Long maximumPersistentDisksSizeGb) { + this.maximumPersistentDisksSizeGb = maximumPersistentDisksSizeGb; + return this; + } + + Builder deprecationStatus(DeprecationStatus deprecationStatus) { + this.deprecationStatus = deprecationStatus; + return this; + } + + MachineType build() { + return new MachineType(this); + } + } + + private MachineType(Builder builder) { + this.machineTypeId = builder.machineTypeId; + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + 
this.description = builder.description; + this.cpus = builder.cpus; + this.memoryMb = builder.memoryMb; + this.scratchDisksSizeGb = builder.scratchDisksSizeGb; + this.maximumPersistentDisks = builder.maximumPersistentDisks; + this.maximumPersistentDisksSizeGb = builder.maximumPersistentDisksSizeGb; + this.deprecationStatus = builder.deprecationStatus; + } + + /** + * Returns the machine type's identity. + */ + public MachineTypeId machineTypeId() { + return machineTypeId; + } + + /** + * Returns the service-generated unique identifier for the machine type. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns an optional textual description of the machine type. + */ + public String description() { + return description; + } + + /** + * Returns the number of virtual CPUs that are available to the instance. + */ + public Integer cpus() { + return cpus; + } + + /** + * Returns the amount of physical memory available to the instance, defined in MB. + */ + public Integer memoryMb() { + return memoryMb; + } + + /** + * Returns the size of all extended scratch disks assigned to the instance, defined in GB. + */ + public List scratchDisksSizeGb() { + return scratchDisksSizeGb; + } + + /** + * Returns the maximum number of persistent disks allowed by this instance type. + */ + public Integer maximumPersistentDisks() { + return maximumPersistentDisks; + } + + /** + * Returns the maximum total persistent disks size allowed, defined in GB. + */ + public Long maximumPersistentDisksSizeGb() { + return maximumPersistentDisksSizeGb; + } + + /** + * Returns the deprecation status of the machine type. If {@link DeprecationStatus#status()} is + * either {@link DeprecationStatus.Status#DELETED} or {@link DeprecationStatus.Status#OBSOLETE} + * the machine type should not be used. Returns {@code null} if the machine type is not + * deprecated. 
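+   *
+   * <p>A sketch of the suggested check, assuming {@code machineType} was already fetched from the
+   * service:
+   * <pre> {@code
+   * if (machineType.deprecationStatus() == null
+   *     || (machineType.deprecationStatus().status() != DeprecationStatus.Status.DELETED
+   *         && machineType.deprecationStatus().status() != DeprecationStatus.Status.OBSOLETE)) {
+   *   // the machine type can still be used
+   * }
+   * }</pre>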
+ */ + public DeprecationStatus deprecationStatus() { + return deprecationStatus; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("machineTypeId", machineTypeId) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("description", description) + .add("cpus", cpus) + .add("memoryMb", memoryMb) + .add("scratchDisksSizeGb", scratchDisksSizeGb) + .add("maximumPersistentDisks", maximumPersistentDisks) + .add("maximumPersistentDisksSizeGb", maximumPersistentDisksSizeGb) + .add("deprecationStatus", deprecationStatus) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(machineTypeId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(MachineType.class) + && Objects.equals(toPb(), ((MachineType) obj).toPb()); + } + + com.google.api.services.compute.model.MachineType toPb() { + com.google.api.services.compute.model.MachineType machineTypePb = + new com.google.api.services.compute.model.MachineType(); + if (generatedId != null) { + machineTypePb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + machineTypePb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + machineTypePb.setName(machineTypeId.type()); + machineTypePb.setDescription(description); + machineTypePb.setSelfLink(machineTypeId.selfLink()); + machineTypePb.setGuestCpus(cpus); + machineTypePb.setMemoryMb(memoryMb); + if (scratchDisksSizeGb != null) { + machineTypePb.setScratchDisks(Lists.transform(scratchDisksSizeGb, + new Function() { + @Override + public ScratchDisks apply(Integer diskSize) { + return new ScratchDisks().setDiskGb(diskSize); + } + })); + } + machineTypePb.setMaximumPersistentDisks(maximumPersistentDisks); + machineTypePb.setMaximumPersistentDisksSizeGb(maximumPersistentDisksSizeGb); + machineTypePb.setZone(machineTypeId.zoneId().zone()); + if (deprecationStatus != null) { + machineTypePb.setDeprecated(deprecationStatus.toPb()); + } + return machineTypePb; + } + + static Builder builder() { + return new Builder(); + } + + static MachineType fromPb(com.google.api.services.compute.model.MachineType machineTypePb) { + Builder builder = builder(); + builder.machineTypeId(MachineTypeId.fromUrl(machineTypePb.getSelfLink())); + if (machineTypePb.getId() != null) { + builder.generatedId(machineTypePb.getId().toString()); + } + if (machineTypePb.getCreationTimestamp() != null) { + builder.creationTimestamp( + TIMESTAMP_FORMATTER.parseMillis(machineTypePb.getCreationTimestamp())); + } + builder.description(machineTypePb.getDescription()); + builder.cpus(machineTypePb.getGuestCpus()); + builder.memoryMb(machineTypePb.getMemoryMb()); + if (machineTypePb.getScratchDisks() != null) { + builder.scratchDisksSizeGb( + Lists.transform(machineTypePb.getScratchDisks(), new Function() { + @Override + public Integer apply(ScratchDisks scratchDiskPb) { + return scratchDiskPb.getDiskGb(); + } + })); + } + builder.maximumPersistentDisks(machineTypePb.getMaximumPersistentDisks()); + builder.maximumPersistentDisksSizeGb(machineTypePb.getMaximumPersistentDisksSizeGb()); + if (machineTypePb.getDeprecated() != null) { + builder.deprecationStatus( + DeprecationStatus.fromPb(machineTypePb.getDeprecated(), MachineTypeId.FROM_URL_FUNCTION)); + } + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/MachineTypeId.java 
b/gcloud-java-compute/src/main/java/com/google/cloud/compute/MachineTypeId.java new file mode 100644 index 000000000000..bb439c1e29fe --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/MachineTypeId.java @@ -0,0 +1,148 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine machine type. + */ +public final class MachineTypeId extends ResourceId { + + static final Function FROM_URL_FUNCTION = + new Function() { + @Override + public MachineTypeId apply(String pb) { + return MachineTypeId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = + new Function() { + @Override + public String apply(MachineTypeId machineTypeId) { + return machineTypeId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "zones/([^/]+)/machineTypes/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -5819598544478859608L; + + private final String zone; + private final String type; + + private MachineTypeId(String project, String zone, String type) { + super(project); + this.zone = checkNotNull(zone); + this.type = checkNotNull(type); + } + + /** + * Returns the name of the machine type. + */ + public String type() { + return type; + } + + /** + * Returns the name of the zone this machine type belongs to. + */ + public String zone() { + return zone; + } + + /** + * Returns the identity of the zone this machine type belongs to. + */ + public ZoneId zoneId() { + return ZoneId.of(project(), zone); + } + + @Override + public String selfLink() { + return super.selfLink() + "/zones/" + zone + "/machineTypes/" + type; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("zone", zone).add("type", type); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), zone, type); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof MachineTypeId)) { + return false; + } + MachineTypeId other = (MachineTypeId) obj; + return baseEquals(other) + && Objects.equals(zone, other.zone) + && Objects.equals(type, other.type); + } + + @Override + MachineTypeId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return MachineTypeId.of(projectId, zone, type); + } + + /** + * Returns a machine type identity given the zone and type names. + */ + public static MachineTypeId of(String zone, String type) { + return new MachineTypeId(null, zone, type); + } + + /** + * Returns a machine type identity given project, zone and type names. 
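+   *
+   * <p>For example, where the project name is a placeholder:
+   * <pre> {@code
+   * MachineTypeId machineTypeId = MachineTypeId.of("my-project", "us-central1-a", "n1-standard-1");
+   * }</pre>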
+   */
+  public static MachineTypeId of(String project, String zone, String type) {
+    return new MachineTypeId(project, zone, type);
+  }
+
+  /**
+   * Returns {@code true} if the provided string matches the expected format of a machine type URL.
+   * Returns {@code false} otherwise.
+   */
+  static boolean matchesUrl(String url) {
+    return PATTERN.matcher(url).matches();
+  }
+
+  static MachineTypeId fromUrl(String url) {
+    Matcher matcher = PATTERN.matcher(url);
+    if (!matcher.matches()) {
+      throw new IllegalArgumentException(url + " is not a valid machine type URL");
+    }
+    return MachineTypeId.of(matcher.group(1), matcher.group(2), matcher.group(3));
+  }
+}
diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Metadata.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Metadata.java
new file mode 100644
index 000000000000..22ba59834d4b
--- /dev/null
+++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Metadata.java
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.compute;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+
+import com.google.common.base.Function;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+import java.io.Serializable;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * Metadata for a Google Compute Engine instance as key/value pairs. This includes custom metadata
+ * and predefined keys.
+ *
+ * @see <a href="https://cloud.google.com/compute/docs/metadata">Metadata</a>
+ */
+public final class Metadata implements Serializable {
+
+  static final Function<com.google.api.services.compute.model.Metadata, Metadata>
+      FROM_PB_FUNCTION =
+          new Function<com.google.api.services.compute.model.Metadata, Metadata>() {
+            @Override
+            public Metadata apply(com.google.api.services.compute.model.Metadata pb) {
+              return Metadata.fromPb(pb);
+            }
+          };
+  static final Function<Metadata, com.google.api.services.compute.model.Metadata> TO_PB_FUNCTION =
+      new Function<Metadata, com.google.api.services.compute.model.Metadata>() {
+        @Override
+        public com.google.api.services.compute.model.Metadata apply(Metadata metadata) {
+          return metadata.toPb();
+        }
+      };
+
+  private static final long serialVersionUID = -945038809838910107L;
+
+  private final Map<String, String> values;
+  private final String fingerprint;
+
+  /**
+   * A builder for {@code Metadata} objects.
+   */
+  public static final class Builder {
+
+    private Map<String, String> values;
+    private String fingerprint;
+
+    Builder() {
+      values = Maps.newHashMap();
+    }
+
+    Builder(Metadata metadata) {
+      this.values = metadata.values != null ? Maps.newHashMap(metadata.values)
+          : Maps.newHashMap();
+      this.fingerprint = metadata.fingerprint;
+    }
+
+    /**
+     * Sets the metadata for the instance as key/value pairs. The total size of all keys and
+     * values must be less than 512 KB. Keys must conform to the following regexp:
+     * {@code [a-zA-Z0-9-_]+}, and be less than 128 bytes in length. This is reflected as part of
+     * a URL in the metadata server.
Additionally, to avoid ambiguity, keys must not conflict with + * any other metadata keys for the project. Values must be less than or equal to 32768 bytes. + */ + public Builder values(Map values) { + this.values = Maps.newHashMap(checkNotNull(values)); + return this; + } + + /** + * Adds a key/value pair to the instance metadata. The total size of all keys and values must + * be less than 512 KB. Keys must conform to the following regexp: {@code [a-zA-Z0-9-_]+}, and + * be less than 128 bytes in length. This is reflected as part of a URL in the metadata + * server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata + * keys for the project. Values must be less than or equal to 32768 bytes. + */ + public Builder add(String key, String value) { + this.values.put(key, value); + return this; + } + + /** + * Sets the fingerprint for the metadata. This value can be used to update instance's + * metadata. + */ + public Builder fingerprint(String fingerprint) { + this.fingerprint = fingerprint; + return this; + } + + /** + * Creates a {@code Metadata} object. + */ + public Metadata build() { + return new Metadata(this); + } + } + + private Metadata(Builder builder) { + this.values = ImmutableMap.copyOf(builder.values); + this.fingerprint = builder.fingerprint; + } + + /** + * Returns instance's metadata as key/value pairs. + */ + public Map values() { + return values; + } + + /** + * Returns the fingerprint for the metadata. This value can be used to update instance's + * metadata. + */ + public String fingerprint() { + return fingerprint; + } + + /** + * Returns a builder for the current instance metadata. + */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("values", values) + .add("fingerprint", fingerprint) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(values, fingerprint); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj instanceof Metadata + && Objects.equals(toPb(), ((Metadata) obj).toPb()); + } + + com.google.api.services.compute.model.Metadata toPb() { + com.google.api.services.compute.model.Metadata metadataPb = + new com.google.api.services.compute.model.Metadata(); + metadataPb.setFingerprint(fingerprint); + List itemsPb = + Lists.newArrayListWithCapacity(values.size()); + for (Map.Entry entry : values.entrySet()) { + itemsPb.add(new com.google.api.services.compute.model.Metadata.Items() + .setKey(entry.getKey()).setValue(entry.getValue())); + } + metadataPb.setItems(itemsPb); + metadataPb.setFingerprint(fingerprint); + return metadataPb; + } + + /** + * Returns a builder for a {@code Metadata} object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a {@code Metadata} object given the the metadata as a map. The total size of all keys + * and values must be less than 512 KB. Keys must conform to the following regexp: + * {@code [a-zA-Z0-9-_]+}, and be less than 128 bytes in length. This is reflected as part of a + * URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any + * other metadata keys for the project. Values must be less than or equal to 32768 bytes. 
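+   *
+   * <p>A small sketch with illustrative keys and values:
+   * <pre> {@code
+   * Metadata metadata = Metadata.of(ImmutableMap.of("key1", "value1", "key2", "value2"));
+   * }</pre>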
+ */ + public static Metadata of(Map values) { + return builder().values(values).build(); + } + + static Metadata fromPb(com.google.api.services.compute.model.Metadata metadataPb) { + Builder builder = builder(); + if (metadataPb.getItems() != null) { + Map metadataValues = + Maps.newHashMapWithExpectedSize(metadataPb.getItems().size()); + for (com.google.api.services.compute.model.Metadata.Items item : metadataPb.getItems()) { + metadataValues.put(item.getKey(), item.getValue()); + } + builder.values(metadataValues); + } + return builder.fingerprint(metadataPb.getFingerprint()).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Network.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Network.java new file mode 100644 index 000000000000..51a0287f3fed --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Network.java @@ -0,0 +1,194 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.NetworkOption; +import com.google.cloud.compute.Compute.OperationOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.Objects; + +/** + * A Google Compute Engine Network. Every virtual machine instance is created as a member of a + * network. Networks connect instances to each other and to the Internet. You can segment your + * networks, use firewall rules to restrict access to instances, and create static routes to forward + * traffic to specific destinations. Objects of this class are immutable. To get a {@code Network} + * object with the most recent information use {@link #reload}. {@code Network} adds a layer of + * service-related functionality over {@link NetworkInfo}. + * + * @see Using Networks and Firewalls + */ +public class Network extends NetworkInfo { + + private static final long serialVersionUID = 8608280908101278096L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Network} objects. 
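 + * + *
A builder for an existing network is typically obtained via {@link Network#toBuilder()}. + * For example, an illustrative sketch ({@code "my-network"} is a placeholder name):
+   * 
 {@code
+   * Network network = compute.getNetwork("my-network");
+   * Network copy = network.toBuilder().description("updated description").build();
+   * }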
+ */ + public static class Builder extends NetworkInfo.Builder { + + private final Compute compute; + private final NetworkInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, NetworkId networkId, NetworkConfiguration configuration) { + this.compute = compute; + this.infoBuilder = new NetworkInfo.BuilderImpl(networkId, configuration); + this.infoBuilder.networkId(networkId); + this.infoBuilder.configuration(configuration); + } + + Builder(Network subnetwork) { + this.compute = subnetwork.compute; + this.infoBuilder = new NetworkInfo.BuilderImpl(subnetwork); + } + + @Override + Builder generatedId(String generatedId) { + infoBuilder.generatedId(generatedId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + public Builder networkId(NetworkId networkId) { + infoBuilder.networkId(networkId); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + public Builder configuration(NetworkConfiguration configuration) { + infoBuilder.configuration(configuration); + return this; + } + + @Override + public Network build() { + return new Network(compute, infoBuilder); + } + } + + Network(Compute compute, NetworkInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this network exists. + * + * @return {@code true} if this network exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(NetworkOption.fields()) != null; + } + + /** + * Fetches current network' latest information. Returns {@code null} if the network does not + * exist. + * + * @param options network options + * @return a {@code Network} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Network reload(NetworkOption... options) { + return compute.getNetwork(networkId().network(), options); + } + + /** + * Deletes this network. + * + * @return an operation object if delete request was successfully sent, {@code null} if the + * network was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... options) { + return compute.deleteNetwork(networkId().network(), options); + } + + /** + * Creates a subnetwork for this network given its identity and the range of IPv4 addresses in + * CIDR format. Subnetwork creation is only supported for networks in "custom subnet mode" (i.e. + * {@link #configuration()} returns a {@link SubnetNetworkConfiguration}) with automatic creation + * of subnetworks disabled (i.e. {@link SubnetNetworkConfiguration#autoCreateSubnetworks()} + * returns {@code false}). + * + * @return an operation object if creation request was successfully sent + * @throws ComputeException upon failure + * @see CIDR + */ + public Operation createSubnetwork(SubnetworkId subnetworkId, String ipRange, + OperationOption... options) { + return compute.create(SubnetworkInfo.of(subnetworkId, networkId(), ipRange), options); + } + + /** + * Returns the network's {@code Compute} object used to issue requests. 
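 + * + *
For example, the returned service object can be used to issue further requests; the + * sketch below is equivalent to calling {@link #delete}:
+   * 
 {@code
+   * Operation operation = network.compute().deleteNetwork(network.networkId().network());
+   * }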
+ */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Network.class)) { + return false; + } + Network other = (Network) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Network fromPb(Compute compute, + com.google.api.services.compute.model.Network networkPb) { + return new Network(compute, new NetworkInfo.BuilderImpl(networkPb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkConfiguration.java new file mode 100644 index 000000000000..4a7500f66d07 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkConfiguration.java @@ -0,0 +1,98 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.api.services.compute.model.Network; +import com.google.common.base.MoreObjects; +import com.google.common.base.MoreObjects.ToStringHelper; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Base class for Google Compute Engine network configuration. Use + * {@link StandardNetworkConfiguration} to create a standard network with associated address range. + * Use {@link SubnetNetworkConfiguration} to create a network that supports subnetworks, up to one + * per region, each with its own address range. + * + * @see Using Networks and Firewalls + */ +public abstract class NetworkConfiguration implements Serializable { + + private static final long serialVersionUID = 6599798536784576564L; + + private final Type type; + + /** + * Type of a Google Compute Engine disk configuration. + */ + public enum Type { + /** + * A Google Compute Engine network with no subnetworks. + */ + STANDARD, + + /** + * A Google Compute Engine network that supports the creation of subnetworks (either automatic + * or manual). + */ + SUBNET + } + + NetworkConfiguration(Type type) { + this.type = type; + } + + /** + * Returns the network's type. This method returns {@link Type#STANDARD} for a standard networks + * with no subnetworks. This method returns {@link Type#SUBNET} for a network that supports the + * creation of subnetworks (either automatic or manual). 
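 + * + *
For example, the type can be checked before narrowing the configuration (illustrative + * sketch, assuming {@code network} is a {@code NetworkInfo} object):
+   * 
 {@code
+   * if (network.configuration().type() == NetworkConfiguration.Type.SUBNET) {
+   *   SubnetNetworkConfiguration configuration = network.configuration();
+   * }}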
+ */ + public Type type() { + return type; + } + + ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("type", type); + } + + @Override + public String toString() { + return toStringHelper().toString(); + } + + final int baseHashCode() { + return Objects.hash(type); + } + + final boolean baseEquals(NetworkConfiguration networkConfiguration) { + return networkConfiguration != null + && getClass().equals(networkConfiguration.getClass()) + && Objects.equals(toPb(), networkConfiguration.toPb()); + } + + abstract Network toPb(); + + @SuppressWarnings("unchecked") + static T fromPb(Network networkPb) { + if (networkPb.getIPv4Range() != null) { + return (T) StandardNetworkConfiguration.fromPb(networkPb); + } else { + return (T) SubnetNetworkConfiguration.fromPb(networkPb); + } + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkId.java new file mode 100644 index 000000000000..1108f126588b --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkId.java @@ -0,0 +1,137 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects.ToStringHelper; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine network. + */ +public final class NetworkId extends ResourceId { + + private static final String REGEX = ResourceId.REGEX + "global/networks/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 2386765228138819506L; + + private final String network; + + NetworkId(String project, String network) { + super(project); + this.network = checkNotNull(network); + } + + private NetworkId(NetworkId networkId) { + super(networkId.project()); + this.network = checkNotNull(networkId.network()); + } + + /** + * Returns the name of the network. The network name must be 1-63 characters long and comply with + * RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. 
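 + * + *
For example, {@code NetworkId.of("my-project", "default").network()} returns + * {@code "default"} (both names here are placeholders). 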
+ * + * @see RFC1035 + */ + public String network() { + return network; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/networks/" + network; + } + + @Override + ToStringHelper toStringHelper() { + return super.toStringHelper().add("network", network); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), network); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof NetworkId)) { + return false; + } + NetworkId other = (NetworkId) obj; + return baseEquals(other) && Objects.equals(network, other.network); + } + + @Override + NetworkId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return NetworkId.of(projectId, network); + } + + /** + * Returns a new network identity given project and network names. The network name must be 1-63 + * characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static NetworkId of(String project, String network) { + return new NetworkId(project, network); + } + + /** + * Returns a new network identity given network name. The network name must be 1-63 characters + * long and comply with RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public static NetworkId of(String network) { + return NetworkId.of(null, network); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a network URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static NetworkId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid network URL"); + } + return NetworkId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkInfo.java new file mode 100644 index 000000000000..a7864135b810 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkInfo.java @@ -0,0 +1,291 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Network; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.Objects; + +/** + * A Google Compute Engine Network. Every virtual machine instance is created as a member of a + * network. Networks connect instances to each other and to the Internet. You can segment your + * networks, use firewall rules to restrict access to instances, and create static routes to forward + * traffic to specific destinations. + * + *

A network lives in a project and is isolated from other networks in the project. A project can + * have up to five different networks. + * + * @see Using Networks and Firewalls + */ +public class NetworkInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public NetworkInfo apply(Network pb) { + return NetworkInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Network apply(NetworkInfo network) { + return network.toPb(); + } + }; + + private static final long serialVersionUID = 4336912581538114026L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final NetworkId networkId; + private final Long creationTimestamp; + private final String description; + private final NetworkConfiguration configuration; + + /** + * A builder for {@code NetworkInfo} objects. + */ + public abstract static class Builder { + + abstract Builder generatedId(String generatedId); + + abstract Builder creationTimestamp(Long creationTimestamp); + + /** + * Sets the identity of the network. + */ + public abstract Builder networkId(NetworkId networkId); + + /** + * Sets an optional textual description of the network. + */ + public abstract Builder description(String description); + + /** + * Sets the network configuration. Use {@link StandardNetworkConfiguration} to create a standard + * network with associated IPv4 range. Use {@link SubnetNetworkConfiguration} to create a + * network that could be divided into subnetworks, up to one per region, each with its own + * address range. + */ + public abstract Builder configuration(NetworkConfiguration configuration); + + /** + * Creates a {@code NetworkInfo} object. 
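 + * + *
For example, a possible usage sketch ({@code configuration} stands for either a + * {@code StandardNetworkConfiguration} or a {@code SubnetNetworkConfiguration}):
+     * 
 {@code
+     * NetworkInfo networkInfo = NetworkInfo.builder(NetworkId.of("my-network"), configuration)
+     *     .description("An example network")
+     *     .build();
+     * }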
+ */ + public abstract NetworkInfo build(); + } + + static final class BuilderImpl extends Builder { + + private String generatedId; + private NetworkId networkId; + private Long creationTimestamp; + private String description; + private NetworkConfiguration configuration; + + BuilderImpl(NetworkId networkId, NetworkConfiguration configuration) { + this.networkId = checkNotNull(networkId); + this.configuration = checkNotNull(configuration); + } + + BuilderImpl(NetworkInfo networkInfo) { + this.generatedId = networkInfo.generatedId; + this.creationTimestamp = networkInfo.creationTimestamp; + this.networkId = networkInfo.networkId; + this.description = networkInfo.description; + this.configuration = networkInfo.configuration; + } + + BuilderImpl(Network networkPb) { + if (networkPb.getId() != null) { + this.generatedId = networkPb.getId().toString(); + } + if (networkPb.getCreationTimestamp() != null) { + this.creationTimestamp = TIMESTAMP_FORMATTER.parseMillis(networkPb.getCreationTimestamp()); + } + this.networkId = NetworkId.fromUrl(networkPb.getSelfLink()); + this.description = networkPb.getDescription(); + this.configuration = NetworkConfiguration.fromPb(networkPb); + } + + @Override + BuilderImpl generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + BuilderImpl creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + public BuilderImpl networkId(NetworkId networkId) { + this.networkId = checkNotNull(networkId); + return this; + } + + @Override + public BuilderImpl description(String description) { + this.description = description; + return this; + } + + @Override + public BuilderImpl configuration(NetworkConfiguration configuration) { + this.configuration = checkNotNull(configuration); + return this; + } + + @Override + public NetworkInfo build() { + return new NetworkInfo(this); + } + } + + NetworkInfo(BuilderImpl builder) { + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.networkId = builder.networkId; + this.description = builder.description; + this.configuration = builder.configuration; + } + + /** + * Returns the service-generated unique identifier for the network. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns the network identity. + */ + public NetworkId networkId() { + return networkId; + } + + /** + * Returns a textual description of the network. + */ + public String description() { + return description; + } + + /** + * Returns the network configuration. Returns a {@link StandardNetworkConfiguration} for standard + * networks with associated IPv4 range. Returns {@link SubnetNetworkConfiguration} for networks + * that could be divided into subnetworks, up to one per region, each with its own address range. + */ + @SuppressWarnings("unchecked") + public T configuration() { + return (T) configuration; + } + + /** + * Returns a builder for the current network. 
+ */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("networkId", networkId) + .add("description", description) + .add("configuration", configuration) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(generatedId, networkId, creationTimestamp, description, configuration); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(NetworkInfo.class) + && Objects.equals(toPb(), ((NetworkInfo) obj).toPb()); + } + + NetworkInfo setProjectId(String projectId) { + return toBuilder() + .networkId(networkId.setProjectId(projectId)) + .build(); + } + + Network toPb() { + Network networkPb = configuration.toPb(); + if (generatedId != null) { + networkPb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + networkPb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + networkPb.setName(networkId.network()); + networkPb.setDescription(description); + networkPb.setSelfLink(networkId.selfLink()); + return networkPb; + } + + /** + * Returns a builder for a {@code NetworkInfo} object given the network identity and its + * configuration. Use {@link StandardNetworkConfiguration} to create a standard network with + * associated address range. Use {@link SubnetNetworkConfiguration} to create a network that + * supports subnetworks, up to one per region, each with its own address range. + */ + public static Builder builder(NetworkId networkId, NetworkConfiguration configuration) { + return new BuilderImpl(networkId, configuration); + } + + /** + * Returns a {@code NetworkInfo} object given the network identity. Use + * {@link StandardNetworkConfiguration} to create a standard network with associated address + * range. Use {@link SubnetNetworkConfiguration} to create a network that supports subnetworks, up + * to one per region, each with its own address range. + */ + public static NetworkInfo of(NetworkId networkId, NetworkConfiguration configuration) { + return builder(networkId, configuration).build(); + } + + static NetworkInfo fromPb(Network networkPb) { + return new BuilderImpl(networkPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkInterface.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkInterface.java new file mode 100644 index 000000000000..06964f6641af --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/NetworkInterface.java @@ -0,0 +1,505 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A network interface for a Google Compute Engine instance. Network interfaces specify how + * the instance is configured to interact with other network services, such as connecting to the + * internet. + * + * @see Configuring an + * Instance's IP Addresses + */ +public class NetworkInterface implements Serializable { + + static final Function + FROM_PB_FUNCTION = + new Function() { + @Override + public NetworkInterface apply( + com.google.api.services.compute.model.NetworkInterface pb) { + return NetworkInterface.fromPb(pb); + } + }; + static final Function + TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.NetworkInterface apply( + NetworkInterface networkInterface) { + return networkInterface.toPb(); + } + }; + + private static final long serialVersionUID = 936741262053605581L; + + private final String name; + private final NetworkId network; + private final String networkIp; + private final SubnetworkId subnetwork; + private final List accessConfigurations; + + /** + * Access configuration for a Google Compute Engine instance's network interface. Objects of this + * class can be used to assign either a static or an ephemeral external IP address to Google + * Compute Engine instances. + * + * @see + * Static external IP addresses + * @see + * Ephemeral external IP addresses + */ + public static final class AccessConfig implements Serializable { + + static final Function + FROM_PB_FUNCTION = + new Function() { + @Override + public AccessConfig apply(com.google.api.services.compute.model.AccessConfig pb) { + return AccessConfig.fromPb(pb); + } + }; + static final Function + TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.AccessConfig apply( + AccessConfig instance) { + return instance.toPb(); + } + }; + + private static final long serialVersionUID = -5438060668934041567L; + + private final String name; + private final String natIp; + private final Type type; + + /** + * The type of network access configuration. The only supported value is {@code ONE_TO_ONE_NAT}. + */ + public enum Type { + ONE_TO_ONE_NAT + } + + public static final class Builder { + + private String name; + private String natIp; + private Type type; + + private Builder() {} + + private Builder(AccessConfig accessConfig) { + this.name = accessConfig.name; + this.natIp = accessConfig.natIp; + this.type = accessConfig.type; + } + + /** + * Sets the name of the access configuration. + */ + public Builder name(String name) { + this.name = name; + return this; + } + + /** + * Sets an external IP address associated with this instance. Specify an unused static + * external IP address available to the project or leave this field undefined to use an IP + * from a shared ephemeral IP address pool. If you specify a static external IP address, it + * must live in the same region as the zone of the instance. + * + * @see + * Ephemeral external IP addresses + * @see + * Ephemeral external IP addresses + */ + public Builder natIp(String natIp) { + this.natIp = natIp; + return this; + } + + /** + * Sets the type of the access configuration. 
The only supported value is + * {@link Type#ONE_TO_ONE_NAT}. + */ + public Builder type(Type type) { + this.type = type; + return this; + } + + /** + * Creates an {@code AccessConfig} object. + */ + public AccessConfig build() { + return new AccessConfig(this); + } + } + + AccessConfig(Builder builder) { + this.name = builder.name; + this.natIp = builder.natIp; + this.type = builder.type; + } + + /** + * Returns the name of the access configuration. + */ + public String name() { + return name; + } + + /** + * Returns an external IP address associated with this instance. + */ + public String natIp() { + return natIp; + } + + /** + * Returns the type of network access configuration. The only supported value is + * {@link Type#ONE_TO_ONE_NAT}. + */ + public Type type() { + return type; + } + + /** + * Returns a builder for the current access configuration. + */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name) + .add("natIp", natIp) + .add("type", type) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(name, natIp, type); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj instanceof AccessConfig + && Objects.equals(toPb(), ((AccessConfig) obj).toPb()); + } + + com.google.api.services.compute.model.AccessConfig toPb() { + com.google.api.services.compute.model.AccessConfig accessConfigPb = + new com.google.api.services.compute.model.AccessConfig(); + accessConfigPb.setName(name); + accessConfigPb.setNatIP(natIp); + if (type != null) { + accessConfigPb.setType(type.name()); + } + return accessConfigPb; + } + + /** + * Returns a builder for an {@code AccessConfig} object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns an {@code AccessConfig} object given the static external IP address. + * + * @see + * Static external IP addresses + */ + public static AccessConfig of(String natIp) { + return builder().natIp(natIp).build(); + } + + /** + * Returns an {@code AccessConfig} object. IP address for this access configuration will be + * taken from a pool of ephemeral addresses. + * + * @see + * Ephemeral external IP addresses + */ + public static AccessConfig of() { + return builder().build(); + } + + static AccessConfig fromPb(com.google.api.services.compute.model.AccessConfig configPb) { + Builder builder = builder(); + builder.name(configPb.getName()); + if (configPb.getNatIP() != null) { + builder.natIp(configPb.getNatIP()); + } + if (configPb.getType() != null) { + builder.type(Type.valueOf(configPb.getType())); + } + return builder.build(); + } + } + + public static final class Builder { + + private String name; + private NetworkId network; + private String networkIp; + private SubnetworkId subnetwork; + private List accessConfigurations; + + private Builder(NetworkId network) { + this.network = checkNotNull(network); + } + + private Builder(NetworkInterface networkInterface) { + this.name = networkInterface.name; + this.network = networkInterface.network; + this.networkIp = networkInterface.networkIp; + this.subnetwork = networkInterface.subnetwork; + this.accessConfigurations = networkInterface.accessConfigurations; + } + + Builder name(String name) { + this.name = name; + return this; + } + + /** + * Sets the identity of the network this interface applies to. 
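 + * + *
For example, a sketch that builds an interface for a network named {@code "default"} + * with an ephemeral external IP address:
+     * 
 {@code
+     * NetworkInterface networkInterface = NetworkInterface.builder(NetworkId.of("default"))
+     *     .accessConfigurations(AccessConfig.of())
+     *     .build();
+     * }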
+ */ + public Builder network(NetworkId network) { + this.network = checkNotNull(network); + return this; + } + + Builder networkIp(String networkIp) { + this.networkIp = networkIp; + return this; + } + + /** + * Sets the identity of the subnetwork this interface applies to. Setting the subnetwork is + * not necessary when the network is in "automatic subnet mode". + */ + public Builder subnetwork(SubnetworkId subnetwork) { + this.subnetwork = subnetwork; + return this; + } + + /** + * Sets a list of access configurations for the network interface. Access configurations can be + * used to assign either a static or an ephemeral external IP address to Google Compute Engine + * instances. At the moment, network interfaces only support one access configuration. + * + * @see + * Static external IP addresses + * @see + * Ephemeral external IP addresses + */ + public Builder accessConfigurations(List accessConfigurations) { + this.accessConfigurations = ImmutableList.copyOf(accessConfigurations); + return this; + } + + /** + * Sets a list of access configurations for the network interface. Access configurations can be + * used to assign either a static or an ephemeral external IP address to Google Compute Engine + * instances. At the moment, network interfaces only support one access configuration. + * + * @see + * Static external IP addresses + * @see + * Ephemeral external IP addresses + */ + public Builder accessConfigurations(AccessConfig... accessConfigurations) { + accessConfigurations(Arrays.asList(accessConfigurations)); + return this; + } + + /** + * Creates a {@code NetworkInterface} object. + */ + public NetworkInterface build() { + return new NetworkInterface(this); + } + } + + private NetworkInterface(Builder builder) { + this.name = builder.name; + this.network = builder.network; + this.networkIp = builder.networkIp; + this.subnetwork = builder.subnetwork; + this.accessConfigurations = builder.accessConfigurations != null + ? builder.accessConfigurations : ImmutableList.of(); + } + + /** + * Returns the name of the network interface, generated by the service. For network devices, + * these are {@code eth0}, {@code eth1}, etc. + */ + public String name() { + return name; + } + + /** + * Returns the identity of the network this interface applies to. + */ + public NetworkId network() { + return network; + } + + /** + * An optional IPv4 internal network address assigned by the service to the instance for this + * network interface. + */ + public String networkIp() { + return networkIp; + } + + /** + * Returns the identity of the subnetwork this interface applies to. + */ + public SubnetworkId subnetwork() { + return subnetwork; + } + + /** + * Returns a list of access configurations for the network interface. + */ + public List accessConfigurations() { + return accessConfigurations; + } + + /** + * Returns a builder for the current network interface. 
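 + * + *
For example, an existing interface can be copied with a static external IP address (the + * address below is a documentation-range placeholder):
+   * 
 {@code
+   * NetworkInterface updated = networkInterface.toBuilder()
+   *     .accessConfigurations(AccessConfig.of("203.0.113.1"))
+   *     .build();
+   * }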
+ */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", name) + .add("network", network) + .add("networkIp", networkIp) + .add("subnetwork", subnetwork) + .add("accessConfigurations", accessConfigurations) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(name, network, networkIp, subnetwork, accessConfigurations); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(NetworkInterface.class) + && Objects.equals(toPb(), ((NetworkInterface) obj).toPb()); + } + + com.google.api.services.compute.model.NetworkInterface toPb() { + com.google.api.services.compute.model.NetworkInterface interfacePb = + new com.google.api.services.compute.model.NetworkInterface(); + interfacePb.setName(name); + interfacePb.setNetwork(network.selfLink()); + if (subnetwork != null) { + interfacePb.setSubnetwork(subnetwork.selfLink()); + } + interfacePb.setNetworkIP(networkIp); + if (accessConfigurations != null) { + interfacePb.setAccessConfigs( + Lists.transform(accessConfigurations, AccessConfig.TO_PB_FUNCTION)); + } + return interfacePb; + } + + NetworkInterface setProjectId(String projectId) { + Builder builder = toBuilder(); + builder.network(network.setProjectId(projectId)); + if (subnetwork != null) { + builder.subnetwork(subnetwork.setProjectId(projectId)); + } + return builder.build(); + } + + /** + * Returns a builder for a {@code NetworkInterface} object given the network's identity. + */ + public static Builder builder(NetworkId networkId) { + return new Builder(networkId); + } + + /** + * Returns a builder for a {@code NetworkInterface} object given the network's name. + */ + public static Builder builder(String network) { + return builder(NetworkId.of(network)); + } + + /** + * Returns a {@code NetworkInterface} object given the network's identity. + */ + public static NetworkInterface of(NetworkId networkId) { + return builder(networkId).build(); + } + + /** + * Returns a {@code NetworkInterface} object given the network's name. + */ + public static NetworkInterface of(String network) { + return builder(network).build(); + } + + static NetworkInterface fromPb( + com.google.api.services.compute.model.NetworkInterface interfacePb) { + Builder builder = builder(NetworkId.fromUrl(interfacePb.getNetwork())) + .name(interfacePb.getName()); + if (interfacePb.getSubnetwork() != null) { + builder.subnetwork(SubnetworkId.fromUrl(interfacePb.getSubnetwork())); + } + builder.networkIp(interfacePb.getNetworkIP()); + builder.accessConfigurations(interfacePb.getAccessConfigs() != null + ? Lists.transform(interfacePb.getAccessConfigs(), AccessConfig.FROM_PB_FUNCTION) : + ImmutableList.of()); + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Operation.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Operation.java new file mode 100644 index 000000000000..326b681098a6 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Operation.java @@ -0,0 +1,786 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.OperationOption; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * Google Compute Engine operations. Operation identity can be obtained via {@link #operationId()}. + * {@link #operationId()} returns {@link GlobalOperationId} for global operations, + * {@link RegionOperationId} for region operations, and {@link ZoneOperationId} for zone operations. + * To get an {@code Operation} object with the most recent information, use + * {@link #reload(OperationOption...)}. + */ +public class Operation implements Serializable { + + private static final long serialVersionUID = -8979001444590023899L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private transient Compute compute; + private final ComputeOptions options; + private final String generatedId; + private final OperationId operationId; + private final String clientOperationId; + private final String operationType; + private final String targetLink; + private final String targetId; + private final Status status; + private final String statusMessage; + private final String user; + private final Integer progress; + private final Long insertTime; + private final Long startTime; + private final Long endTime; + private final List errors; + private final List warnings; + private final Integer httpErrorStatusCode; + private final String httpErrorMessage; + private final String description; + + /** + * Status of an operation. + */ + public enum Status { + PENDING, + RUNNING, + DONE + } + + /** + * An error that can occur during the processing of a Google Compute Engine operation. 
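 + * + *
For example, errors reported for a completed operation can be inspected as follows + * (illustrative sketch):
+   * 
 {@code
+   * if (operation.errors() != null) {
+   *   for (Operation.OperationError error : operation.errors()) {
+   *     System.out.println(error.code() + ": " + error.message());
+   *   }
+   * }}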
+ */ + public static final class OperationError implements Serializable { + + static final Function FROM_PB_FUNCTION = new Function< + com.google.api.services.compute.model.Operation.Error.Errors, OperationError>() { + @Override + public OperationError apply( + com.google.api.services.compute.model.Operation.Error.Errors pb) { + return OperationError.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.Operation.Error.Errors apply( + OperationError operation) { + return operation.toPb(); + } + }; + + private static final long serialVersionUID = -1155314394806515873L; + + private final String code; + private final String location; + private final String message; + + OperationError(String code, String location, String message) { + this.code = code; + this.location = location; + this.message = message; + } + + /** + * Returns an error type identifier for this error. + */ + public String code() { + return code; + } + + /** + * Returns the field in the request which caused the error. This value is optional. + */ + public String location() { + return location; + } + + /** + * Returns an optional, human-readable error message. + */ + public String message() { + return message; + } + + com.google.api.services.compute.model.Operation.Error.Errors toPb() { + return new com.google.api.services.compute.model.Operation.Error.Errors() + .setCode(code) + .setLocation(location) + .setMessage(message); + } + + static OperationError fromPb( + com.google.api.services.compute.model.Operation.Error.Errors errorPb) { + return new OperationError(errorPb.getCode(), errorPb.getLocation(), errorPb.getMessage()); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof OperationError + && Objects.equals(code, ((OperationError) obj).code) + && Objects.equals(message, ((OperationError) obj).message) + && Objects.equals(location, ((OperationError) obj).location); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("code", code) + .add("location", location) + .add("message", message) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(code, location, message); + } + } + + /** + * A warning message that is generated during the processing of a Google Compute Engine operation. + */ + public static final class OperationWarning implements Serializable { + + static final + Function + FROM_PB_FUNCTION = + new Function() { + @Override + public OperationWarning apply( + com.google.api.services.compute.model.Operation.Warnings pb) { + return OperationWarning.fromPb(pb); + } + }; + static final + Function + TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.Operation.Warnings apply( + OperationWarning operation) { + return operation.toPb(); + } + }; + + private static final long serialVersionUID = 4917326627380228928L; + + private final String code; + private final String message; + private final Map metadata; + + OperationWarning(String code, String message, Map metadata) { + this.code = code; + this.metadata = metadata != null ? ImmutableMap.copyOf(metadata) : null; + this.message = message; + } + + /** + * Returns a warning identifier for this warning. For example, {@code NO_RESULTS_ON_PAGE} if + * there are no results in the response. + */ + public String code() { + return code; + } + + /** + * Returns a human-readable error message. 
+ */ + public String message() { + return message; + } + + /** + * Returns metadata about this warning. Each key provides more detail on the warning being + * returned. For example, for warnings where there are no results in a list request for a + * particular zone, this key might be {@code scope} and the key's value might be the zone name. + * Other examples might be a key indicating a deprecated resource, and a suggested replacement, + * or a warning about invalid network settings (for example, if an instance attempts to perform + * IP forwarding but is not enabled for IP forwarding). + */ + public Map metadata() { + return metadata; + } + + com.google.api.services.compute.model.Operation.Warnings toPb() { + com.google.api.services.compute.model.Operation.Warnings warningPb = + new com.google.api.services.compute.model.Operation.Warnings() + .setCode(code) + .setMessage(message); + if (this.metadata != null) { + List metadataPb = + Lists.newArrayListWithCapacity(metadata.size()); + for (Map.Entry entry : metadata.entrySet()) { + metadataPb.add(new com.google.api.services.compute.model.Operation.Warnings.Data() + .setKey(entry.getKey()).setValue(entry.getValue())); + } + warningPb.setData(metadataPb); + } + return warningPb; + } + + static OperationWarning fromPb( + com.google.api.services.compute.model.Operation.Warnings warningPb) { + Map metadata = null; + if (warningPb.getData() != null) { + metadata = Maps.newHashMapWithExpectedSize(warningPb.getData().size()); + for (com.google.api.services.compute.model.Operation.Warnings.Data data + : warningPb.getData()) { + metadata.put(data.getKey(), data.getValue()); + } + } + return new OperationWarning(warningPb.getCode(), warningPb.getMessage(), metadata); + } + + @Override + public boolean equals(Object obj) { + return obj instanceof OperationWarning + && Objects.equals(code, ((OperationWarning) obj).code) + && Objects.equals(message, ((OperationWarning) obj).message) + && Objects.equals(metadata, ((OperationWarning) obj).metadata); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("code", code) + .add("message", message) + .add("metadata", metadata) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(code, message, metadata); + } + } + + static final class Builder { + + private Compute compute; + private String generatedId; + private OperationId operationId; + private String clientOperationId; + private String operationType; + private String targetLink; + private String targetId; + private Status status; + private String statusMessage; + private String user; + private Integer progress; + private Long insertTime; + private Long startTime; + private Long endTime; + private List errors; + private List warnings; + private Integer httpErrorStatusCode; + private String httpErrorMessage; + private String description; + + Builder(Compute compute) { + this.compute = compute; + } + + Builder(Compute compute, com.google.api.services.compute.model.Operation operationPb) { + this.compute = compute; + if (operationPb.getId() != null) { + generatedId = operationPb.getId().toString(); + } + if (RegionOperationId.matchesUrl(operationPb.getSelfLink())) { + operationId = RegionOperationId.fromUrl(operationPb.getSelfLink()); + } else if (ZoneOperationId.matchesUrl(operationPb.getSelfLink())) { + operationId = ZoneOperationId.fromUrl(operationPb.getSelfLink()); + } else { + operationId = GlobalOperationId.fromUrl(operationPb.getSelfLink()); + } + clientOperationId = 
operationPb.getClientOperationId(); + operationType = operationPb.getOperationType(); + targetLink = operationPb.getTargetLink(); + if (operationPb.getTargetId() != null) { + targetId = operationPb.getTargetId().toString(); + } + if (operationPb.getStatus() != null) { + status = Status.valueOf(operationPb.getStatus()); + } + statusMessage = operationPb.getStatusMessage(); + user = operationPb.getUser(); + progress = operationPb.getProgress(); + if (operationPb.getInsertTime() != null) { + insertTime = TIMESTAMP_FORMATTER.parseMillis(operationPb.getInsertTime()); + } + if (operationPb.getStartTime() != null) { + startTime = TIMESTAMP_FORMATTER.parseMillis(operationPb.getStartTime()); + } + if (operationPb.getEndTime() != null) { + endTime = TIMESTAMP_FORMATTER.parseMillis(operationPb.getEndTime()); + } + if (operationPb.getError() != null && operationPb.getError().getErrors() != null) { + errors = + Lists.transform(operationPb.getError().getErrors(), OperationError.FROM_PB_FUNCTION); + } + if (operationPb.getWarnings() != null) { + warnings = Lists.transform(operationPb.getWarnings(), OperationWarning.FROM_PB_FUNCTION); + } + httpErrorStatusCode = operationPb.getHttpErrorStatusCode(); + httpErrorMessage = operationPb.getHttpErrorMessage(); + description = operationPb.getDescription(); + } + + Builder generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + Builder operationId(OperationId operationId) { + this.operationId = checkNotNull(operationId); + return this; + } + + Builder clientOperationId(String clientOperationId) { + this.clientOperationId = clientOperationId; + return this; + } + + Builder operationType(String operationType) { + this.operationType = operationType; + return this; + } + + Builder targetLink(String targetLink) { + this.targetLink = targetLink; + return this; + } + + Builder targetId(String targetId) { + this.targetId = targetId; + return this; + } + + Builder status(Status status) { + this.status = status; + return this; + } + + Builder statusMessage(String statusMessage) { + this.statusMessage = statusMessage; + return this; + } + + Builder user(String user) { + this.user = user; + return this; + } + + Builder progress(Integer progress) { + this.progress = progress; + return this; + } + + Builder insertTime(Long insertTime) { + this.insertTime = insertTime; + return this; + } + + Builder startTime(Long startTime) { + this.startTime = startTime; + return this; + } + + Builder endTime(Long endTime) { + this.endTime = endTime; + return this; + } + + Builder errors(List errors) { + this.errors = ImmutableList.copyOf(checkNotNull(errors)); + return this; + } + + Builder warnings(List warnings) { + this.warnings = ImmutableList.copyOf(checkNotNull(warnings)); + return this; + } + + Builder httpErrorStatusCode(Integer httpErrorStatusCode) { + this.httpErrorStatusCode = httpErrorStatusCode; + return this; + } + + Builder httpErrorMessage(String httpErrorMessage) { + this.httpErrorMessage = httpErrorMessage; + return this; + } + + Builder description(String description) { + this.description = description; + return this; + } + + Operation build() { + return new Operation(this); + } + } + + private Operation(Builder builder) { + this.compute = checkNotNull(builder.compute); + this.options = compute.options(); + this.generatedId = builder.generatedId; + this.operationId = checkNotNull(builder.operationId); + this.clientOperationId = builder.clientOperationId; + this.operationType = builder.operationType; + this.targetLink = builder.targetLink; 
+ this.targetId = builder.targetId; + this.status = builder.status; + this.statusMessage = builder.statusMessage; + this.user = builder.user; + this.progress = builder.progress; + this.insertTime = builder.insertTime; + this.startTime = builder.startTime; + this.endTime = builder.endTime; + this.errors = builder.errors != null ? ImmutableList.copyOf(builder.errors) : null; + this.warnings = builder.warnings != null ? ImmutableList.copyOf(builder.warnings) : null; + this.httpErrorStatusCode = builder.httpErrorStatusCode; + this.httpErrorMessage = builder.httpErrorMessage; + this.description = builder.description; + } + + /** + * Returns the operation's {@code Compute} object used to issue requests. + */ + public Compute compute() { + return compute; + } + + /** + * Returns the service-generated unique identifier for the operation. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the operation's identity. This method returns an {@link GlobalOperationId} for global + * operations, a {@link RegionOperationId} for region operations and a {@link ZoneOperationId} for + * zone operations. + * + * @see RFC1035 + */ + @SuppressWarnings("unchecked") + public T operationId() { + return (T) operationId; + } + + /** + * Reserved for future use. + */ + String clientOperationId() { + return clientOperationId; + } + + /** + * Returns the type of operation. + */ + public String operationType() { + return operationType; + } + + /** + * Returns the URL of the resource that the operation is modifying. + */ + public String targetLink() { + return targetLink; + } + + /** + * Returns the unique service-defined target ID, which identifies the resource that the operation + * is modifying. + */ + public String targetId() { + return targetId; + } + + /** + * Returns the status of the operation. + */ + public Status status() { + return status; + } + + /** + * Returns an optional textual description of the current status of the operation. + */ + public String statusMessage() { + return statusMessage; + } + + /** + * Returns the user who requested the operation, for example: {@code user@example.com}. + */ + public String user() { + return user; + } + + /** + * Returns an optional progress indicator that ranges from 0 to 100. There is no requirement that + * this be linear or support any granularity of operations. This should not be used to guess when + * the operation will be complete. This number should monotonically increase as the operation + * progresses. + */ + public Integer progress() { + return progress; + } + + /** + * Returns the time that this operation was requested. In milliseconds since epoch. + */ + public Long insertTime() { + return insertTime; + } + + /** + * Returns the time that this operation was started by the service. In milliseconds since epoch. + * This value will be {@code null} if the operation has not started yet. + */ + public Long startTime() { + return startTime; + } + + /** + * Returns the time that this operation was completed. In milliseconds since epoch. This value + * will be {@code null} if the operation has not finished yet. + */ + public Long endTime() { + return endTime; + } + + /** + * Returns the errors encountered while processing this operation, if any. Returns {@code null} if + * no error occurred. + */ + public List errors() { + return errors; + } + + /** + * Returns the warnings encountered while processing this operation, if any. Returns {@code null} + * if no warning occurred. 
+ */ + public List warnings() { + return warnings; + } + + /** + * Returns the HTTP error status code that was returned, if the operation failed. For example, a + * {@code 404} means the resource was not found. + */ + public Integer httpErrorStatusCode() { + return httpErrorStatusCode; + } + + /** + * Returns the the HTTP error message that was returned, if the operation failed. For example, a + * {@code NOT FOUND} message is returned if the resource was not found. + */ + public String httpErrorMessage() { + return httpErrorMessage; + } + + /** + * Returns an optional textual description of the operation. + */ + public String description() { + return description; + } + + /** + * Checks if this operation exists. + * + * @return {@code true} if this operation exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() throws ComputeException { + return reload(OperationOption.fields()) != null; + } + + /** + * Checks if this operation has completed its execution, either failing or succeeding. If the + * operation does not exist this method returns {@code true}. You can wait for operation + * completion with: + *

 {@code
 + * while (!operation.isDone()) { 
+   *   Thread.sleep(1000L);
+   * }}
+ * + * @return {@code true} if this operation is in {@link Operation.Status#DONE} state or if it does + * not exist, {@code false} if the state is not {@link Operation.Status#DONE} + * @throws ComputeException upon failure + */ + public boolean isDone() throws ComputeException { + Operation operation = compute.getOperation(operationId, + OperationOption.fields(Compute.OperationField.STATUS)); + return operation == null || operation.status() == Status.DONE; + } + + /** + * Fetches current operation's latest information. Returns {@code null} if the operation does not + * exist. + * + * @param options operation options + * @return an {@code Operation} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Operation reload(OperationOption... options) throws ComputeException { + return compute.getOperation(operationId, options); + } + + /** + * Deletes this operation. Delete is only possible for operations that have completed their + * execution. Any attempt to delete a running operation will fail. + * + * @return {@code true} if operation was deleted, {@code false} if it was not found + * @throws ComputeException upon failure + */ + public boolean delete() throws ComputeException { + return compute.deleteOperation(operationId); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("operationsId", operationId) + .add("clientOperationId", clientOperationId) + .add("operationType", operationType) + .add("targetLink", targetLink) + .add("targetId", targetId) + .add("status", status) + .add("statusMessage", statusMessage) + .add("user", user) + .add("progress", progress) + .add("insertTime", insertTime) + .add("startTime", startTime) + .add("endTime", endTime) + .add("errors", errors) + .add("warnings", warnings) + .add("httpErrorStatusCode", httpErrorStatusCode) + .add("httpErrorMessage", httpErrorMessage) + .add("description", description) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(operationId); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Operation.class)) { + return false; + } + Operation other = (Operation) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + com.google.api.services.compute.model.Operation toPb() { + com.google.api.services.compute.model.Operation operationPb = + new com.google.api.services.compute.model.Operation(); + if (generatedId != null) { + operationPb.setId(new BigInteger(generatedId)); + } + operationPb.setName(operationId.operation()); + operationPb.setClientOperationId(clientOperationId); + switch (operationId.type()) { + case REGION: + operationPb.setRegion(this.operationId().regionId().selfLink()); + break; + case ZONE: + operationPb.setZone(this.operationId().zoneId().selfLink()); + break; + } + if (operationType != null) { + operationPb.setOperationType(operationType); + } + operationPb.setTargetLink(targetLink); + if (targetId != null) { + operationPb.setTargetId(new BigInteger(targetId)); + } + if (status != null) { + operationPb.setStatus(status.name()); + } + operationPb.setStatusMessage(statusMessage); + operationPb.setUser(user); + operationPb.setProgress(progress); + if (insertTime != null) { + operationPb.setInsertTime(TIMESTAMP_FORMATTER.print(insertTime)); + } + if (startTime != null) { + 
operationPb.setStartTime(TIMESTAMP_FORMATTER.print(startTime)); + } + if (endTime != null) { + operationPb.setEndTime(TIMESTAMP_FORMATTER.print(endTime)); + } + if (errors != null) { + operationPb.setError(new com.google.api.services.compute.model.Operation.Error().setErrors( + Lists.transform(errors, OperationError.TO_PB_FUNCTION))); + } + if (warnings != null) { + operationPb.setWarnings(Lists.transform(warnings, OperationWarning.TO_PB_FUNCTION)); + } + operationPb.setHttpErrorStatusCode(httpErrorStatusCode); + operationPb.setHttpErrorMessage(httpErrorMessage); + operationPb.setSelfLink(operationId.selfLink()); + operationPb.setDescription(description); + return operationPb; + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Operation fromPb(Compute compute, + com.google.api.services.compute.model.Operation operationPb) { + return new Builder(compute, operationPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/OperationId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/OperationId.java new file mode 100644 index 000000000000..2a3dc2a28d76 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/OperationId.java @@ -0,0 +1,93 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * Base class for Google Compute Engine operation identities. + */ +public abstract class OperationId extends ResourceId { + + private static final long serialVersionUID = -5502909279744388604L; + + private final String operation; + + OperationId(String project, String operation) { + super(project); + this.operation = checkNotNull(operation); + } + + /** + * Possible types for a Google Compute Engine operation identity. + */ + enum Type { + /** + * Global operations are those operations that deal with global resources, such as global + * addresses or snapshots. + */ + GLOBAL, + + /** + * Region operations are those operations that deal with resources that live in a region, such + * as subnetworks. + */ + REGION, + + /** + * Zone operations are those operations that deal with resources that live in a zone, such as + * disks and instances. + */ + ZONE + } + + /** + * Returns the type of this operation identity. + */ + public abstract Type type(); + + /** + * Returns the name of the operation resource. 
+ */ + public String operation() { + return operation; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("operation", operation); + } + + @Override + final int baseHashCode() { + return Objects.hash(super.baseHashCode(), operation); + } + + @Override + final boolean baseEquals(ResourceId resourceId) { + return resourceId instanceof OperationId + && super.baseEquals(resourceId) + && Objects.equals(operation, ((OperationId) resourceId).operation); + } + + @Override + abstract OperationId setProjectId(String projectId); +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Option.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Option.java new file mode 100644 index 000000000000..de8676b2ac79 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Option.java @@ -0,0 +1,72 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.spi.ComputeRpc; +import com.google.common.base.MoreObjects; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Base class for Compute operation option. + */ +class Option implements Serializable { + + private static final long serialVersionUID = 4116849309806774350L; + + private final ComputeRpc.Option rpcOption; + private final Object value; + + Option(ComputeRpc.Option rpcOption, Object value) { + this.rpcOption = checkNotNull(rpcOption); + this.value = value; + } + + ComputeRpc.Option rpcOption() { + return rpcOption; + } + + Object value() { + return value; + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Option)) { + return false; + } + Option other = (Option) obj; + return Objects.equals(rpcOption, other.rpcOption) + && Objects.equals(value, other.value); + } + + @Override + public int hashCode() { + return Objects.hash(rpcOption, value); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("name", rpcOption.value()) + .add("value", value) + .toString(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Region.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Region.java new file mode 100644 index 000000000000..85845283010c --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Region.java @@ -0,0 +1,377 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine region. + * + * @see Region and Zones + */ +public class Region implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public Region apply(com.google.api.services.compute.model.Region pb) { + return Region.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.Region apply(Region region) { + return region.toPb(); + } + }; + + private static final long serialVersionUID = -3578710133393645135L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final RegionId regionId; + private final String generatedId; + private final Long creationTimestamp; + private final String description; + private final Status status; + private final List zones; + private final List quotas; + private final DeprecationStatus deprecationStatus; + + /** + * Status of the region. + */ + public enum Status { + UP, + DOWN + } + + /** + * A quota assigned to this region. + */ + public static final class Quota implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public Quota apply(com.google.api.services.compute.model.Quota pb) { + return Quota.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.Quota apply(Quota quota) { + return quota.toPb(); + } + }; + private static final long serialVersionUID = -4357118665133226338L; + + private final String metric; + private final double limit; + private final double usage; + + /** + * Returns a region quota object. + */ + Quota(String metric, double limit, double usage) { + this.metric = metric; + this.limit = limit; + this.usage = usage; + } + + /** + * Returns the name of the quota metric. + */ + public String metric() { + return metric; + } + + /** + * Returns the quota limit for this metric. + */ + public double limit() { + return limit; + } + + /** + * Returns the current usage for this quota. 
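+     * <p>For example, the remaining capacity for each quota of a region could be computed with a
+     * sketch along these lines ({@code region} is assumed to be an already-fetched {@code Region}):
+     * <pre> {@code
+     * for (Region.Quota quota : region.quotas()) {
+     *   double remaining = quota.limit() - quota.usage();
+     * }}</pre>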
+ */ + public double usage() { + return usage; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("metric", metric) + .add("limit", limit) + .add("usage", usage) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(metric, limit, usage); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof Quota)) { + return false; + } + Quota other = (Quota) obj; + return Objects.equals(metric, other.metric) + && Objects.equals(limit, other.limit) + && Objects.equals(usage, other.usage); + } + + com.google.api.services.compute.model.Quota toPb() { + return new com.google.api.services.compute.model.Quota() + .setMetric(metric) + .setLimit(limit) + .setUsage(usage); + } + + static Quota fromPb(com.google.api.services.compute.model.Quota quotaPb) { + return new Quota(quotaPb.getMetric(), quotaPb.getLimit(), quotaPb.getUsage()); + } + } + + static final class Builder { + + private RegionId regionId; + private String generatedId; + private Long creationTimestamp; + private String description; + + private Status status; + private List zones; + private List quotas; + private DeprecationStatus deprecationStatus; + + private Builder() {} + + Builder regionId(RegionId regionId) { + this.regionId = regionId; + return this; + } + + Builder generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + Builder creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + Builder description(String description) { + this.description = description; + return this; + } + + Builder status(Status status) { + this.status = status; + return this; + } + + Builder zones(List zones) { + this.zones = ImmutableList.copyOf(zones); + return this; + } + + Builder quotas(List quotas) { + this.quotas = ImmutableList.copyOf(quotas); + return this; + } + + Builder deprecationStatus(DeprecationStatus deprecationStatus) { + this.deprecationStatus = deprecationStatus; + return this; + } + + Region build() { + return new Region(this); + } + } + + private Region(Builder builder) { + this.regionId = builder.regionId; + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.description = builder.description; + this.status = builder.status; + this.zones = builder.zones; + this.quotas = builder.quotas; + this.deprecationStatus = builder.deprecationStatus; + } + + /** + * Returns the region's identity. + */ + public RegionId regionId() { + return regionId; + } + + /** + * Returns the service-generated unique identifier for the region. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns an optional textual description of the region. + */ + public String description() { + return description; + } + + /** + * Returns the status of the status. + */ + public Status status() { + return status; + } + + /** + * Returns a list of identities of zones available in this region. + */ + public List zones() { + return zones; + } + + /** + * Returns quotas assigned to this region. + */ + public List quotas() { + return quotas; + } + + /** + * Returns the deprecation status of the region. If {@link DeprecationStatus#status()} is either + * {@link DeprecationStatus.Status#DELETED} or {@link DeprecationStatus.Status#OBSOLETE} the + * region should not be used. 
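+   * <p>A possible guard, sketched here under the assumption that {@code region} was already
+   * fetched, is:
+   * <pre> {@code
+   * if (region.deprecationStatus() != null
+   *     && region.deprecationStatus().status() == DeprecationStatus.Status.OBSOLETE) {
+   *   // avoid creating new resources in this region
+   * }}</pre>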
Returns {@code null} if the region is not deprecated. + */ + public DeprecationStatus deprecationStatus() { + return deprecationStatus; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("regionId", regionId) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("description", description) + .add("status", status) + .add("zones", zones) + .add("quotas", quotas) + .add("deprecationStatus", deprecationStatus) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(regionId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(Region.class) + && Objects.equals(toPb(), ((Region) obj).toPb()); + } + + com.google.api.services.compute.model.Region toPb() { + com.google.api.services.compute.model.Region regionPb = + new com.google.api.services.compute.model.Region(); + if (generatedId != null) { + regionPb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + regionPb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + regionPb.setName(regionId.region()); + regionPb.setDescription(description); + regionPb.setSelfLink(regionId.selfLink()); + if (status != null) { + regionPb.setStatus(status.name()); + } + if (zones != null) { + regionPb.setZones(Lists.transform(zones, ZoneId.TO_URL_FUNCTION)); + } + if (quotas != null) { + regionPb.setQuotas(Lists.transform(quotas, Quota.TO_PB_FUNCTION)); + } + if (deprecationStatus != null) { + regionPb.setDeprecated(deprecationStatus.toPb()); + } + return regionPb; + } + + static Builder builder() { + return new Builder(); + } + + static Region fromPb(com.google.api.services.compute.model.Region regionPb) { + Builder builder = builder(); + builder.regionId(RegionId.fromUrl(regionPb.getSelfLink())); + if (regionPb.getId() != null) { + builder.generatedId(regionPb.getId().toString()); + } + if (regionPb.getCreationTimestamp() != null) { + builder.creationTimestamp(TIMESTAMP_FORMATTER.parseMillis(regionPb.getCreationTimestamp())); + } + builder.description(regionPb.getDescription()); + if (regionPb.getStatus() != null) { + builder.status(Status.valueOf(regionPb.getStatus())); + } + if (regionPb.getZones() != null) { + builder.zones(Lists.transform(regionPb.getZones(), ZoneId.FROM_URL_FUNCTION)); + } + if (regionPb.getQuotas() != null) { + builder.quotas(Lists.transform(regionPb.getQuotas(), Quota.FROM_PB_FUNCTION)); + } + if (regionPb.getDeprecated() != null) { + builder.deprecationStatus( + DeprecationStatus.fromPb(regionPb.getDeprecated(), RegionId.FROM_URL_FUNCTION)); + } + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionAddressId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionAddressId.java new file mode 100644 index 000000000000..9e81b3ca909a --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionAddressId.java @@ -0,0 +1,151 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine region address. + */ +public final class RegionAddressId extends AddressId { + + private static final String REGEX = ResourceId.REGEX + "regions/([^/]+)/addresses/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 8170980880371085238L; + + private final String region; + + private RegionAddressId(String project, String region, String address) { + super(project, address); + this.region = checkNotNull(region); + } + + @Override + public Type type() { + return Type.REGION; + } + + /** + * Returns the name of the region this address belongs to. + */ + public String region() { + return region; + } + + /** + * Returns the identity of the region this address belongs to. + */ + public RegionId regionId() { + return RegionId.of(project(), region); + } + + @Override + public String selfLink() { + return super.selfLink() + "/regions/" + region + "/addresses/" + address(); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("region", region); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), region); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof RegionAddressId)) { + return false; + } + RegionAddressId other = (RegionAddressId) obj; + return baseEquals(other) && Objects.equals(region, other.region); + } + + @Override + RegionAddressId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return RegionAddressId.of(projectId, region, address()); + } + + /** + * Returns a region address identity given the region identity and the address name. The address + * name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 + * characters long and match the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means + * the first character must be a lowercase letter, and all following characters must be a dash, + * lowercase letter, or digit, except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static RegionAddressId of(RegionId regionId, String address) { + return new RegionAddressId(regionId.project(), regionId.region(), address); + } + + /** + * Returns a region address identity given the region and address names. The address name must be + * 1-63 characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. 
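+   * <p>For example, an identity could be created with (the region and address names are
+   * illustrative):
+   * <pre> {@code
+   * RegionAddressId addressId = RegionAddressId.of("us-central1", "my-address");
+   * }</pre>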
+ * + * @see RFC1035 + */ + public static RegionAddressId of(String region, String address) { + return new RegionAddressId(null, region, address); + } + + /** + * Returns a region address identity given project, region and address names. The address name + * must be 1-63 characters long and comply with RFC1035. Specifically, the name must match the + * regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static RegionAddressId of(String project, String region, String address) { + return new RegionAddressId(project, region, address); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a region address + * URL. Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static RegionAddressId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid region address URL"); + } + return RegionAddressId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionForwardingRuleId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionForwardingRuleId.java new file mode 100644 index 000000000000..f1f2460ef811 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionForwardingRuleId.java @@ -0,0 +1,167 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine region's forwarding rule. 
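+ * <p>For instance, such an identity could be obtained from region and rule names (illustrative
+ * values):
+ * <pre> {@code
+ * RegionForwardingRuleId ruleId = RegionForwardingRuleId.of("us-central1", "my-rule");
+ * }</pre>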
+ */ +public final class RegionForwardingRuleId extends ForwardingRuleId { + + static final Function FROM_URL_FUNCTION = + new Function() { + @Override + public RegionForwardingRuleId apply(String pb) { + return RegionForwardingRuleId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = + new Function() { + @Override + public String apply(RegionForwardingRuleId forwardingRuleId) { + return forwardingRuleId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "regions/([^/]+)/forwardingRules/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 7885327931402904667L; + + private final String region; + + private RegionForwardingRuleId(String project, String region, String rule) { + super(project, rule); + this.region = checkNotNull(region); + } + + @Override + public Type type() { + return Type.REGION; + } + + /** + * Returns the name of the region this forwarding rule belongs to. + */ + public String region() { + return region; + } + + /** + * Returns the identity of the region this forwarding rule belongs to. + */ + public RegionId regionId() { + return RegionId.of(project(), region); + } + + @Override + public String selfLink() { + return super.selfLink() + "/regions/" + region + "/forwardingRules/" + rule(); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("region", region); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), region); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof RegionForwardingRuleId)) { + return false; + } + RegionForwardingRuleId other = (RegionForwardingRuleId) obj; + return baseEquals(other) && Objects.equals(region, other.region); + } + + @Override + RegionForwardingRuleId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return RegionForwardingRuleId.of(projectId, region, rule()); + } + + /** + * Returns a region forwarding rule identity given the region identity and the rule name. The + * forwarding rule name must be 1-63 characters long and comply with RFC1035. Specifically, the + * name must match the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first + * character must be a lowercase letter, and all following characters must be a dash, lowercase + * letter, or digit, except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static RegionForwardingRuleId of(RegionId regionId, String rule) { + return new RegionForwardingRuleId(regionId.project(), regionId.region(), rule); + } + + /** + * Returns a region forwarding rule identity given the region and rule names. The forwarding rule + * name must be 1-63 characters long and comply with RFC1035. Specifically, the name must match + * the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must + * be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static RegionForwardingRuleId of(String region, String rule) { + return new RegionForwardingRuleId(null, region, rule); + } + + /** + * Returns a region forwarding rule identity given project, region and rule names. The forwarding + * rule name must be 1-63 characters long and comply with RFC1035. 
Specifically, the name must + * match the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character + * must be a lowercase letter, and all following characters must be a dash, lowercase letter, or + * digit, except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static RegionForwardingRuleId of(String project, String region, String rule) { + return new RegionForwardingRuleId(project, region, rule); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a region forwarding + * rule URL. Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static RegionForwardingRuleId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid region forwarding rule URL"); + } + return RegionForwardingRuleId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionId.java new file mode 100644 index 000000000000..1f3c74084692 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionId.java @@ -0,0 +1,133 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects.ToStringHelper; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A Google Compute Engine region identity. + */ +public final class RegionId extends ResourceId { + + static final Function FROM_URL_FUNCTION = new Function() { + @Override + public RegionId apply(String pb) { + return RegionId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = new Function() { + @Override + public String apply(RegionId regionId) { + return regionId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "regions/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 5569092266957249294L; + + private final String region; + + private RegionId(String project, String region) { + super(project); + this.region = checkNotNull(region); + } + + private RegionId(RegionId regionId) { + super(regionId.project()); + this.region = checkNotNull(regionId.region()); + } + + /** + * Returns the name of the region. 
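+   * <p>For example (illustrative values):
+   * <pre> {@code
+   * RegionId regionId = RegionId.of("my-project", "us-central1");
+   * String region = regionId.region(); // "us-central1"
+   * }</pre>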
+ */ + public final String region() { + return region; + } + + @Override + public String selfLink() { + return super.selfLink() + "/regions/" + region; + } + + @Override + ToStringHelper toStringHelper() { + return super.toStringHelper().add("region", region); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), region); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof RegionId)) { + return false; + } + RegionId other = (RegionId) obj; + return baseEquals(other) && Objects.equals(region, other.region); + } + + @Override + RegionId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return RegionId.of(projectId, region); + } + + /** + * Returns a new region identity given project and region names. + */ + public static RegionId of(String project, String region) { + return new RegionId(project, region); + } + + /** + * Returns a new region identity given region name. + */ + public static RegionId of(String region) { + return RegionId.of(null, region); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a region URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static RegionId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid region URL"); + } + return RegionId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionOperationId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionOperationId.java new file mode 100644 index 000000000000..f66f3cc615bc --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/RegionOperationId.java @@ -0,0 +1,133 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine region's operation. + */ +public final class RegionOperationId extends OperationId { + + private static final String REGEX = ResourceId.REGEX + "regions/([^/]+)/operations/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 5816161906501886782L; + + private final String region; + + private RegionOperationId(String project, String region, String operation) { + super(project, operation); + this.region = checkNotNull(region); + } + + @Override + public Type type() { + return Type.REGION; + } + + /** + * Returns the name of the region this operation belongs to. 
+ */ + public String region() { + return region; + } + + /** + * Returns the identity of the region this operation belongs to. + */ + public RegionId regionId() { + return RegionId.of(project(), region); + } + + @Override + public String selfLink() { + return super.selfLink() + "/regions/" + region + "/operations/" + operation(); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("region", region); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), region); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof RegionOperationId)) { + return false; + } + RegionOperationId other = (RegionOperationId) obj; + return baseEquals(other) && Objects.equals(region, other.region); + } + + @Override + RegionOperationId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return RegionOperationId.of(projectId, region, operation()); + } + + /** + * Returns a region operation identity given the region identity and the operation name. + */ + public static RegionOperationId of(RegionId regionId, String operation) { + return new RegionOperationId(regionId.project(), regionId.region(), operation); + } + + /** + * Returns a region operation identity given the region and operation names. + */ + public static RegionOperationId of(String region, String operation) { + return new RegionOperationId(null, region, operation); + } + + /** + * Returns a region operation identity given project, region and operation names. + */ + public static RegionOperationId of(String project, String region, String operation) { + return new RegionOperationId(project, region, operation); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a region operation + * URL. Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static RegionOperationId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid region operation URL"); + } + return RegionOperationId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ResourceId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ResourceId.java new file mode 100644 index 000000000000..fed67c8fd72a --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ResourceId.java @@ -0,0 +1,71 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.common.base.MoreObjects; + +import java.io.Serializable; +import java.util.Objects; + +/** + * Base class for Google Compute Engine resource identities. 
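+ * <p>Concrete identities, such as {@code RegionId} or {@code SnapshotId}, extend this class. A
+ * small sketch with illustrative names:
+ * <pre> {@code
+ * SnapshotId snapshotId = SnapshotId.of("my-project", "my-snapshot");
+ * String project = snapshotId.project();
+ * String selfLink = snapshotId.selfLink();
+ * }</pre>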
+ */ +public abstract class ResourceId implements Serializable { + + static final String REGEX = ".*?projects/([^/]+)/"; + private static final String BASE_URL = "https://www.googleapis.com/compute/v1/projects/"; + private static final long serialVersionUID = -8028734746870421573L; + + private final String project; + + ResourceId(String project) { + this.project = project; + } + + /** + * Returns a fully qualified URL to the entity. + */ + public String selfLink() { + return BASE_URL + project; + } + + /** + * Returns the name of the project. + */ + public final String project() { + return project; + } + + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("project", project); + } + + @Override + public String toString() { + return toStringHelper().toString(); + } + + int baseHashCode() { + return Objects.hash(project); + } + + boolean baseEquals(ResourceId resourceId) { + return Objects.equals(project, resourceId.project); + } + + abstract ResourceId setProjectId(String projectId); +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SchedulingOptions.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SchedulingOptions.java new file mode 100644 index 000000000000..8abac14f8fcb --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SchedulingOptions.java @@ -0,0 +1,150 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.common.base.MoreObjects; + +import java.io.Serializable; +import java.util.Objects; + +/** + * A Google Compute Engine instance scheduling options. When there are system events that might + * cause your instances to be disrupted, Google Compute Engine automatically manages the + * scheduling decisions for your instances. Use {@code SchedulingOptions.preemptible()} to create + * a preemptible instance. Use {@code SchedulingOptions.standard()} to configure scheduling + * options for a standard instance. + * + * @see + * Setting Instance Scheduling Options + */ +public final class SchedulingOptions implements Serializable { + + private static final long serialVersionUID = 4199610694227857331L; + + private final boolean automaticRestart; + private final Maintenance maintenance; + private final boolean isPreemptible; + + /** + * Defines the maintenance behavior for this instance. + */ + public enum Maintenance { + /** + * The default behavior for standard instances. + */ + MIGRATE, + + /** + * The default and only possible behavior for preemptible instances. + */ + TERMINATE + } + + private SchedulingOptions(Boolean automaticRestart, Maintenance maintenance, + Boolean isPreemptible) { + this.automaticRestart = automaticRestart; + this.maintenance = maintenance; + this.isPreemptible = isPreemptible; + } + + /** + * Returns whether the instance should be automatically restarted if it is terminated by Compute + * Engine (not terminated by a user). 
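+   * <p>This value is set through the factory methods; for example, a standard instance with
+   * automatic restart could be configured with (a sketch):
+   * <pre> {@code
+   * SchedulingOptions schedulingOptions =
+   *     SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE);
+   * }</pre>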
+ */ + public Boolean automaticRestart() { + return automaticRestart; + } + + /** + * Returns the maintenance behavior for the instance. + */ + public Maintenance maintenance() { + return maintenance; + } + + /** + * Returns {@code true} if the instance is preemptible, {@code false} otherwhise. + * + * @see Preemptible + * Instance + */ + public boolean isPreemptible() { + return isPreemptible; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("automaticRestart", automaticRestart) + .add("maintenance", maintenance) + .add("isPreemptible", isPreemptible) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(automaticRestart, maintenance, isPreemptible); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj instanceof SchedulingOptions + && Objects.equals(toPb(), ((SchedulingOptions) obj).toPb()); + } + + com.google.api.services.compute.model.Scheduling toPb() { + com.google.api.services.compute.model.Scheduling schedulingPb = + new com.google.api.services.compute.model.Scheduling(); + schedulingPb.setAutomaticRestart(automaticRestart); + schedulingPb.setPreemptible(isPreemptible); + if (maintenance != null) { + schedulingPb.setOnHostMaintenance(maintenance.name()); + } + return schedulingPb; + } + + /** + * Returns a {@code SchedulingOptions} object for a preemptible instance. + * + * @see Preemptible + * Instance + */ + public static SchedulingOptions preemptible() { + return new SchedulingOptions(false, Maintenance.TERMINATE, true); + } + + /** + * Returns a {@code SchedulingOptions} object for a standard instance. + * + * @param automaticRestart specifies whether the instance should be automatically restarted if + * it is terminated by Compute Engine (not terminated by a user) + * @param maintenance defines the maintenance behavior for the instance + */ + public static SchedulingOptions standard(boolean automaticRestart, Maintenance maintenance) { + return new SchedulingOptions(automaticRestart, maintenance, false); + } + + static SchedulingOptions fromPb(com.google.api.services.compute.model.Scheduling schedPb) { + Maintenance maintenance = null; + if (schedPb.getOnHostMaintenance() != null) { + maintenance = Maintenance.valueOf(schedPb.getOnHostMaintenance()); + } + return new SchedulingOptions(schedPb.getAutomaticRestart(), maintenance, + schedPb.getPreemptible()); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ServiceAccount.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ServiceAccount.java new file mode 100644 index 000000000000..65508a0a4c3f --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ServiceAccount.java @@ -0,0 +1,123 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A service account, with its specified scopes, authorized for this instance. + * + * @see Authenticating from Google + * Compute Engine + */ +public final class ServiceAccount implements Serializable { + + static final Function + FROM_PB_FUNCTION = + new Function() { + @Override + public ServiceAccount apply(com.google.api.services.compute.model.ServiceAccount pb) { + return ServiceAccount.fromPb(pb); + } + }; + static final Function + TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.ServiceAccount apply( + ServiceAccount metadata) { + return metadata.toPb(); + } + }; + + private static final long serialVersionUID = 4199610694227857331L; + + private final String email; + private final List scopes; + + private ServiceAccount(String email, List scopes) { + this.email = email; + this.scopes = ImmutableList.copyOf(scopes); + } + + /** + * Returns the email address of the service account. + */ + public String email() { + return email; + } + + /** + * Returns the list of scopes to be made available for this service account. + */ + public List scopes() { + return scopes; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("email", email) + .add("scopes", scopes) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(email, scopes); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj instanceof ServiceAccount + && Objects.equals(toPb(), ((ServiceAccount) obj).toPb()); + } + + com.google.api.services.compute.model.ServiceAccount toPb() { + com.google.api.services.compute.model.ServiceAccount serviceAccountPb = + new com.google.api.services.compute.model.ServiceAccount(); + serviceAccountPb.setEmail(email); + serviceAccountPb.setScopes(scopes); + return serviceAccountPb; + } + + /** + * Returns a {@code ServiceAccount} object for the provided email and scopes. + */ + public static ServiceAccount of(String email, List scopes) { + return new ServiceAccount(email, scopes); + } + + /** + * Returns a {@code ServiceAccount} object for the provided email and scopes. + */ + public static ServiceAccount of(String email, String... scopes) { + return of(email, Arrays.asList(scopes)); + } + + static ServiceAccount fromPb(com.google.api.services.compute.model.ServiceAccount accountPb) { + return new ServiceAccount(accountPb.getEmail(), accountPb.getScopes()); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Snapshot.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Snapshot.java new file mode 100644 index 000000000000..fee0e2fcfac5 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Snapshot.java @@ -0,0 +1,216 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.OperationOption; +import com.google.cloud.compute.Compute.SnapshotOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine snapshot. Compute Engine allows you to take snapshots of your persistent + * disk and create new persistent disks from that snapshot. This can be useful for backing up data, + * recreating a persistent disk that might have been lost, or copying a persistent disk. Snapshots + * can be applied across persistent disk types. {@code Snapshot} adds a layer of service-related + * functionality over {@link SnapshotInfo}. Objects of this class are immutable; to get a + * {@code Snapshot} object with the most recent information use {@link #reload}. + * + * @see Use + * persistent disk snapshots + */ +public class Snapshot extends SnapshotInfo { + + private static final long serialVersionUID = -973924811396336695L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Snapshot} objects. + */ + public static class Builder extends SnapshotInfo.Builder { + + private final Compute compute; + private final SnapshotInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, SnapshotId snapshotId, DiskId sourceDisk) { + this.compute = compute; + this.infoBuilder = new SnapshotInfo.BuilderImpl(); + this.infoBuilder.snapshotId(snapshotId); + this.infoBuilder.sourceDisk(sourceDisk); + } + + Builder(Snapshot snapshot) { + this.compute = snapshot.compute; + this.infoBuilder = new SnapshotInfo.BuilderImpl(snapshot); + } + + @Override + Builder generatedId(String generatedId) { + infoBuilder.generatedId(generatedId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + public Builder snapshotId(SnapshotId snapshotId) { + infoBuilder.snapshotId(snapshotId); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + Builder status(Status status) { + infoBuilder.status(status); + return this; + } + + @Override + Builder diskSizeGb(Long diskSizeGb) { + infoBuilder.diskSizeGb(diskSizeGb); + return this; + } + + @Override + Builder licenses(List licenses) { + infoBuilder.licenses(licenses); + return this; + } + + @Override + public Builder sourceDisk(DiskId sourceDisk) { + infoBuilder.sourceDisk(sourceDisk); + return this; + } + + @Override + Builder sourceDiskId(String sourceDiskId) { + infoBuilder.sourceDiskId(sourceDiskId); + return this; + } + + @Override + Builder storageBytes(Long storageBytes) { + infoBuilder.storageBytes(storageBytes); + return this; + } + + @Override + Builder storageBytesStatus(StorageBytesStatus storageBytesStatus) { + infoBuilder.storageBytesStatus(storageBytesStatus); + return this; + } + + @Override + public Snapshot build() { + 
return new Snapshot(compute, infoBuilder); + } + } + + Snapshot(Compute compute, SnapshotInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this snapshot exists. + * + * @return {@code true} if this snapshot exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(SnapshotOption.fields()) != null; + } + + /** + * Fetches current snapshot's latest information. Returns {@code null} if the snapshot does not + * exist. + * + * @param options snapshot options + * @return a {@code Snapshot} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Snapshot reload(SnapshotOption... options) { + return compute.getSnapshot(snapshotId().snapshot(), options); + } + + /** + * Deletes this snapshot. + * + * @return a global operation if delete request was successfully sent, {@code null} if the + * snapshot was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... options) { + return compute.deleteSnapshot(snapshotId(), options); + } + + /** + * Returns the snapshot's {@code Compute} object used to issue requests. + */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Snapshot.class)) { + return false; + } + Snapshot other = (Snapshot) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Snapshot fromPb(Compute compute, + com.google.api.services.compute.model.Snapshot snapshotPb) { + return new Snapshot(compute, new SnapshotInfo.BuilderImpl(snapshotPb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotDiskConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotDiskConfiguration.java new file mode 100644 index 000000000000..15909b6092d1 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotDiskConfiguration.java @@ -0,0 +1,184 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Disk; +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * A Google Compute Engine disk configuration to create a disk from a Google Compute Engine + * snapshot. 
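+ * <p>For example, a configuration restoring a snapshot to a 100 GB disk could be built with
+ * (the snapshot name and size are illustrative):
+ * <pre> {@code
+ * SnapshotId snapshotId = SnapshotId.of("my-snapshot");
+ * SnapshotDiskConfiguration configuration = SnapshotDiskConfiguration.builder(snapshotId)
+ *     .sizeGb(100L)
+ *     .build();
+ * }</pre>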
+ * + * @see Block Storage + */ +public class SnapshotDiskConfiguration extends DiskConfiguration { + + private static final long serialVersionUID = -1996055058706221049L; + + private final SnapshotId sourceSnapshot; + private final String sourceSnapshotId; + + /** + * A builder for {@code SnapshotDiskConfiguration} objects. + */ + public static class Builder + extends DiskConfiguration.Builder { + + private SnapshotId sourceSnapshot; + private String sourceSnapshotId; + + private Builder(SnapshotId sourceSnapshot) { + super(Type.SNAPSHOT); + this.sourceSnapshot = checkNotNull(sourceSnapshot); + } + + private Builder(SnapshotDiskConfiguration configuration) { + super(configuration); + this.sourceSnapshot = configuration.sourceSnapshot; + this.sourceSnapshotId = configuration.sourceSnapshotId; + } + + private Builder(Disk diskPb) { + super(Type.SNAPSHOT, diskPb); + this.sourceSnapshot = SnapshotId.fromUrl(diskPb.getSourceSnapshot()); + this.sourceSnapshotId = diskPb.getSourceSnapshotId(); + } + + /** + * Sets the size of the persistent disk, in GB. If not set the disk will have the size of the + * snapshot. This value can be larger than the snapshot's size. If the provided size is smaller + * than the snapshot's size then disk creation will fail. + * + * @see + * Restoring a snapshot to a larger size + */ + @Override + public Builder sizeGb(Long sizeGb) { + super.sizeGb(sizeGb); + return this; + } + + /** + * Sets the identity of the source snapshot used to create the disk. + */ + public Builder sourceSnapshot(SnapshotId sourceSnapshot) { + this.sourceSnapshot = checkNotNull(sourceSnapshot); + return this; + } + + Builder sourceSnapshotId(String sourceSnapshotId) { + this.sourceSnapshotId = sourceSnapshotId; + return this; + } + + /** + * Creates a {@code SnapshotDiskConfiguration} object. + */ + @Override + public SnapshotDiskConfiguration build() { + return new SnapshotDiskConfiguration(this); + } + } + + private SnapshotDiskConfiguration(Builder builder) { + super(builder); + this.sourceSnapshot = builder.sourceSnapshot; + this.sourceSnapshotId = builder.sourceSnapshotId; + } + + /** + * Returns the identity of the source snapshot used to create the disk. + */ + public SnapshotId sourceSnapshot() { + return sourceSnapshot; + } + + /** + * Returns the service-generated unique id of the snapshot used to create this disk. This value + * identifies the exact snapshot that was used to create the persistent disk. For example, if you + * created the persistent disk from a snapshot that was later deleted and recreated under the same + * name, the source snapshot ID would identify the exact version of the snapshot that was used. 
+ */ + public String sourceSnapshotId() { + return sourceSnapshotId; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper() + .add("sourceSnapshot", sourceSnapshot) + .add("sourceSnapshotId", sourceSnapshotId); + } + + @Override + public final int hashCode() { + return Objects.hash(baseHashCode(), sourceSnapshot, sourceSnapshotId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(SnapshotDiskConfiguration.class) + && baseEquals((SnapshotDiskConfiguration) obj); + } + + @Override + SnapshotDiskConfiguration setProjectId(String projectId) { + Builder builder = toBuilder().sourceSnapshot(sourceSnapshot.setProjectId(projectId)); + if (diskType() != null) { + builder.diskType(diskType().setProjectId(projectId)); + } + return builder.build(); + } + + @Override + Disk toPb() { + return super.toPb() + .setSourceSnapshot(sourceSnapshot.selfLink()) + .setSourceSnapshotId(sourceSnapshotId); + } + + /** + * Returns a builder for a {@code SnapshotDiskConfiguration} object given the snapshot identity. + */ + public static Builder builder(SnapshotId sourceSnapshot) { + return new Builder(sourceSnapshot); + } + + /** + * Returns a {@code SnapshotDiskConfiguration} object given the snapshot identity. + */ + public static SnapshotDiskConfiguration of(SnapshotId sourceSnapshot) { + return builder(sourceSnapshot).build(); + } + + @SuppressWarnings("unchecked") + static SnapshotDiskConfiguration fromPb(Disk diskPb) { + return new Builder(diskPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotId.java new file mode 100644 index 000000000000..a30d531b65be --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotId.java @@ -0,0 +1,132 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine snapshot. + */ +public final class SnapshotId extends ResourceId { + + private static final String REGEX = ResourceId.REGEX + "global/snapshots/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -1699492866663041082L; + + private final String snapshot; + + private SnapshotId(String project, String snapshot) { + super(project); + this.snapshot = checkNotNull(snapshot); + } + + /** + * Returns the name of the snapshot. The name must be 1-63 characters long and comply with + * RFC1035. 
Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public String snapshot() { + return snapshot; + } + + @Override + public String selfLink() { + return super.selfLink() + "/global/snapshots/" + snapshot; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("snapshot", snapshot); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), snapshot); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof SnapshotId)) { + return false; + } + SnapshotId other = (SnapshotId) obj; + return baseEquals(other) && Objects.equals(snapshot, other.snapshot); + } + + @Override + SnapshotId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return SnapshotId.of(projectId, snapshot); + } + + /** + * Returns a snapshot identity given the snapshot name. The snapshot name must be 1-63 characters + * long and comply with RFC1035. Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public static SnapshotId of(String snapshot) { + return new SnapshotId(null, snapshot); + } + + /** + * Returns a snapshot identity given project and snapshot names. The snapshot name must be 1-63 + * characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static SnapshotId of(String project, String snapshot) { + return new SnapshotId(project, snapshot); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a snapshot URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return url.matches(REGEX); + } + + static SnapshotId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid snapshot URL"); + } + return SnapshotId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotInfo.java new file mode 100644 index 000000000000..ce9ebbc7825c --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SnapshotInfo.java @@ -0,0 +1,484 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Snapshot; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine snapshot. Compute Engine allows you to take snapshots of your persistent + * disk and create new persistent disks from that snapshot. This can be useful for backing up data, + * recreating a persistent disk that might have been lost, or copying a persistent disk. Snapshots + * can be applied across persistent disk types. + * + * @see Use + * persistent disk snapshots + */ +public class SnapshotInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public SnapshotInfo apply(Snapshot pb) { + return SnapshotInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Snapshot apply(SnapshotInfo snapshot) { + return snapshot.toPb(); + } + }; + + private static final long serialVersionUID = 1065513502131159769L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final SnapshotId snapshotId; + private final Long creationTimestamp; + private final String description; + private final Status status; + private final Long diskSizeGb; + private final List licenses; + private final DiskId sourceDisk; + private final String sourceDiskId; + private final Long storageBytes; + private final StorageBytesStatus storageBytesStatus; + + /** + * The status of a Google Compute Engine snapshot. A snapshot can be used to create a disk only + * after the snapshot has been successfully created and the status is set to {@code READY}. + */ + public enum Status { + /** + * The snapshot is being created. + */ + CREATING, + + /** + * The snapshot is being deleted. + */ + DELETING, + + /** + * Snapshot's creation failed. + */ + FAILED, + + /** + * Snapshot has been successfully created. + */ + READY, + + /** + * Snapshot is being uploaded. + */ + UPLOADING + } + + /** + * An indicator of whether {@link SnapshotInfo#storageBytes()} is in a stable state or it is being + * adjusted as a result of shared storage reallocation. + */ + public enum StorageBytesStatus { + /** + * Indicates that the size of the snapshot is being updated. + */ + UPDATING, + + /** + * Indicates that the size of the snapshot is up-to-date. + */ + UP_TO_DATE + } + + /** + * A builder for {@code SnapshotInfo} objects. + */ + public abstract static class Builder { + + abstract Builder generatedId(String generatedId); + + abstract Builder creationTimestamp(Long creationTimestamp); + + /** + * Sets the snapshot identity. 
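A minimal sketch of building a `SnapshotInfo` with the builder declared below; the snapshot and disk names are placeholders, and `DiskId.of(zone, disk)` is assumed to be the zonal disk identity used elsewhere in this change:

```java
import com.google.cloud.compute.DiskId;
import com.google.cloud.compute.SnapshotId;
import com.google.cloud.compute.SnapshotInfo;

// Snapshot identity without a project; the client adds the default project when needed.
SnapshotId snapshotId = SnapshotId.of("my-snapshot");
// Identity of the source disk to snapshot (zone and disk name are placeholders).
DiskId sourceDisk = DiskId.of("us-central1-a", "my-disk");

SnapshotInfo snapshotInfo = SnapshotInfo.builder(snapshotId, sourceDisk)
    .description("Nightly backup of my-disk")
    .build();
```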
+ */ + public abstract Builder snapshotId(SnapshotId snapshotId); + + /** + * Sets an optional textual description of the snapshot. + */ + public abstract Builder description(String description); + + abstract Builder status(Status status); + + abstract Builder diskSizeGb(Long diskSizeGb); + + abstract Builder licenses(List licenses); + + /** + * Sets the identity of the source disk used to create the snapshot. + */ + public abstract Builder sourceDisk(DiskId sourceDisk); + + abstract Builder sourceDiskId(String sourceDiskId); + + abstract Builder storageBytes(Long storageBytes); + + abstract Builder storageBytesStatus(StorageBytesStatus storageBytesStatus); + + /** + * Creates a {@code SnapshotInfo} object. + */ + public abstract SnapshotInfo build(); + } + + static final class BuilderImpl extends Builder { + + private String generatedId; + private Long creationTimestamp; + private SnapshotId snapshotId; + private String description; + private Status status; + private Long diskSizeGb; + private List licenses; + private DiskId sourceDisk; + private String sourceDiskId; + private Long storageBytes; + private StorageBytesStatus storageBytesStatus; + + BuilderImpl() {} + + BuilderImpl(SnapshotInfo snapshotInfo) { + this.generatedId = snapshotInfo.generatedId; + this.creationTimestamp = snapshotInfo.creationTimestamp; + this.snapshotId = snapshotInfo.snapshotId; + this.description = snapshotInfo.description; + this.status = snapshotInfo.status; + this.diskSizeGb = snapshotInfo.diskSizeGb; + this.licenses = snapshotInfo.licenses; + this.sourceDisk = snapshotInfo.sourceDisk; + this.sourceDiskId = snapshotInfo.sourceDiskId; + this.storageBytes = snapshotInfo.storageBytes; + this.storageBytesStatus = snapshotInfo.storageBytesStatus; + } + + BuilderImpl(Snapshot snapshotPb) { + if (snapshotPb.getId() != null) { + this.generatedId = snapshotPb.getId().toString(); + } + if (snapshotPb.getCreationTimestamp() != null) { + this.creationTimestamp = TIMESTAMP_FORMATTER.parseMillis(snapshotPb.getCreationTimestamp()); + } + this.snapshotId = SnapshotId.fromUrl(snapshotPb.getSelfLink()); + this.description = snapshotPb.getDescription(); + if (snapshotPb.getStatus() != null) { + this.status = Status.valueOf(snapshotPb.getStatus()); + } + this.diskSizeGb = snapshotPb.getDiskSizeGb(); + if (snapshotPb.getLicenses() != null) { + this.licenses = Lists.transform(snapshotPb.getLicenses(), LicenseId.FROM_URL_FUNCTION); + } + if (snapshotPb.getSourceDisk() != null) { + this.sourceDisk = DiskId.fromUrl(snapshotPb.getSourceDisk()); + } + this.sourceDiskId = snapshotPb.getSourceDiskId(); + this.storageBytes = snapshotPb.getStorageBytes(); + if (snapshotPb.getStorageBytesStatus() != null) { + this.storageBytesStatus = StorageBytesStatus.valueOf(snapshotPb.getStorageBytesStatus()); + } + } + + @Override + BuilderImpl generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + BuilderImpl creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + public BuilderImpl snapshotId(SnapshotId snapshotId) { + this.snapshotId = checkNotNull(snapshotId); + return this; + } + + @Override + public BuilderImpl description(String description) { + this.description = description; + return this; + } + + @Override + BuilderImpl status(Status status) { + this.status = status; + return this; + } + + @Override + BuilderImpl diskSizeGb(Long diskSizeGb) { + this.diskSizeGb = diskSizeGb; + return this; + } + + @Override + BuilderImpl 
licenses(List licenses) { + this.licenses = licenses != null ? ImmutableList.copyOf(licenses) : null; + return this; + } + + @Override + public BuilderImpl sourceDisk(DiskId sourceDisk) { + this.sourceDisk = checkNotNull(sourceDisk); + return this; + } + + @Override + BuilderImpl sourceDiskId(String sourceDiskId) { + this.sourceDiskId = sourceDiskId; + return this; + } + + @Override + BuilderImpl storageBytes(Long storageBytes) { + this.storageBytes = storageBytes; + return this; + } + + @Override + BuilderImpl storageBytesStatus(StorageBytesStatus storageBytesStatus) { + this.storageBytesStatus = storageBytesStatus; + return this; + } + + @Override + public SnapshotInfo build() { + return new SnapshotInfo(this); + } + } + + SnapshotInfo(BuilderImpl builder) { + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.snapshotId = checkNotNull(builder.snapshotId); + this.description = builder.description; + this.status = builder.status; + this.diskSizeGb = builder.diskSizeGb; + this.licenses = builder.licenses; + this.sourceDisk = builder.sourceDisk; + this.sourceDiskId = builder.sourceDiskId; + this.storageBytes = builder.storageBytes; + this.storageBytesStatus = builder.storageBytesStatus; + } + + /** + * Returns the service-generated unique identifier for the snapshot. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns the snapshot identity. + */ + public SnapshotId snapshotId() { + return snapshotId; + } + + /** + * Returns a textual description of the snapshot. + */ + public String description() { + return description; + } + + /** + * Returns all applicable publicly visible licenses. + */ + public List licenses() { + return licenses; + } + + /** + * Returns the status of the snapshot. A snapshot can be used to create other resources, such as + * disks, only after the snapshot has been successfully created and the status is set to + * {@code READY}. + */ + public Status status() { + return status; + } + + /** + * Returns the size of the snapshot (in GB). + */ + public Long diskSizeGb() { + return diskSizeGb; + } + + /** + * Returns the identity of the source disk used to create this snapshot. + */ + public DiskId sourceDisk() { + return sourceDisk; + } + + /** + * Returns the service-generated unique id of the disk used to create this snapshot. This value + * may be used to determine whether the snapshot was taken from the current or a previous instance + * of a given disk name. + */ + public String sourceDiskId() { + return sourceDiskId; + } + + /** + * Returns the size of the the storage used by the snapshot. As snapshots share storage, this + * number is expected to change with snapshot creation/deletion. + */ + public Long storageBytes() { + return storageBytes; + } + + /** + * Indicates whether {@link SnapshotInfo#storageBytes()} is in a stable state or it is being + * adjusted as a result of shared storage reallocation. {@link StorageBytesStatus#UPDATING} + * indicates that the size of the snapshot is being updated. {@link StorageBytesStatus#UP_TO_DATE} + * indicates that the size of the snapshot is up-to-date. + */ + public StorageBytesStatus storageBytesStatus() { + return storageBytesStatus; + } + + /** + * Returns a builder for the current snapshot. 
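A short sketch of inspecting the metadata exposed by the getters above, assuming a `SnapshotInfo` instance named `snapshotInfo` is already available:

```java
// Snapshots can only be used once their status is READY.
if (snapshotInfo.status() == SnapshotInfo.Status.READY) {
  Long sizeGb = snapshotInfo.diskSizeGb();
  Long storageBytes = snapshotInfo.storageBytes();
  // UPDATING means storageBytes is still being adjusted by shared-storage reallocation.
  SnapshotInfo.StorageBytesStatus bytesStatus = snapshotInfo.storageBytesStatus();
}
```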
+ */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("snapshotId", snapshotId) + .add("description", description) + .add("status", status) + .add("diskSizeGb", diskSizeGb) + .add("licenses", licenses) + .add("sourceDisk", sourceDisk) + .add("sourceDiskId", sourceDiskId) + .add("storageBytes", storageBytes) + .add("storageBytesStatus", storageBytesStatus) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(generatedId, creationTimestamp, snapshotId, description, status, diskSizeGb, + licenses, sourceDisk, sourceDiskId, storageBytes, storageBytesStatus); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(SnapshotInfo.class) + && Objects.equals(toPb(), ((SnapshotInfo) obj).toPb()); + } + + SnapshotInfo setProjectId(String projectId) { + return toBuilder() + .snapshotId(snapshotId.setProjectId(projectId)) + .sourceDisk(sourceDisk.setProjectId(projectId)) + .build(); + } + + Snapshot toPb() { + Snapshot snapshotPb = new Snapshot(); + if (generatedId != null) { + snapshotPb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + snapshotPb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + snapshotPb.setName(snapshotId.snapshot()); + snapshotPb.setDescription(description); + snapshotPb.setSelfLink(snapshotId.selfLink()); + if (status != null) { + snapshotPb.setStatus(status.name()); + } + snapshotPb.setDiskSizeGb(diskSizeGb); + if (licenses != null) { + snapshotPb.setLicenses(Lists.transform(licenses, LicenseId.TO_URL_FUNCTION)); + } + if (sourceDisk != null) { + snapshotPb.setSourceDisk(sourceDisk.selfLink()); + } + snapshotPb.setSourceDiskId(sourceDiskId); + snapshotPb.setStorageBytes(storageBytes); + if (storageBytesStatus != null) { + snapshotPb.setStorageBytesStatus(storageBytesStatus.name()); + } + return snapshotPb; + } + + /** + * Returns a builder for a {@code SnapshotInfo} object given the snapshot identity and a source + * disk identity. + */ + public static Builder builder(SnapshotId snapshotId, DiskId source) { + return new BuilderImpl().snapshotId(snapshotId).sourceDisk(source); + } + + /** + * Returns a {@code SnapshotInfo} object given the snapshot identity and a source disk identity. + */ + public static SnapshotInfo of(SnapshotId snapshotId, DiskId source) { + return builder(snapshotId, source).build(); + } + + static SnapshotInfo fromPb(Snapshot snapshotPb) { + return new BuilderImpl(snapshotPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/StandardDiskConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/StandardDiskConfiguration.java new file mode 100644 index 000000000000..90cf9fb7c40b --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/StandardDiskConfiguration.java @@ -0,0 +1,131 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.api.services.compute.model.Disk; + +import java.util.Objects; + +/** + * A Google Compute Engine standard persistent disk configuration. This class allows users to create + * a disk given its type and size. + * + * @see Block Storage + */ +public class StandardDiskConfiguration extends DiskConfiguration { + + private static final long serialVersionUID = -6974045909359567054L; + + /** + * A builder for {@code StandardDiskConfiguration} objects. + */ + public static class Builder + extends DiskConfiguration.Builder { + + private Builder() { + super(Type.STANDARD); + } + + private Builder(StandardDiskConfiguration configuration) { + super(configuration); + } + + private Builder(Disk diskPb) { + super(Type.STANDARD, diskPb); + } + + /** + * Sets the size of the persistent disk, in GB. If not set, 500GB is used. + */ + @Override + public Builder sizeGb(Long sizeGb) { + super.sizeGb(sizeGb); + return this; + } + + /** + * Creates a {@code StandardDiskConfiguration} object. + */ + @Override + public StandardDiskConfiguration build() { + return new StandardDiskConfiguration(this); + } + } + + private StandardDiskConfiguration(Builder builder) { + super(builder); + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final int hashCode() { + return Objects.hash(baseHashCode()); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(StandardDiskConfiguration.class) + && baseEquals((StandardDiskConfiguration) obj); + } + + @Override + StandardDiskConfiguration setProjectId(String projectId) { + if (diskType() == null || diskType().project() != null) { + return this; + } + return toBuilder().diskType(diskType().setProjectId(projectId)).build(); + } + + /** + * Returns a builder for a {@code StandardDiskConfiguration} object. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Returns a {@code StandardDiskConfiguration} object given the disk type. + */ + public static StandardDiskConfiguration of(DiskTypeId diskType) { + return builder().diskType(diskType).build(); + } + + /** + * Returns a {@code StandardDiskConfiguration} object given the disk size in GB. + */ + public static StandardDiskConfiguration of(long sizeGb) { + return builder().sizeGb(sizeGb).build(); + } + + /** + * Returns a {@code StandardDiskConfiguration} object given the disk type and size in GB. 
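Two equivalent ways to build a `StandardDiskConfiguration` with the API in this file, using an illustrative 100 GB size and leaving the disk type to the service default:

```java
import com.google.cloud.compute.StandardDiskConfiguration;

// Factory form: a 100 GB standard persistent disk.
StandardDiskConfiguration configuration = StandardDiskConfiguration.of(100L);

// Builder form, equivalent to the factory call above.
StandardDiskConfiguration sameConfiguration = StandardDiskConfiguration.builder()
    .sizeGb(100L)
    .build();
```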
+ */ + public static StandardDiskConfiguration of(DiskTypeId diskType, long sizeGb) { + return builder().diskType(diskType).sizeGb(sizeGb).build(); + } + + @SuppressWarnings("unchecked") + static StandardDiskConfiguration fromPb(Disk diskPb) { + return new Builder(diskPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/StandardNetworkConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/StandardNetworkConfiguration.java new file mode 100644 index 000000000000..16aa8a128d0a --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/StandardNetworkConfiguration.java @@ -0,0 +1,102 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Network; +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * A Google Compute Engine standard network configuration. This class allows users to create a + * network with its own address range. A network created with a {@code StandardNetworkConfiguration} + * does not support the creation of subnetworks. + * + * @see Using Networks and Firewalls + */ +public class StandardNetworkConfiguration extends NetworkConfiguration { + + private static final long serialVersionUID = -5143748459659467966L; + + private final String ipRange; + private final String gatewayAddress; + + StandardNetworkConfiguration(String ipRange, String gatewayAddress) { + super(Type.STANDARD); + this.ipRange = checkNotNull(ipRange); + this.gatewayAddress = gatewayAddress; + } + + /** + * Returns the range of internal IPv4 addresses that are legal on this network. This range is a + * CIDR specification, for example: {@code 192.168.0.0/16}. + * + * @see CIDR + */ + public String ipRange() { + return ipRange; + } + + /** + * Returns the gateway IPv4 address for this networks. This value is read only and is selected by + * Google Compute Engine, typically as the first usable address in {@code ipRange}. + */ + public String gatewayAddress() { + return gatewayAddress; + } + + @Override + public final int hashCode() { + return Objects.hash(super.baseHashCode(), ipRange, gatewayAddress); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(StandardNetworkConfiguration.class) + && Objects.equals(toPb(), ((StandardNetworkConfiguration) obj).toPb()); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper().add("ipRange", ipRange).add("gatewayAddress", gatewayAddress); + } + + @Override + Network toPb() { + return new Network().setIPv4Range(ipRange).setGatewayIPv4(gatewayAddress); + } + + /** + * Returns a {@code StandardNetworkConfiguration} object given the range of internal addresses + * that are legal on this network. 
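A minimal sketch of the `StandardNetworkConfiguration` factory that follows; the CIDR range is illustrative:

```java
import com.google.cloud.compute.StandardNetworkConfiguration;

// A legacy network whose internal addresses come from a single /16 range.
// gatewayAddress() is read-only and is filled in by the service.
StandardNetworkConfiguration configuration = StandardNetworkConfiguration.of("192.168.0.0/16");
```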
{@code ipRange} must be a CIDR specification, for example: + * {@code 192.168.0.0/16}. + * + * @see CIDR + */ + public static StandardNetworkConfiguration of(String ipRange) { + return new StandardNetworkConfiguration(ipRange, null); + } + + @SuppressWarnings("unchecked") + static StandardNetworkConfiguration fromPb(Network networkPb) { + return new StandardNetworkConfiguration(networkPb.getIPv4Range(), networkPb.getGatewayIPv4()); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/StorageImageConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/StorageImageConfiguration.java new file mode 100644 index 000000000000..f90e9fa4e13a --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/StorageImageConfiguration.java @@ -0,0 +1,204 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Image; +import com.google.common.base.MoreObjects; + +import java.util.Objects; + +/** + * A Google Compute Engine image configuration used to create images from a Google Cloud Storage + * URL where the disk image is stored. + */ +public class StorageImageConfiguration extends ImageConfiguration { + + private static final long serialVersionUID = 8160447986545005880L; + + private final ContainerType containerType; + private final String sha1; + private final String source; + + /** + * The format used to encode and transmit the block device. The only supported value is + * {@code TAR}. This is just a container and transmission format, not a runtime format. + */ + public enum ContainerType { + TAR + } + + /** + * A builder for {@code StorageImageConfiguration} objects. + */ + public static final class Builder + extends ImageConfiguration.Builder { + + private ContainerType containerType; + private String sha1; + private String source; + + private Builder() { + super(Type.STORAGE); + } + + private Builder(StorageImageConfiguration imageConfiguration) { + super(imageConfiguration); + this.containerType = imageConfiguration.containerType; + this.sha1 = imageConfiguration.sha1; + this.source = imageConfiguration.source; + } + + private Builder(Image imagePb) { + super(Type.STORAGE, imagePb); + if (imagePb.getRawDisk().getContainerType() != null) { + this.containerType = ContainerType.valueOf(imagePb.getRawDisk().getContainerType()); + } + this.sha1 = imagePb.getRawDisk().getSha1Checksum(); + this.source = imagePb.getRawDisk().getSource(); + } + + /** + * Sets the format used to encode and transmit the block device. The only supported value is + * {@code TAR}. This is just a container and transmission format, not a runtime format. + */ + public Builder containerType(ContainerType containerType) { + this.containerType = containerType; + return this; + } + + /** + * Sets the SHA1 checksum of the disk image before unpackaging. 
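A minimal sketch of creating a `StorageImageConfiguration` from a Cloud Storage URL using the builder defined here; the bucket and object names are placeholders:

```java
import com.google.cloud.compute.StorageImageConfiguration;

// The source must point at the stored disk image; TAR is the only supported container type.
StorageImageConfiguration configuration =
    StorageImageConfiguration.builder("gs://my-bucket/my-image.tar.gz")
        .containerType(StorageImageConfiguration.ContainerType.TAR)
        .build();
```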
+ */ + public Builder sha1(String sha1) { + this.sha1 = sha1; + return this; + } + + /** + * Sets the full Google Cloud Storage URL where the disk image is stored (e.g. + * {@code gs://bucket/file}). + */ + public Builder source(String source) { + this.source = checkNotNull(source); + return this; + } + + /** + * Creates a {@code StorageImageConfiguration} object. + */ + @Override + public StorageImageConfiguration build() { + return new StorageImageConfiguration(this); + } + } + + private StorageImageConfiguration(Builder builder) { + super(builder); + this.source = checkNotNull(builder.source); + this.containerType = builder.containerType; + this.sha1 = builder.sha1; + } + + /** + * Returns the format used to encode and transmit the block device. This is just a container and + * transmission format, not a runtime format. + */ + public ContainerType containerType() { + return containerType; + } + + /** + * Returns the SHA1 checksum of the disk image before unpackaging. + */ + public String sha1() { + return sha1; + } + + /** + * Returns the full Google Cloud Storage URL where the disk image is stored (e.g. + * {@code gs://bucket/file}). + */ + public String source() { + return source; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper() + .add("source", source) + .add("containerType", containerType) + .add("sha1", sha1); + } + + @Override + public final int hashCode() { + return Objects.hash(baseHashCode(), source, containerType, sha1); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(StorageImageConfiguration.class) + && Objects.equals(toPb(), ((StorageImageConfiguration) obj).toPb()); + } + + @Override + StorageImageConfiguration setProjectId(String projectId) { + return this; + } + + @Override + Image toPb() { + Image.RawDisk rawDiskPb = new Image.RawDisk(); + rawDiskPb.setSource(source); + rawDiskPb.setSha1Checksum(sha1); + if (containerType != null) { + rawDiskPb.setContainerType(containerType.name()); + } + Image imagePb = super.toPb(); + return imagePb.setRawDisk(rawDiskPb); + } + + /** + * Creates a {@code StorageImageConfiguration} builder given the full Google Cloud Storage URL + * where the disk image is stored. + */ + public static Builder builder(String source) { + return new Builder().source(source); + } + + /** + * Creates a {@code StorageImageConfiguration} object given the full Google Cloud Storage URL + * where the disk image is stored. + */ + public static StorageImageConfiguration of(String source) { + return builder(source).build(); + } + + @SuppressWarnings("unchecked") + static StorageImageConfiguration fromPb(Image imagePb) { + return new Builder(imagePb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetNetworkConfiguration.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetNetworkConfiguration.java new file mode 100644 index 000000000000..d060e91eab73 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetNetworkConfiguration.java @@ -0,0 +1,111 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.api.services.compute.model.Network; +import com.google.common.base.MoreObjects; +import com.google.common.collect.Lists; + +import java.util.List; +import java.util.Objects; + +/** + * A Google Compute Engine configuration for networks that support subnetworks, up to one per + * region, each with its own address range. Subnetworks can be either automatically or manually + * created, depending on the value of {@link SubnetNetworkConfiguration#autoCreateSubnetworks()}. + * + * @see Using Networks and Firewalls + */ +public class SubnetNetworkConfiguration extends NetworkConfiguration { + + private static final long serialVersionUID = -5286394393047479494L; + + private final Boolean autoCreateSubnetworks; + private final List subnetworks; + + SubnetNetworkConfiguration(boolean autoCreateSubnetworks, List subnetworks) { + super(Type.SUBNET); + this.autoCreateSubnetworks = autoCreateSubnetworks; + this.subnetworks = subnetworks; + } + + /** + * Returns whether the subnetworks should be automatically created. When set to {@code true}, the + * network is created in "auto subnet mode". When set to {@code false}, the network is in + * "custom subnet mode". In "auto subnet mode", a subnetwork per region is automatically created. + * In "custom subnet mode", a custom topology of subnetworks can be created by the user. + */ + public Boolean autoCreateSubnetworks() { + return autoCreateSubnetworks; + } + + /** + * Returns the identities of all networks in this network. + */ + public List subnetworks() { + return subnetworks; + } + + @Override + public final int hashCode() { + return Objects.hash(autoCreateSubnetworks, subnetworks); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(SubnetNetworkConfiguration.class) + && Objects.equals(toPb(), ((SubnetNetworkConfiguration) obj).toPb()); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return super.toStringHelper() + .add("autoCreateSubnetworks", autoCreateSubnetworks) + .add("subnetworks", subnetworks); + } + + @Override + Network toPb() { + Network networkPb = new Network().setAutoCreateSubnetworks(autoCreateSubnetworks); + if (subnetworks != null) { + networkPb.setSubnetworks(Lists.transform(subnetworks, SubnetworkId.TO_URL_FUNCTION)); + } + return networkPb; + } + + /** + * Returns a {@code SubnetNetworkConfiguration} object. The {@code autoCreateSubnetworks} + * parameter sets whether subnetworks should be automatically created. When set to {@code true}, + * the network is created in "auto subnet mode". When set to {@code false}, the network is in + * "custom subnet mode". In "auto subnet mode", a subnetwork per region is automatically created. + * In "custom subnet mode", a custom topology of subnetworks can be created by the user. 
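The two modes described above, expressed through the `SubnetNetworkConfiguration` factory that follows:

```java
import com.google.cloud.compute.SubnetNetworkConfiguration;

// "Auto subnet mode": one subnetwork per region is created automatically.
SubnetNetworkConfiguration autoMode = SubnetNetworkConfiguration.of(true);

// "Custom subnet mode": subnetworks are created explicitly by the user.
SubnetNetworkConfiguration customMode = SubnetNetworkConfiguration.of(false);
```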
+ */ + public static SubnetNetworkConfiguration of(boolean autoCreateSubnetworks) { + return new SubnetNetworkConfiguration(autoCreateSubnetworks, null); + } + + @SuppressWarnings("unchecked") + static SubnetNetworkConfiguration fromPb(Network networkPb) { + List subnetworks = null; + if (networkPb.getSubnetworks() != null) { + subnetworks = Lists.transform(networkPb.getSubnetworks(), SubnetworkId.FROM_URL_FUNCTION); + } + return new SubnetNetworkConfiguration(networkPb.getAutoCreateSubnetworks(), subnetworks); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Subnetwork.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Subnetwork.java new file mode 100644 index 000000000000..b3929dd937ea --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Subnetwork.java @@ -0,0 +1,190 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.cloud.compute.Compute.OperationOption; +import com.google.cloud.compute.Compute.SubnetworkOption; + +import java.io.IOException; +import java.io.ObjectInputStream; +import java.util.Objects; + +/** + * A Google Compute Engine Subnetwork. Subnetworks segments your cloud network IP space into + * subnetworks. Subnetwork prefixes can be automatically allocated, or you can create a custom + * topology. Objects of this class are immutable. To get a {@code Subnetwork} object with the most + * recent information use {@link #reload}. {@code Subnetwork} adds a layer of service-related + * functionality over {@link SubnetworkInfo}. + * + * @see Subnetworks + */ +public class Subnetwork extends SubnetworkInfo { + + private static final long serialVersionUID = 8608280908101278096L; + + private final ComputeOptions options; + private transient Compute compute; + + /** + * A builder for {@code Subnetwork} objects. 
+ */ + public static class Builder extends SubnetworkInfo.Builder { + + private final Compute compute; + private final SubnetworkInfo.BuilderImpl infoBuilder; + + Builder(Compute compute, SubnetworkId subnetworkId, NetworkId networkId, String ipRange) { + this.compute = compute; + this.infoBuilder = new SubnetworkInfo.BuilderImpl(subnetworkId, networkId, ipRange); + this.infoBuilder.subnetworkId(subnetworkId); + this.infoBuilder.network(networkId); + this.infoBuilder.ipRange(ipRange); + } + + Builder(Subnetwork subnetwork) { + this.compute = subnetwork.compute; + this.infoBuilder = new SubnetworkInfo.BuilderImpl(subnetwork); + } + + @Override + Builder generatedId(String generatedId) { + infoBuilder.generatedId(generatedId); + return this; + } + + @Override + Builder creationTimestamp(Long creationTimestamp) { + infoBuilder.creationTimestamp(creationTimestamp); + return this; + } + + @Override + public Builder subnetworkId(SubnetworkId subnetworkId) { + infoBuilder.subnetworkId(subnetworkId); + return this; + } + + @Override + public Builder description(String description) { + infoBuilder.description(description); + return this; + } + + @Override + Builder gatewayAddress(String gatewayAddress) { + infoBuilder.gatewayAddress(gatewayAddress); + return this; + } + + @Override + public Builder network(NetworkId network) { + infoBuilder.network(network); + return this; + } + + @Override + public Builder ipRange(String ipRange) { + infoBuilder.ipRange(ipRange); + return this; + } + + @Override + public Subnetwork build() { + return new Subnetwork(compute, infoBuilder); + } + } + + Subnetwork(Compute compute, SubnetworkInfo.BuilderImpl infoBuilder) { + super(infoBuilder); + this.compute = checkNotNull(compute); + this.options = compute.options(); + } + + /** + * Checks if this subnetwork exists. + * + * @return {@code true} if this subnetwork exists, {@code false} otherwise + * @throws ComputeException upon failure + */ + public boolean exists() { + return reload(SubnetworkOption.fields()) != null; + } + + /** + * Fetches current subnetwork' latest information. Returns {@code null} if the subnetwork does not + * exist. + * + * @param options subnetwork options + * @return an {@code Subnetwork} object with latest information or {@code null} if not found + * @throws ComputeException upon failure + */ + public Subnetwork reload(SubnetworkOption... options) { + return compute.getSubnetwork(subnetworkId(), options); + } + + /** + * Deletes this subnetwork. If this subnetwork was auto-generated deletion will fail. + * + * @return an operation object if delete request was successfully sent, {@code null} if the + * subnetwork was not found + * @throws ComputeException upon failure + */ + public Operation delete(OperationOption... options) { + return compute.deleteSubnetwork(subnetworkId(), options); + } + + /** + * Returns the subnetwork's {@code Compute} object used to issue requests. 
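A short sketch of the service-related methods above, assuming a `Subnetwork` object named `subnetwork` has already been fetched from the service:

```java
import com.google.cloud.compute.Operation;
import com.google.cloud.compute.Subnetwork;

if (subnetwork.exists()) {
  // Refresh the object with the latest information from the service.
  Subnetwork latest = subnetwork.reload();
  // delete() returns null if the subnetwork was not found.
  Operation operation = subnetwork.delete();
}
```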
+ */ + public Compute compute() { + return compute; + } + + @Override + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public final boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (obj == null || !obj.getClass().equals(Subnetwork.class)) { + return false; + } + Subnetwork other = (Subnetwork) obj; + return Objects.equals(toPb(), other.toPb()) && Objects.equals(options, other.options); + } + + @Override + public final int hashCode() { + return Objects.hash(super.hashCode(), options); + } + + private void readObject(ObjectInputStream input) throws IOException, ClassNotFoundException { + input.defaultReadObject(); + this.compute = options.service(); + } + + static Subnetwork fromPb(Compute compute, + com.google.api.services.compute.model.Subnetwork subnetworkPb) { + return new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(subnetworkPb)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetworkId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetworkId.java new file mode 100644 index 000000000000..b750847a3d10 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetworkId.java @@ -0,0 +1,179 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine subnetwork. + */ +public final class SubnetworkId extends ResourceId { + + static final Function FROM_URL_FUNCTION = + new Function() { + @Override + public SubnetworkId apply(String pb) { + return SubnetworkId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = + new Function() { + @Override + public String apply(SubnetworkId zoneId) { + return zoneId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "regions/([^/]+)/subnetworks/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -5451054513760540282L; + + private final String region; + private final String subnetwork; + + private SubnetworkId(String project, String region, String subnetwork) { + super(project); + this.region = checkNotNull(region); + this.subnetwork = checkNotNull(subnetwork); + } + + /** + * Returns the name of the region this subnetwork belongs to. + */ + public String region() { + return region; + } + + /** + * Returns the identity of the region this subnetwork belongs to. + */ + public RegionId regionId() { + return RegionId.of(project(), region); + } + + /** + * Returns the name of the subnetwork. The name must be 1-63 characters long and comply with + * RFC1035. 
Specifically, the name must match the regular expression + * {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a lowercase letter, + * and all following characters must be a dash, lowercase letter, or digit, except the last + * character, which cannot be a dash. + * + * @see RFC1035 + */ + public String subnetwork() { + return subnetwork; + } + + @Override + public String selfLink() { + return super.selfLink() + "/regions/" + region + "/subnetworks/" + subnetwork; + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("region", region).add("subnetwork", subnetwork); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), region, subnetwork); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof SubnetworkId)) { + return false; + } + SubnetworkId other = (SubnetworkId) obj; + return baseEquals(other) + && Objects.equals(region, other.region) + && Objects.equals(subnetwork, other.subnetwork); + } + + @Override + SubnetworkId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return SubnetworkId.of(projectId, region(), subnetwork); + } + + /** + * Returns a subnetwork identity given the region identity and the subnetwork name. The subnetwork + * name must be 1-63 characters long and comply with RFC1035. Specifically, the name must match + * the regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must + * be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static SubnetworkId of(RegionId regionId, String subnetwork) { + return new SubnetworkId(regionId.project(), regionId.region(), subnetwork); + } + + /** + * Returns a subnetwork identity given the region and subnetwork names. The subnetwork name must + * be 1-63 characters long and comply with RFC1035. Specifically, the name must match the regular + * expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static SubnetworkId of(String region, String subnetwork) { + return new SubnetworkId(null, region, subnetwork); + } + + /** + * Returns a subnetwork identity given project, region and subnetwork names. The subnetwork name + * must be 1-63 characters long and comply with RFC1035. Specifically, the name must match the + * regular expression {@code [a-z]([-a-z0-9]*[a-z0-9])?} which means the first character must be a + * lowercase letter, and all following characters must be a dash, lowercase letter, or digit, + * except the last character, which cannot be a dash. + * + * @see RFC1035 + */ + public static SubnetworkId of(String project, String region, String subnetwork) { + return new SubnetworkId(project, region, subnetwork); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a subnetwork URL. + * Returns {@code false} otherwise. 
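The `SubnetworkId` factories above in use; all names are placeholders:

```java
import com.google.cloud.compute.SubnetworkId;

// Fully-qualified identity: project, region and subnetwork name.
SubnetworkId subnetworkId = SubnetworkId.of("my-project", "us-central1", "my-subnetwork");

// Identity without a project; the client fills in the default project when needed.
SubnetworkId partialId = SubnetworkId.of("us-central1", "my-subnetwork");
```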
+ */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static SubnetworkId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid subnetwork URL"); + } + return SubnetworkId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetworkInfo.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetworkInfo.java new file mode 100644 index 000000000000..a89a5d713f0f --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/SubnetworkInfo.java @@ -0,0 +1,348 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.api.services.compute.model.Subnetwork; +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.Objects; + +/** + * A Google Compute Engine subnetwork. Compute Engine subnetworks allow you to segment your Compute + * Engine network IP space into subnetworks. Subnetworks for a Compute Engine network can be + * automatically allocated, or you can create a custom topology. + * + * @see Subnetworks + */ +public class SubnetworkInfo implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public SubnetworkInfo apply(Subnetwork pb) { + return SubnetworkInfo.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public Subnetwork apply(SubnetworkInfo subnetwork) { + return subnetwork.toPb(); + } + }; + + private static final long serialVersionUID = 7491176262675441579L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final String generatedId; + private final SubnetworkId subnetworkId; + private final Long creationTimestamp; + private final String description; + private final String gatewayAddress; + private final NetworkId network; + private final String ipRange; + + /** + * A builder for {@code SubnetworkInfo} objects. + */ + public abstract static class Builder { + + abstract Builder generatedId(String generatedId); + + abstract Builder creationTimestamp(Long creationTimestamp); + + /** + * Sets the identity of the subnework. + */ + public abstract Builder subnetworkId(SubnetworkId subnetworkId); + + /** + * Sets an optional textual description of the subnetwork. + */ + public abstract Builder description(String description); + + abstract Builder gatewayAddress(String gatewayAddress); + + /** + * Sets the identity of the network to which this subnetwork belongs. 
Only networks that are in + * subnet mode can have subnetworks. + */ + public abstract Builder network(NetworkId network); + + /** + * Sets the range of internal IPv4 addresses that are owned by this subnetwork. This range must + * be a CIDR specification, for example: {@code 192.168.0.0/16}. Ranges must be unique and + * non-overlapping within a network. + * + * @see CIDR + */ + public abstract Builder ipRange(String ipRange); + + /** + * Creates a {@code SubnetworkInfo} object. + */ + public abstract SubnetworkInfo build(); + } + + static final class BuilderImpl extends Builder { + + private String generatedId; + private SubnetworkId subnetworkId; + private Long creationTimestamp; + private String description; + private String gatewayAddress; + private NetworkId network; + private String ipRange; + + BuilderImpl(SubnetworkId subnetworkId, NetworkId network, String ipRange) { + this.subnetworkId = checkNotNull(subnetworkId); + this.network = checkNotNull(network); + this.ipRange = checkNotNull(ipRange); + } + + BuilderImpl(SubnetworkInfo subnetworkInfo) { + this.generatedId = subnetworkInfo.generatedId; + this.creationTimestamp = subnetworkInfo.creationTimestamp; + this.subnetworkId = subnetworkInfo.subnetworkId; + this.description = subnetworkInfo.description; + this.gatewayAddress = subnetworkInfo.gatewayAddress; + this.network = subnetworkInfo.network; + this.ipRange = subnetworkInfo.ipRange; + } + + BuilderImpl(Subnetwork subnetworkPb) { + if (subnetworkPb.getId() != null) { + this.generatedId = subnetworkPb.getId().toString(); + } + if (subnetworkPb.getCreationTimestamp() != null) { + this.creationTimestamp = + TIMESTAMP_FORMATTER.parseMillis(subnetworkPb.getCreationTimestamp()); + } + this.subnetworkId = SubnetworkId.fromUrl(subnetworkPb.getSelfLink()); + this.description = subnetworkPb.getDescription(); + this.gatewayAddress = subnetworkPb.getGatewayAddress(); + if (subnetworkPb.getNetwork() != null) { + this.network = NetworkId.fromUrl(subnetworkPb.getNetwork()); + } + this.ipRange = subnetworkPb.getIpCidrRange(); + } + + @Override + BuilderImpl generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + @Override + BuilderImpl creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + @Override + public BuilderImpl subnetworkId(SubnetworkId subnetworkId) { + this.subnetworkId = checkNotNull(subnetworkId); + return this; + } + + @Override + public BuilderImpl description(String description) { + this.description = description; + return this; + } + + @Override + BuilderImpl gatewayAddress(String gatewayAddress) { + this.gatewayAddress = gatewayAddress; + return this; + } + + @Override + public BuilderImpl network(NetworkId network) { + this.network = checkNotNull(network); + return this; + } + + @Override + public BuilderImpl ipRange(String ipRange) { + this.ipRange = checkNotNull(ipRange); + return this; + } + + @Override + public SubnetworkInfo build() { + return new SubnetworkInfo(this); + } + } + + SubnetworkInfo(BuilderImpl builder) { + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.subnetworkId = checkNotNull(builder.subnetworkId); + this.description = builder.description; + this.gatewayAddress = builder.gatewayAddress; + this.network = builder.network; + this.ipRange = builder.ipRange; + } + + /** + * Returns the service-generated unique identifier for the subnetwork. 
+ */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns the subnetwork identity. + */ + public SubnetworkId subnetworkId() { + return subnetworkId; + } + + /** + * Returns a textual description of the subnetwork. + */ + public String description() { + return description; + } + + /** + * Returns the gateway IPv4 address for this subnetwork, selected by the service. + */ + public String gatewayAddress() { + return gatewayAddress; + } + + /** + * Returns the identity of the network to which this subnetwork belongs. Only networks that are in + * subnet mode can have subnetworks. + */ + public NetworkId network() { + return network; + } + + /** + * Returns the range of internal IPv4 addresses that are owned by this subnetwork. This range is a + * CIDR specification, for example: {@code 192.168.0.0/16}. Ranges must be unique and + * non-overlapping within a network. + * + * @see CIDR + */ + public String ipRange() { + return ipRange; + } + + /** + * Returns a builder for the current subnetwork. + */ + public Builder toBuilder() { + return new BuilderImpl(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("subnetworkId", subnetworkId) + .add("description", description) + .add("gatewayAddress", gatewayAddress) + .add("network", network) + .add("ipRange", ipRange) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(generatedId, creationTimestamp, subnetworkId, description, gatewayAddress, + network, ipRange); + } + + @Override + public boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(SubnetworkInfo.class) + && Objects.equals(toPb(), ((SubnetworkInfo) obj).toPb()); + } + + SubnetworkInfo setProjectId(String projectId) { + return toBuilder() + .subnetworkId(subnetworkId.setProjectId(projectId)) + .network(network.setProjectId(projectId)) + .build(); + } + + Subnetwork toPb() { + Subnetwork subnetworkPb = new Subnetwork(); + if (generatedId != null) { + subnetworkPb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + subnetworkPb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + subnetworkPb.setName(subnetworkId.subnetwork()); + subnetworkPb.setDescription(description); + subnetworkPb.setSelfLink(subnetworkId.selfLink()); + subnetworkPb.setGatewayAddress(gatewayAddress); + subnetworkPb.setNetwork(network.selfLink()); + subnetworkPb.setIpCidrRange(ipRange); + return subnetworkPb; + } + + /** + * Returns a builder for a {@code SubnetworkInfo} object given the identity of the subnetwork, the + * identity of the network this subnetwork belongs to and the range of IPv4 addresses owned by + * this subnetwork. {@code ipRange} must be a CIDR specification, for example: + * {@code 192.168.0.0/16}. + * + * @see CIDR + */ + public static Builder builder(SubnetworkId subnetworkId, NetworkId network, String ipRange) { + return new BuilderImpl(subnetworkId, network, ipRange); + } + + /** + * Returns a {@code SubnetworkInfo} object given the identity of the subnetwork, the identity of + * the network this subnetwork belongs to and the range of IPv4 addresses owned by this + * subnetwork. {@code ipRange} must be a CIDR specification, for example: {@code 192.168.0.0/16}. 
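A minimal sketch of building a `SubnetworkInfo` with the builder declared above; the names and CIDR range are placeholders, and `NetworkId.of(network)` is assumed to be the network identity used elsewhere in this change:

```java
import com.google.cloud.compute.NetworkId;
import com.google.cloud.compute.SubnetworkId;
import com.google.cloud.compute.SubnetworkInfo;

// Only networks in subnet mode can have subnetworks (see network(NetworkId) above).
SubnetworkId subnetworkId = SubnetworkId.of("us-central1", "my-subnetwork");
NetworkId networkId = NetworkId.of("my-network");

SubnetworkInfo subnetworkInfo = SubnetworkInfo.builder(subnetworkId, networkId, "10.0.0.0/16")
    .description("Subnetwork for region us-central1")
    .build();
```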
+ * + * @see CIDR + */ + public static SubnetworkInfo of(SubnetworkId subnetworkId, NetworkId network, String ipRange) { + return builder(subnetworkId, network, ipRange).build(); + } + + static SubnetworkInfo fromPb(Subnetwork subnetworkPb) { + return new BuilderImpl(subnetworkPb).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Tags.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Tags.java new file mode 100644 index 000000000000..7be604cd5a64 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Tags.java @@ -0,0 +1,227 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + +import java.io.Serializable; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A list of tags for a Google Compute Engine Instance; with associated fingerprint. Tags are used + * to identify valid sources or targets for network firewalls and are specified by the client + * during instance creation. Each tag within the list must comply with RFC1035. + * + * @see RFC1035 + */ +public final class Tags implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public Tags apply(com.google.api.services.compute.model.Tags pb) { + return Tags.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.Tags apply(Tags tags) { + return tags.toPb(); + } + }; + + private static final long serialVersionUID = 5627093820497225322L; + + private final List values; + private final String fingerprint; + + /** + * A builder for {@code Tags} objects. + */ + public static final class Builder { + + private List values; + private String fingerprint; + + private Builder() { + values = Lists.newArrayList(); + } + + private Builder(Tags tags) { + this.values = tags.values != null ? Lists.newArrayList(tags.values) + : Lists.newArrayList(); + this.fingerprint = tags.fingerprint; + } + + /** + * Sets a list of tags to apply to an instance. Tags are used to identify valid sources or + * targets for network firewalls. Each tag within the list must comply with RFC1035. + * + * @see RFC1035 + */ + public Builder values(Iterable values) { + this.values = Lists.newArrayList(values); + return this; + } + + /** + * Sets a list of tags to apply to an instance. Tags are used to identify valid sources or + * targets for network firewalls. Each tag within the list must comply with RFC1035. + * + * @see RFC1035 + */ + public Builder values(String... 
values) { + this.values = Lists.newArrayList(Arrays.asList(checkNotNull(values))); + return this; + } + + /** + * Adds a tag to the list of tags. Tags are used to identify valid sources or targets for + * network firewalls. The tag must comply with RFC1035. + * + * @see RFC1035 + */ + public Builder add(String tag) { + this.values.add(tag); + return this; + } + + /** + * Sets the fingerprint for the tags. This value is needed to update instance's tags. + */ + public Builder fingerprint(String fingerprint) { + this.fingerprint = fingerprint; + return this; + } + + /** + * Creates a {@code Tags} object. + */ + public Tags build() { + return new Tags(this); + } + } + + private Tags(Builder builder) { + this.values = ImmutableList.copyOf(builder.values); + this.fingerprint = builder.fingerprint; + } + + /** + * Returns a list of tags to apply to an instance. Tags are used to identify valid sources or + * targets for network firewalls. Each tag within the list must comply with RFC1035. + * + * @see RFC1035 + */ + public List values() { + return values; + } + + /** + * Returns the fingerprint for the tags. This value is needed to update instance's tags. + */ + public String fingerprint() { + return fingerprint; + } + + /** + * Returns a builder for the current instance tags. + */ + public Builder toBuilder() { + return new Builder(this); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("values", values) + .add("fingerprint", fingerprint) + .toString(); + } + + @Override + public int hashCode() { + return Objects.hash(values, fingerprint); + } + + @Override + public boolean equals(Object obj) { + return obj == this || obj instanceof Tags && Objects.equals(toPb(), ((Tags) obj).toPb()); + } + + com.google.api.services.compute.model.Tags toPb() { + com.google.api.services.compute.model.Tags tagsPb = + new com.google.api.services.compute.model.Tags(); + tagsPb.setFingerprint(fingerprint); + tagsPb.setItems(values); + return tagsPb; + } + + /** + * Returns a builder for a {@code Tags} object given the tags to apply to the instance. Each tag + * within the list must comply with RFC1035. + * + * @see RFC1035 + */ + public static Builder builder(Iterable values) { + return new Builder().values(values); + } + + /** + * Returns a builder for a {@code Tags} object given the tags to apply to the instance. Each tag + * within the list must comply with RFC1035. + * + * @see RFC1035 + */ + public static Builder builder(String... values) { + return new Builder().values(values); + } + + /** + * Returns a {@code Tags} object given the tags to apply to the instance. Each tag within the + * list must comply with RFC1035. + * + * @see RFC1035 + */ + public static Tags of(Iterable values) { + return builder(values).build(); + } + + /** + * Returns a {@code Tags} object given the tags to apply to the instance. Each tag within the + * list must comply with RFC1035. + * + * @see RFC1035 + */ + public static Tags of(String... values) { + return builder(values).build(); + } + + static Tags fromPb(com.google.api.services.compute.model.Tags tagsPb) { + Builder builder = + builder(tagsPb.getItems() != null ? 
tagsPb.getItems() : ImmutableList.of()); + return builder.fingerprint(tagsPb.getFingerprint()).build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/Zone.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Zone.java new file mode 100644 index 000000000000..1534506c55a3 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/Zone.java @@ -0,0 +1,260 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; + +import java.io.Serializable; +import java.math.BigInteger; +import java.util.Objects; + +/** + * A Google Compute Engine zone. + * + * @see Region and Zones + */ +public class Zone implements Serializable { + + static final Function FROM_PB_FUNCTION = + new Function() { + @Override + public Zone apply(com.google.api.services.compute.model.Zone pb) { + return Zone.fromPb(pb); + } + }; + static final Function TO_PB_FUNCTION = + new Function() { + @Override + public com.google.api.services.compute.model.Zone apply(Zone region) { + return region.toPb(); + } + }; + + private static final long serialVersionUID = 6113636504417213010L; + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + + private final ZoneId zoneId; + private final String generatedId; + private final Long creationTimestamp; + private final String description; + private final Status status; + private final RegionId region; + private final DeprecationStatus deprecationStatus; + + /** + * Status of the region. 
+ */ + public enum Status { + UP, + DOWN + } + + static final class Builder { + + private ZoneId zoneId; + private String generatedId; + private Long creationTimestamp; + private String description; + + private Status status; + private RegionId region; + private DeprecationStatus deprecationStatus; + + private Builder() {} + + Builder zoneId(ZoneId zoneId) { + this.zoneId = zoneId; + return this; + } + + Builder generatedId(String generatedId) { + this.generatedId = generatedId; + return this; + } + + Builder creationTimestamp(Long creationTimestamp) { + this.creationTimestamp = creationTimestamp; + return this; + } + + Builder description(String description) { + this.description = description; + return this; + } + + Builder status(Status status) { + this.status = status; + return this; + } + + Builder region(RegionId region) { + this.region = region; + return this; + } + + Builder deprecationStatus(DeprecationStatus deprecationStatus) { + this.deprecationStatus = deprecationStatus; + return this; + } + + Zone build() { + return new Zone(this); + } + } + + private Zone(Builder builder) { + this.zoneId = builder.zoneId; + this.generatedId = builder.generatedId; + this.creationTimestamp = builder.creationTimestamp; + this.description = builder.description; + this.status = builder.status; + this.region = builder.region; + this.deprecationStatus = builder.deprecationStatus; + } + + /** + * Returns the zone's identity. + */ + public ZoneId zoneId() { + return zoneId; + } + + /** + * Returns the creation timestamp in milliseconds since epoch. + */ + public Long creationTimestamp() { + return creationTimestamp; + } + + /** + * Returns an optional textual description of the zone. + */ + public String description() { + return description; + } + + /** + * Returns the service-generated unique identifier for the zone. + */ + public String generatedId() { + return generatedId; + } + + /** + * Returns the status of the zone. + */ + public Status status() { + return status; + } + + /** + * Returns the identity of the region that hosts the zone. + */ + public RegionId region() { + return region; + } + + /** + * Returns the deprecation status of the zone. If {@link DeprecationStatus#status()} is either + * {@link DeprecationStatus.Status#DELETED} or {@link DeprecationStatus.Status#OBSOLETE} the zone + * should not be used. Returns {@code null} if the zone is not deprecated. 
+ */ + public DeprecationStatus deprecationStatus() { + return deprecationStatus; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("zoneId", zoneId) + .add("generatedId", generatedId) + .add("creationTimestamp", creationTimestamp) + .add("description", description) + .add("status", status) + .add("region", region) + .add("deprecationStatus", deprecationStatus) + .toString(); + } + + @Override + public final int hashCode() { + return Objects.hash(zoneId); + } + + @Override + public final boolean equals(Object obj) { + return obj == this + || obj != null + && obj.getClass().equals(Zone.class) + && Objects.equals(toPb(), ((Zone) obj).toPb()); + } + + com.google.api.services.compute.model.Zone toPb() { + com.google.api.services.compute.model.Zone zonePb = + new com.google.api.services.compute.model.Zone(); + if (generatedId != null) { + zonePb.setId(new BigInteger(generatedId)); + } + if (creationTimestamp != null) { + zonePb.setCreationTimestamp(TIMESTAMP_FORMATTER.print(creationTimestamp)); + } + zonePb.setName(zoneId.zone()); + zonePb.setDescription(description); + zonePb.setSelfLink(zoneId.selfLink()); + if (status != null) { + zonePb.setStatus(status.name()); + } + if (region != null) { + zonePb.setRegion(region.selfLink()); + } + if (deprecationStatus != null) { + zonePb.setDeprecated(deprecationStatus.toPb()); + } + return zonePb; + } + + static Builder builder() { + return new Builder(); + } + + static Zone fromPb(com.google.api.services.compute.model.Zone zonePb) { + Builder builder = builder(); + builder.zoneId(ZoneId.fromUrl(zonePb.getSelfLink())); + if (zonePb.getId() != null) { + builder.generatedId(zonePb.getId().toString()); + } + if (zonePb.getCreationTimestamp() != null) { + builder.creationTimestamp(TIMESTAMP_FORMATTER.parseMillis(zonePb.getCreationTimestamp())); + } + builder.description(zonePb.getDescription()); + if (zonePb.getStatus() != null) { + builder.status(Status.valueOf(zonePb.getStatus())); + } + if (zonePb.getRegion() != null) { + builder.region(RegionId.fromUrl(zonePb.getRegion())); + } + if (zonePb.getDeprecated() != null) { + builder.deprecationStatus( + DeprecationStatus.fromPb(zonePb.getDeprecated(), ZoneId.FROM_URL_FUNCTION)); + } + return builder.build(); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ZoneId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ZoneId.java new file mode 100644 index 000000000000..1a1e2bf207be --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ZoneId.java @@ -0,0 +1,128 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.Function; +import com.google.common.base.MoreObjects.ToStringHelper; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * A Google Compute Engine zone identity. + */ +public final class ZoneId extends ResourceId { + + static final Function FROM_URL_FUNCTION = new Function() { + @Override + public ZoneId apply(String pb) { + return ZoneId.fromUrl(pb); + } + }; + static final Function TO_URL_FUNCTION = new Function() { + @Override + public String apply(ZoneId zoneId) { + return zoneId.selfLink(); + } + }; + + private static final String REGEX = ResourceId.REGEX + "zones/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = -7635391994812946733L; + + private final String zone; + + private ZoneId(String project, String zone) { + super(project); + this.zone = checkNotNull(zone); + } + + /** + * Returns the name of the zone. + */ + public final String zone() { + return zone; + } + + @Override + public String selfLink() { + return super.selfLink() + "/zones/" + zone; + } + + @Override + ToStringHelper toStringHelper() { + return super.toStringHelper().add("zone", zone); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), zone); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ZoneId)) { + return false; + } + ZoneId other = (ZoneId) obj; + return baseEquals(other) && Objects.equals(zone, other.zone); + } + + @Override + ZoneId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return ZoneId.of(projectId, zone); + } + + /** + * Returns a new zone identity given project and zone names. + */ + public static ZoneId of(String project, String zone) { + return new ZoneId(project, zone); + } + + /** + * Returns a new zone identity given zone name. + */ + public static ZoneId of(String zone) { + return ZoneId.of(null, zone); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a zone URL. + * Returns {@code false} otherwise. + */ + static boolean matchesUrl(String url) { + return url.matches(REGEX); + } + + static ZoneId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid zone URL"); + } + return ZoneId.of(matcher.group(1), matcher.group(2)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/ZoneOperationId.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ZoneOperationId.java new file mode 100644 index 000000000000..1403773d2d99 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/ZoneOperationId.java @@ -0,0 +1,133 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.base.MoreObjects; + +import java.util.Objects; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Identity for a Google Compute Engine zone operation. + */ +public final class ZoneOperationId extends OperationId { + + private static final String REGEX = ResourceId.REGEX + "zones/([^/]+)/operations/([^/]+)"; + private static final Pattern PATTERN = Pattern.compile(REGEX); + private static final long serialVersionUID = 4910670262094017392L; + + private final String zone; + + private ZoneOperationId(String project, String zone, String operation) { + super(project, operation); + this.zone = checkNotNull(zone); + } + + @Override + public Type type() { + return Type.ZONE; + } + + /** + * Returns the name of the zone this operation belongs to. + */ + public String zone() { + return zone; + } + + /** + * Returns the identity of the zone this address belongs to. + */ + public ZoneId zoneId() { + return ZoneId.of(project(), zone); + } + + @Override + public String selfLink() { + return super.selfLink() + "/zones/" + zone + "/operations/" + operation(); + } + + @Override + MoreObjects.ToStringHelper toStringHelper() { + return MoreObjects.toStringHelper(this).add("zone", zone); + } + + @Override + public int hashCode() { + return Objects.hash(baseHashCode(), zone); + } + + @Override + public boolean equals(Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof ZoneOperationId)) { + return false; + } + ZoneOperationId other = (ZoneOperationId) obj; + return baseEquals(other) && Objects.equals(zone, other.zone); + } + + @Override + ZoneOperationId setProjectId(String projectId) { + if (project() != null) { + return this; + } + return ZoneOperationId.of(projectId, zone, operation()); + } + + /** + * Returns a zone operation identity given the zone identity and the operation name. + */ + public static ZoneOperationId of(ZoneId zoneId, String operation) { + return new ZoneOperationId(zoneId.project(), zoneId.zone(), operation); + } + + /** + * Returns a zone operation identity given the zone and operation names. + */ + public static ZoneOperationId of(String zone, String operation) { + return new ZoneOperationId(null, zone, operation); + } + + /** + * Returns a zone operation identity given project, zone and operation names. + */ + public static ZoneOperationId of(String project, String zone, String operation) { + return new ZoneOperationId(project, zone, operation); + } + + /** + * Returns {@code true} if the provided string matches the expected format of a zone operation + * URL. Returns {@code false} otherwise. 
+ */ + static boolean matchesUrl(String url) { + return PATTERN.matcher(url).matches(); + } + + static ZoneOperationId fromUrl(String url) { + Matcher matcher = PATTERN.matcher(url); + if (!matcher.matches()) { + throw new IllegalArgumentException(url + " is not a valid zone operation URL"); + } + return ZoneOperationId.of(matcher.group(1), matcher.group(2), matcher.group(3)); + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/package-info.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/package-info.java new file mode 100644 index 000000000000..b7f589ea3b3f --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/package-info.java @@ -0,0 +1,63 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A client to Google Cloud Compute. + * + *
+ * <p>Here's a simple usage example for using gcloud-java from App/Compute Engine. This example
+ * shows how to create a snapshot from an existing disk. For the complete source code see
+ * CreateSnapshot.java.
+ * <pre> {@code
+ * Compute compute = ComputeOptions.defaultInstance().service();
+ * DiskId diskId = DiskId.of("us-central1-a", "disk-name");
+ * Disk disk = compute.getDisk(diskId, Compute.DiskOption.fields());
+ * if (disk != null) {
+ *   String snapshotName = "disk-name-snapshot";
+ *   Operation operation = disk.createSnapshot(snapshotName);
+ *   while (!operation.isDone()) {
+ *     Thread.sleep(1000L);
+ *   }
+ *   if (operation.errors() == null) {
+ *     // use snapshot
+ *     Snapshot snapshot = compute.getSnapshot("disk-name-snapshot");
+ *   }
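+ *   // Illustrative addition, not part of the original example: delete the snapshot once it is
+ *   // no longer needed. It is assumed here that deleteSnapshot returns a global Operation that
+ *   // can be polled just like the snapshot-creation operation above.
+ *   Operation deleteOperation = compute.deleteSnapshot(snapshotName);
+ *   while (!deleteOperation.isDone()) {
+ *     Thread.sleep(1000L);
+ *   }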
+ * }}</pre>
+ *
+ * <p>This second example shows how to create a virtual machine instance. Complete source code can
+ * be found at
+ * CreateInstance.java.
+ * <pre> {@code
+ * Compute compute = ComputeOptions.defaultInstance().service();
+ * ImageId imageId = ImageId.of("debian-cloud", "debian-8-jessie-v20160329");
+ * NetworkId networkId = NetworkId.of("default");
+ * AttachedDisk attachedDisk = AttachedDisk.of(AttachedDisk.CreateDiskConfiguration.of(imageId));
+ * NetworkInterface networkInterface = NetworkInterface.of(networkId);
+ * InstanceId instanceId = InstanceId.of("us-central1-a", "instance-name");
+ * MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1");
+ * Operation operation =
+ *     compute.create(InstanceInfo.of(instanceId, machineTypeId, attachedDisk, networkInterface));
+ * while (!operation.isDone()) {
+ *   Thread.sleep(1000L);
+ * }
+ * if (operation.errors() == null) {
+ *   // use instance
+ *   Instance instance = compute.getInstance(instanceId);
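+ *   // Illustrative addition, not part of the original example: the Tags helper introduced in
+ *   // this change can be used to build RFC1035-compliant network tags for the instance. The tag
+ *   // names below are placeholders; applying them to the instance requires a separate set-tags
+ *   // call and, for updates, the current tags' fingerprint.
+ *   Tags tags = Tags.builder("http-server", "https-server").build();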
+ * }}</pre>
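+ *
+ * <p>As a final illustrative sketch (not part of the original examples), an instance that is no
+ * longer needed can be deleted through the same service object; {@code deleteInstance} accepting
+ * the {@code InstanceId} used above and returning a pollable {@code Operation} is an assumption
+ * here:
+ * <pre> {@code
+ * Operation deleteOperation = compute.deleteInstance(instanceId);
+ * }</pre>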
+ * + * @see Google Cloud Compute + */ +package com.google.cloud.compute; diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/ComputeRpc.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/ComputeRpc.java new file mode 100644 index 000000000000..2209da1f6951 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/ComputeRpc.java @@ -0,0 +1,672 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute.spi; + +import com.google.api.services.compute.model.AccessConfig; +import com.google.api.services.compute.model.Address; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.DeprecationStatus; +import com.google.api.services.compute.model.Disk; +import com.google.api.services.compute.model.DiskType; +import com.google.api.services.compute.model.Image; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.License; +import com.google.api.services.compute.model.MachineType; +import com.google.api.services.compute.model.Metadata; +import com.google.api.services.compute.model.Network; +import com.google.api.services.compute.model.Operation; +import com.google.api.services.compute.model.Region; +import com.google.api.services.compute.model.Scheduling; +import com.google.api.services.compute.model.Snapshot; +import com.google.api.services.compute.model.Subnetwork; +import com.google.api.services.compute.model.Tags; +import com.google.api.services.compute.model.Zone; +import com.google.cloud.compute.ComputeException; + +import java.util.Map; + +public interface ComputeRpc { + + // These options are part of the Google Compute Engine query parameters + enum Option { + FIELDS("fields"), + MAX_RESULTS("maxResults"), + PAGE_TOKEN("pageToken"), + FILTER("filter"); + + private final String value; + + Option(String value) { + this.value = value; + } + + public String value() { + return value; + } + + @SuppressWarnings("unchecked") + T get(Map options) { + return (T) options.get(this); + } + + String getString(Map options) { + return get(options); + } + + Long getLong(Map options) { + return get(options); + } + + Boolean getBoolean(Map options) { + return get(options); + } + } + + class Tuple { + + private final X x; + private final Y y; + + private Tuple(X x, Y y) { + this.x = x; + this.y = y; + } + + public static Tuple of(X x, Y y) { + return new Tuple<>(x, y); + } + + public X x() { + return x; + } + + public Y y() { + return y; + } + } + + /** + * Returns the requested disk type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + DiskType getDiskType(String zone, String diskType, Map options); + + /** + * Lists the disk types in the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listDiskTypes(String zone, Map options); + + /** + * Lists disk types. 
+ * + * @throws ComputeException upon failure + */ + Tuple> listDiskTypes(Map options); + + /** + * Returns the requested machine type or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + MachineType getMachineType(String zone, String diskType, Map options); + + /** + * Lists the machine types in the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listMachineTypes(String zone, Map options); + + /** + * Lists machine types. + * + * @throws ComputeException upon failure + */ + Tuple> listMachineTypes(Map options); + + /** + * Returns the requested region or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Region getRegion(String region, Map options); + + /** + * Lists the regions. + * + * @throws ComputeException upon failure + */ + Tuple> listRegions(Map options); + + /** + * Returns the requested zone or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Zone getZone(String zone, Map options); + + /** + * Lists the zones. + * + * @throws ComputeException upon failure + */ + Tuple> listZones(Map options); + + /** + * Returns the requested license or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + License getLicense(String project, String license, Map options); + + /** + * Returns the requested global operation or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Operation getGlobalOperation(String operation, Map options); + + /** + * Lists the global operations. + * + * @throws ComputeException upon failure + */ + Tuple> listGlobalOperations(Map options); + + /** + * Deletes the requested global operation. + * + * @return {@code true} if operation was deleted, {@code false} if it was not found + * @throws ComputeException upon failure + */ + boolean deleteGlobalOperation(String operation); + + /** + * Returns the requested region operation or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Operation getRegionOperation(String region, String operation, Map options); + + /** + * Lists the region operations for the provided region. + * + * @throws ComputeException upon failure + */ + Tuple> listRegionOperations(String region, Map options); + + /** + * Deletes the requested region operation. + * + * @return {@code true} if operation was deleted, {@code false} if it was not found + * @throws ComputeException upon failure + */ + boolean deleteRegionOperation(String region, String operation); + + /** + * Returns the requested zone operation or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Operation getZoneOperation(String zone, String operation, Map options); + + /** + * Lists the zone operations for the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listZoneOperations(String zone, Map options); + + /** + * Deletes the requested zone operation. + * + * @return {@code true} if operation was deleted, {@code false} if it was not found + * @throws ComputeException upon failure + */ + boolean deleteZoneOperation(String zone, String operation); + + /** + * Returns the requested global address or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Address getGlobalAddress(String address, Map options); + + /** + * Creates a new global address. 
+ * + * @return a global operation for global address' creation + * @throws ComputeException upon failure + */ + Operation createGlobalAddress(Address address, Map options); + + /** + * Lists the global addresses. + * + * @throws ComputeException upon failure + */ + Tuple> listGlobalAddresses(Map options); + + /** + * Deletes the requested global address. + * + * @return a global operation if the request was issued correctly, {@code null} if the address was + * not found + * @throws ComputeException upon failure + */ + Operation deleteGlobalAddress(String address, Map options); + + /** + * Returns the requested region address or {@code null} if not found. + * + * @throws ComputeException upon failure or if region is not found + */ + Address getRegionAddress(String region, String address, Map options); + + /** + * Creates a new region address. + * + * @return a region operation for region address' creation + * @throws ComputeException upon failure or if region is not found + */ + Operation createRegionAddress(String region, Address address, Map options); + + /** + * Lists the regions addresses for the provided region. + * + * @throws ComputeException upon failure or if region is not found + */ + Tuple> listRegionAddresses(String region, Map options); + + /** + * Lists all addresses. + * + * @throws ComputeException upon failure + */ + Tuple> listAddresses(Map options); + + /** + * Deletes the requested region address. + * + * @return a region operation if the request was issued correctly, {@code null} if the address was + * not found + * @throws ComputeException upon failure or if region is not found + */ + Operation deleteRegionAddress(String region, String address, Map options); + + /** + * Creates a snapshot for the specified disk. + * + * @return a zone operation for snapshot creation + * @throws ComputeException upon failure + */ + Operation createSnapshot(String zone, String disk, String snapshot, String description, + Map options); + + /** + * Returns the requested snapshot or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Snapshot getSnapshot(String snapshot, Map options); + + /** + * Lists snapshots. + * + * @throws ComputeException upon failure + */ + Tuple> listSnapshots(Map options); + + /** + * Deletes the requested snapshot. Keep in mind that deleting a single snapshot might not + * necessarily delete all the data for that snapshot. If any data for the snapshot that is marked + * for deletion is needed for subsequent snapshots, the data will be moved to the next snapshot. + * + * @return a global operation if the request was issued correctly, {@code null} if the snapshot + * was not found + * @throws ComputeException upon failure + */ + Operation deleteSnapshot(String snapshot, Map options); + + /** + * Creates a new image. + * + * @return a global operation for image's creation + * @throws ComputeException upon failure + */ + Operation createImage(Image image, Map options); + + /** + * Returns the requested image or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Image getImage(String project, String image, Map options); + + /** + * Lists images in the provided project that are available to the current user. + * + * @throws ComputeException upon failure + */ + Tuple> listImages(String project, Map options); + + /** + * Deletes the requested image. 
+ * + * @return a global operation if the delete request was issued correctly, {@code null} if the + * image was not found + * @throws ComputeException upon failure + */ + Operation deleteImage(String project, String image, Map options); + + /** + * Deprecates the requested image. + * + * @return a global operation if the deprecation request was issued correctly, {@code null} if the + * image was not found + * @throws ComputeException upon failure + */ + Operation deprecateImage(String project, String image, DeprecationStatus deprecationStatus, + Map options); + + /** + * Returns the requested disk or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Disk getDisk(String zone, String disk, Map options); + + /** + * Creates a new disk. + * + * @return a zone operation for disk's creation + * @throws ComputeException upon failure + */ + Operation createDisk(String zone, Disk disk, Map options); + + /** + * Lists the disks for the provided zone. + * + * @throws ComputeException upon failure + */ + Tuple> listDisks(String zone, Map options); + + /** + * Lists disks for all zones. + * + * @throws ComputeException upon failure + */ + Tuple> listDisks(Map options); + + /** + * Deletes the requested disk. + * + * @return a zone operation if the request was issued correctly, {@code null} if the disk was not + * found + * @throws ComputeException upon failure + */ + Operation deleteDisk(String zone, String disk, Map options); + + /** + * Resizes the disk to the requested size. The new size must be larger than the previous one. + * + * @return a zone operation if the request was issued correctly, {@code null} if the disk was not + * found + * @throws ComputeException upon failure or if the new disk size is smaller than the previous one + */ + Operation resizeDisk(String zone, String disk, long sizeGb, Map options); + + /* + * Creates a new subnetwork. + * + * @return a region operation for subnetwork's creation + * @throws ComputeException upon failure + */ + Operation createSubnetwork(String region, Subnetwork subnetwork, Map options); + + /** + * Returns the requested subnetwork or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Subnetwork getSubnetwork(String region, String subnetwork, Map options); + + /** + * Lists subnetworks for the provided region. + * + * @throws ComputeException upon failure + */ + Tuple> listSubnetworks(String region, Map options); + + /** + * Lists subnetworks. + * + * @throws ComputeException upon failure + */ + Tuple> listSubnetworks(Map options); + + /** + * Deletes the requested subnetwork. Any attempt to delete an automatically created subnetwork + * will fail. + * + * @return a region operation if the delete request was issued correctly, {@code null} if the + * subnetwork was not found + * @throws ComputeException upon failure + */ + Operation deleteSubnetwork(String region, String subnetwork, Map options); + + /** + * Creates a new network. + * + * @return a global operation for network's creation + * @throws ComputeException upon failure + */ + Operation createNetwork(Network network, Map options); + + /** + * Returns the requested network or {@code null} if not found. + * + * @throws ComputeException upon failure + */ + Network getNetwork(String network, Map options); + + /** + * Lists networks. + * + * @throws ComputeException upon failure + */ + Tuple> listNetworks(Map options); + + /** + * Deletes the requested network. 
+ * + * @return a global operation if the delete request was issued correctly, {@code null} if the + * network was not found + * @throws ComputeException upon failure + */ + Operation deleteNetwork(String network, Map options); + + /** + * Creates a new instance. + * + * @return a zone operation for instance's creation + * @throws ComputeException upon failure or if the zone does not exist + */ + Operation createInstance(String zone, Instance instance, Map options); + + /** + * Returns the requested instance or {@code null} if not found. + * + * @throws ComputeException upon failure or if the zone does not exist + */ + Instance getInstance(String zone, String instance, Map options); + + /** + * Lists instances for the provided zone. + * + * @throws ComputeException upon failure or if the zone does not exist + */ + Tuple> listInstances(String zone, Map options); + + /** + * Lists instances. + * + * @throws ComputeException upon failure + */ + Tuple> listInstances(Map options); + + /** + * Deletes the requested instance. + * + * @return a zone operation if the delete request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure or if the zone does not exist + */ + Operation deleteInstance(String zone, String instance, Map options); + + /** + * Adds an access configuration to an instance's network interface. + * + * @return a zone operation if the add request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation addAccessConfig(String zone, String instance, String networkInterface, + AccessConfig accessConfig, Map options); + + /** + * Attaches a disk to an instance. + * + * @return a zone operation if the attach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation attachDisk(String zone, String instance, AttachedDisk attachedDisk, + Map options); + + /** + * Deletes an access configuration from an instance's network interface. + * + * @return a zone operation if the delete request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation deleteAccessConfig(String zone, String instance, String networkInterface, + String accessConfig, Map options); + + /** + * Detaches a disk from an instance. + * + * @return a zone operation if the detach request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation detachDisk(String zone, String instance, String deviceName, Map options); + + /** + * Returns the serial port output for the provided instance and port number. {@code port} must be + * between 1 and 4 (inclusive). If {@code port} is {@code null} output for the default port (1) is + * returned. + * + * @return the serial port output or {@code null} if the instance was not found + * @throws ComputeException upon failure + */ + String getSerialPortOutput(String zone, String instance, Integer port, Map options); + + /** + * Resets the provided instance. + * + * @return a zone operation if the reset request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation reset(String zone, String instance, Map options); + + /** + * Sets the auto-delete flag for a disk attached to the provided instance. 
+ * + * @return a zone operation if the flag setting request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation setDiskAutoDelete(String zone, String instance, String deviceName, boolean autoDelete, + Map options); + + /** + * Sets the machine type for the provided instance. Instance must be in {@code TERMINATED} state + * to be able to set its machine type. + * + * @param zone name of the zone in which the instance resides + * @param instance name of the instance + * @param machineTypeUrl full or partial URL of the machine type resource. For example + * {@code zones/us-central1-f/machineTypes/n1-standard-1}. + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setMachineType(String zone, String instance, String machineTypeUrl, + Map options); + + /** + * Sets the metadata for the provided instance. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setMetadata(String zone, String instance, Metadata metadata, Map options); + + /** + * Sets the scheduling options for the provided instance. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setScheduling(String zone, String instance, Scheduling scheduling, + Map options); + + /** + * Sets the tags for the provided instance. + * + * @return a zone operation if the set request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation setTags(String zone, String instance, Tags tags, Map options); + + /** + * Starts the provided instance. + * + * @return a zone operation if the start request was issued correctly, {@code null} if the + * instance was not found + * @throws ComputeException upon failure + */ + Operation start(String zone, String instance, Map options); + + /** + * Stops the provided instance. + * + * @return a zone operation if the stop request was issued correctly, {@code null} if the instance + * was not found + * @throws ComputeException upon failure + */ + Operation stop(String zone, String instance, Map options); +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/ComputeRpcFactory.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/ComputeRpcFactory.java new file mode 100644 index 000000000000..a9d1593b9930 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/ComputeRpcFactory.java @@ -0,0 +1,27 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute.spi; + +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.spi.ServiceRpcFactory; + +/** + * An interface for Compute RPC factory. + * Implementation will be loaded via {@link java.util.ServiceLoader}. + */ +public interface ComputeRpcFactory extends ServiceRpcFactory { +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/DefaultComputeRpc.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/DefaultComputeRpc.java new file mode 100644 index 000000000000..542a2aa2fcb7 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/spi/DefaultComputeRpc.java @@ -0,0 +1,1156 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute.spi; + +import static java.net.HttpURLConnection.HTTP_NOT_FOUND; + +import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.http.HttpTransport; +import com.google.api.client.json.jackson.JacksonFactory; +import com.google.api.services.compute.Compute; +import com.google.api.services.compute.model.AccessConfig; +import com.google.api.services.compute.model.Address; +import com.google.api.services.compute.model.AddressAggregatedList; +import com.google.api.services.compute.model.AddressList; +import com.google.api.services.compute.model.AddressesScopedList; +import com.google.api.services.compute.model.AttachedDisk; +import com.google.api.services.compute.model.DeprecationStatus; +import com.google.api.services.compute.model.Disk; +import com.google.api.services.compute.model.DiskAggregatedList; +import com.google.api.services.compute.model.DiskList; +import com.google.api.services.compute.model.DiskType; +import com.google.api.services.compute.model.DiskTypeAggregatedList; +import com.google.api.services.compute.model.DiskTypeList; +import com.google.api.services.compute.model.DiskTypesScopedList; +import com.google.api.services.compute.model.DisksResizeRequest; +import com.google.api.services.compute.model.DisksScopedList; +import com.google.api.services.compute.model.Image; +import com.google.api.services.compute.model.ImageList; +import com.google.api.services.compute.model.Instance; +import com.google.api.services.compute.model.InstanceAggregatedList; +import com.google.api.services.compute.model.InstanceList; +import com.google.api.services.compute.model.InstancesScopedList; +import com.google.api.services.compute.model.InstancesSetMachineTypeRequest; +import com.google.api.services.compute.model.License; +import com.google.api.services.compute.model.MachineType; +import com.google.api.services.compute.model.MachineTypeAggregatedList; +import com.google.api.services.compute.model.MachineTypeList; +import com.google.api.services.compute.model.MachineTypesScopedList; +import com.google.api.services.compute.model.Metadata; +import com.google.api.services.compute.model.Network; +import 
com.google.api.services.compute.model.NetworkList; +import com.google.api.services.compute.model.Operation; +import com.google.api.services.compute.model.OperationList; +import com.google.api.services.compute.model.Region; +import com.google.api.services.compute.model.RegionList; +import com.google.api.services.compute.model.Scheduling; +import com.google.api.services.compute.model.SerialPortOutput; +import com.google.api.services.compute.model.Snapshot; +import com.google.api.services.compute.model.SnapshotList; +import com.google.api.services.compute.model.Subnetwork; +import com.google.api.services.compute.model.SubnetworkAggregatedList; +import com.google.api.services.compute.model.SubnetworkList; +import com.google.api.services.compute.model.SubnetworksScopedList; +import com.google.api.services.compute.model.Tags; +import com.google.api.services.compute.model.Zone; +import com.google.api.services.compute.model.ZoneList; +import com.google.cloud.compute.ComputeException; +import com.google.cloud.compute.ComputeOptions; +import com.google.common.collect.ImmutableList; + +import java.io.IOException; +import java.util.Map; + +public class DefaultComputeRpc implements ComputeRpc { + + private final ComputeOptions options; + private final Compute compute; + + public DefaultComputeRpc(ComputeOptions options) { + HttpTransport transport = options.httpTransportFactory().create(); + HttpRequestInitializer initializer = options.httpRequestInitializer(); + this.options = options; + compute = new Compute.Builder(transport, new JacksonFactory(), initializer) + .setRootUrl(options.host()) + .setApplicationName(options.applicationName()) + .build(); + } + + private static ComputeException translate(IOException exception) { + return new ComputeException(exception); + } + + @Override + public DiskType getDiskType(String zone, String diskType, Map options) { + try { + return compute.diskTypes() + .get(this.options.projectId(), zone, diskType) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listDiskTypes(String zone, Map options) { + try { + DiskTypeList diskTypesList = compute.diskTypes() + .list(this.options.projectId(), zone) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable diskTypes = diskTypesList.getItems(); + return Tuple.of(diskTypesList.getNextPageToken(), diskTypes); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listDiskTypes(Map options) { + try { + DiskTypeAggregatedList aggregatedList = compute.diskTypes() + .aggregatedList(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (DiskTypesScopedList diskTypesScopedList : scopedList.values()) { + if (diskTypesScopedList.getDiskTypes() != null) { + builder.addAll(diskTypesScopedList.getDiskTypes()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } 
+ + @Override + public MachineType getMachineType(String zone, String machineType, Map options) { + try { + return compute.machineTypes() + .get(this.options.projectId(), zone, machineType) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listMachineTypes(String zone, + Map options) { + try { + MachineTypeList machineTypesList = compute.machineTypes() + .list(this.options.projectId(), zone) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable machineTypes = machineTypesList.getItems(); + return Tuple.of(machineTypesList.getNextPageToken(), machineTypes); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listMachineTypes(Map options) { + try { + MachineTypeAggregatedList aggregatedList = compute.machineTypes() + .aggregatedList(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (MachineTypesScopedList machineTypesScopedList : scopedList.values()) { + if (machineTypesScopedList.getMachineTypes() != null) { + builder.addAll(machineTypesScopedList.getMachineTypes()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Region getRegion(String region, Map options) { + try { + return compute.regions() + .get(this.options.projectId(), region) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listRegions(Map options) { + try { + RegionList regionsList = compute.regions() + .list(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable regions = regionsList.getItems(); + return Tuple.of(regionsList.getNextPageToken(), regions); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Zone getZone(String zone, Map options) { + try { + return compute.zones() + .get(this.options.projectId(), zone) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listZones(Map options) { + try { + ZoneList zonesList = compute.zones() + .list(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable zones = zonesList.getItems(); + return Tuple.of(zonesList.getNextPageToken(), zones); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public License getLicense(String project, String license, Map options) { + try { + return compute.licenses() + .get(project, 
license) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation getGlobalOperation(String operation, Map options) { + try { + return compute.globalOperations() + .get(this.options.projectId(), operation) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listGlobalOperations(Map options) { + try { + OperationList operationsList = compute.globalOperations() + .list(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable operations = operationsList.getItems(); + return Tuple.of(operationsList.getNextPageToken(), operations); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public boolean deleteGlobalOperation(String operation) { + try { + compute.globalOperations().delete(this.options.projectId(), operation).execute(); + return true; + } catch (IOException ex) { + return falseForNotFound(ex); + } + } + + @Override + public Operation getRegionOperation(String region, String operation, Map options) { + try { + return compute.regionOperations() + .get(this.options.projectId(), region, operation) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listRegionOperations(String region, + Map options) { + try { + OperationList operationsList = compute.regionOperations() + .list(this.options.projectId(), region) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable operations = operationsList.getItems(); + return Tuple.of(operationsList.getNextPageToken(), operations); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public boolean deleteRegionOperation(String region, String operation) { + try { + compute.regionOperations().delete(this.options.projectId(), region, operation).execute(); + return true; + } catch (IOException ex) { + return falseForNotFound(ex); + } + } + + @Override + public Operation getZoneOperation(String zone, String operation, Map options) { + try { + return compute.zoneOperations() + .get(this.options.projectId(), zone, operation) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listZoneOperations(String zone, + Map options) { + try { + OperationList operationsList = compute.zoneOperations() + .list(this.options.projectId(), zone) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable operations = operationsList.getItems(); + return Tuple.of(operationsList.getNextPageToken(), operations); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public boolean deleteZoneOperation(String zone, String operation) { + try { + compute.zoneOperations().delete(this.options.projectId(), zone, operation).execute(); + return true; + } catch (IOException ex) { + return 
falseForNotFound(ex); + } + } + + @Override + public Address getGlobalAddress(String address, Map options) { + try { + return compute.globalAddresses() + .get(this.options.projectId(), address) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createGlobalAddress(Address address, Map options) { + try { + return compute.globalAddresses() + .insert(this.options.projectId(), address) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listGlobalAddresses(Map options) { + try { + AddressList addressList = compute.globalAddresses() + .list(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable
operations = addressList.getItems(); + return Tuple.of(addressList.getNextPageToken(), operations); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteGlobalAddress(String address, Map options) { + try { + return compute.globalAddresses() + .delete(this.options.projectId(), address) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Address getRegionAddress(String region, String address, Map options) { + try { + return compute.addresses() + .get(this.options.projectId(), region, address) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createRegionAddress(String region, Address address, Map options) { + try { + return compute.addresses() + .insert(this.options.projectId(), region, address) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listRegionAddresses(String region, + Map options) { + try { + AddressList addressList = compute.addresses() + .list(this.options.projectId(), region) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable
operations = addressList.getItems(); + return Tuple.of(addressList.getNextPageToken(), operations); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listAddresses(Map options) { + try { + AddressAggregatedList aggregatedList = compute.addresses() + .aggregatedList(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder
builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (AddressesScopedList addressesScopedList : scopedList.values()) { + if (addressesScopedList.getAddresses() != null) { + builder.addAll(addressesScopedList.getAddresses()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteRegionAddress(String region, String address, Map options) { + try { + return compute.addresses() + .delete(this.options.projectId(), region, address) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createSnapshot(String zone, String disk, String snapshot, String description, + Map options) { + Snapshot snapshotObject = new Snapshot().setName(snapshot).setDescription(description); + try { + return compute.disks() + .createSnapshot(this.options.projectId(), zone, disk, snapshotObject) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Snapshot getSnapshot(String snapshot, Map options) { + try { + return compute.snapshots() + .get(this.options.projectId(), snapshot) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listSnapshots(Map options) { + try { + SnapshotList snapshotList = compute.snapshots() + .list(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable snapshots = snapshotList.getItems(); + return Tuple.of(snapshotList.getNextPageToken(), snapshots); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteSnapshot(String snapshot, Map options) { + try { + return compute.snapshots() + .delete(this.options.projectId(), snapshot) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createImage(Image image, Map options) { + try { + return compute.images() + .insert(this.options.projectId(), image) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Image getImage(String project, String image, Map options) { + try { + return compute.images() + .get(project, image) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listImages(String project, Map options) { + try { + ImageList imageList = compute.images() + .list(project) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable images = imageList.getItems(); + return Tuple.of(imageList.getNextPageToken(), images); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteImage(String project, String image, Map options) { + try { + return compute.images() + .delete(project, image) + 
.setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation deprecateImage(String project, String image, DeprecationStatus deprecationStatus, + Map options) { + try { + return compute.images() + .deprecate(project, image, deprecationStatus) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Disk getDisk(String zone, String disk, Map options) { + try { + return compute.disks() + .get(this.options.projectId(), zone, disk) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createDisk(String zone, Disk disk, Map options) { + try { + return compute.disks() + .insert(this.options.projectId(), zone, disk) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listDisks(String zone, Map options) { + try { + DiskList diskList = compute.disks() + .list(this.options.projectId(), zone) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable disks = diskList.getItems(); + return Tuple.of(diskList.getNextPageToken(), disks); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listDisks(Map options) { + try { + DiskAggregatedList aggregatedList = compute.disks() + .aggregatedList(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (DisksScopedList disksScopedList : scopedList.values()) { + if (disksScopedList.getDisks() != null) { + builder.addAll(disksScopedList.getDisks()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteDisk(String zone, String disk, Map options) { + try { + return compute.disks() + .delete(this.options.projectId(), zone, disk) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation resizeDisk(String zone, String disk, long sizeGb, Map options) { + try { + DisksResizeRequest resizeRequest = new DisksResizeRequest().setSizeGb(sizeGb); + return compute.disks().resize(this.options.projectId(), zone, disk, resizeRequest) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + public Operation createSubnetwork(String region, Subnetwork subnetwork, Map options) { + try { + return compute.subnetworks() + .insert(this.options.projectId(), region, subnetwork) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Subnetwork getSubnetwork(String region, String subnetwork, Map options) { + try { + return 
compute.subnetworks() + .get(this.options.projectId(), region, subnetwork) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listSubnetworks(String region, + Map options) { + try { + SubnetworkList subnetworkList = compute.subnetworks() + .list(this.options.projectId(), region) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable subnetworks = subnetworkList.getItems(); + return Tuple.of(subnetworkList.getNextPageToken(), subnetworks); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listSubnetworks(Map options) { + try { + SubnetworkAggregatedList aggregatedList = compute.subnetworks() + .aggregatedList(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (SubnetworksScopedList subnetworksScopedList : scopedList.values()) { + if (subnetworksScopedList.getSubnetworks() != null) { + builder.addAll(subnetworksScopedList.getSubnetworks()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteSubnetwork(String region, String subnetwork, Map options) { + try { + return compute.subnetworks() + .delete(this.options.projectId(), region, subnetwork) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createNetwork(Network network, Map options) { + try { + return compute.networks() + .insert(this.options.projectId(), network) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Network getNetwork(String network, Map options) { + try { + return compute.networks() + .get(this.options.projectId(), network) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listNetworks(Map options) { + try { + NetworkList networkList = compute.networks() + .list(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable networks = networkList.getItems(); + return Tuple.of(networkList.getNextPageToken(), networks); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteNetwork(String network, Map options) { + try { + return compute.networks() + .delete(this.options.projectId(), network) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation createInstance(String zone, Instance instance, Map options) { + try { + return compute.instances() + 
.insert(this.options.projectId(), zone, instance) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Instance getInstance(String zone, String instance, Map options) { + try { + return compute.instances() + .get(this.options.projectId(), zone, instance) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Tuple> listInstances(String zone, Map options) { + try { + InstanceList instanceList = compute.instances() + .list(this.options.projectId(), zone) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + .setFields(Option.FIELDS.getString(options)) + .execute(); + Iterable instances = instanceList.getItems(); + return Tuple.of(instanceList.getNextPageToken(), instances); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Tuple> listInstances(Map options) { + try { + InstanceAggregatedList aggregatedList = compute.instances() + .aggregatedList(this.options.projectId()) + .setFilter(Option.FILTER.getString(options)) + .setMaxResults(Option.MAX_RESULTS.getLong(options)) + .setPageToken(Option.PAGE_TOKEN.getString(options)) + // todo(mziccard): uncomment or remove once #711 is closed + // .setFields(FIELDS.getString(options)) + .execute(); + ImmutableList.Builder builder = ImmutableList.builder(); + Map scopedList = aggregatedList.getItems(); + if (scopedList != null) { + for (InstancesScopedList instancesScopedList : scopedList.values()) { + if (instancesScopedList.getInstances() != null) { + builder.addAll(instancesScopedList.getInstances()); + } + } + } + return Tuple.>of(aggregatedList.getNextPageToken(), + builder.build()); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteInstance(String zone, String instance, Map options) { + try { + return compute.instances() + .delete(this.options.projectId(), zone, instance) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation addAccessConfig(String zone, String instance, String networkInterface, + AccessConfig accessConfig, Map options) { + try { + return compute.instances() + .addAccessConfig(this.options.projectId(), zone, instance, networkInterface, accessConfig) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation attachDisk(String zone, String instance, AttachedDisk attachedDisk, + Map options) { + try { + return compute.instances() + .attachDisk(this.options.projectId(), zone, instance, attachedDisk) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + throw translate(ex); + } + } + + @Override + public Operation deleteAccessConfig(String zone, String instance, String networkInterface, + String accessConfig, Map options) { + try { + return compute.instances() + .deleteAccessConfig(this.options.projectId(), zone, instance, accessConfig, + networkInterface) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation detachDisk(String zone, String instance, String deviceName, + Map options) { + try { + return compute.instances() + 
.detachDisk(this.options.projectId(), zone, instance, deviceName) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public String getSerialPortOutput(String zone, String instance, Integer port, + Map options) { + try { + SerialPortOutput portOutput = compute.instances() + .getSerialPortOutput(this.options.projectId(), zone, instance) + .setPort(port) + .setFields(Option.FIELDS.getString(options)) + .execute(); + return portOutput != null ? portOutput.getContents() : null; + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation reset(String zone, String instance, Map options) { + try { + return compute.instances() + .reset(this.options.projectId(), zone, instance) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation setDiskAutoDelete(String zone, String instance, String deviceName, + boolean autoDelete, Map options) { + try { + return compute.instances() + .setDiskAutoDelete(this.options.projectId(), zone, instance, autoDelete, deviceName) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation setMachineType(String zone, String instance, String machineTypeUrl, + Map options) { + try { + InstancesSetMachineTypeRequest request = + new InstancesSetMachineTypeRequest().setMachineType(machineTypeUrl); + return compute.instances() + .setMachineType(this.options.projectId(), zone, instance, request) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation setMetadata(String zone, String instance, Metadata metadata, + Map options) { + try { + return compute.instances() + .setMetadata(this.options.projectId(), zone, instance, metadata) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation setScheduling(String zone, String instance, Scheduling scheduling, + Map options) { + try { + return compute.instances() + .setScheduling(this.options.projectId(), zone, instance, scheduling) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation setTags(String zone, String instance, Tags tags, Map options) { + try { + return compute.instances() + .setTags(this.options.projectId(), zone, instance, tags) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation start(String zone, String instance, Map options) { + try { + return compute.instances() + .start(this.options.projectId(), zone, instance) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + @Override + public Operation stop(String zone, String instance, Map options) { + try { + return compute.instances() + .stop(this.options.projectId(), zone, instance) + .setFields(Option.FIELDS.getString(options)) + .execute(); + } catch (IOException ex) { + return nullForNotFound(ex); + } + } + + /** + * This method returns {@code null} if the error code of {@code exception} was 404, re-throws the + * exception otherwise. 
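+ * Service methods that look up or delete a single resource call it so that a missing resource
+ * maps to a {@code null} return value rather than a thrown exception.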
+ * + * @throws ComputeException if the error code of {@code exception} was not 404 + */ + private static T nullForNotFound(IOException exception) { + ComputeException serviceException = translate(exception); + if (serviceException.code() == HTTP_NOT_FOUND) { + return null; + } + throw serviceException; + } + + /** + * This method returns {@code false} if the error code of {@code exception} was 404, re-throws the + * exception otherwise. + * + * @throws ComputeException if the error code of {@code exception} was not 404 + */ + private static boolean falseForNotFound(IOException exception) { + ComputeException serviceException = translate(exception); + if (serviceException.code() == HTTP_NOT_FOUND) { + return false; + } + throw serviceException; + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/testing/RemoteComputeHelper.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/testing/RemoteComputeHelper.java new file mode 100644 index 000000000000..eb443a25af28 --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/testing/RemoteComputeHelper.java @@ -0,0 +1,127 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute.testing; + +import com.google.cloud.AuthCredentials; +import com.google.cloud.RetryParams; +import com.google.cloud.compute.ComputeOptions; + +import java.io.IOException; +import java.io.InputStream; +import java.util.UUID; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Utility to create a remote Compute configuration for testing. Compute options can be obtained + * via the {@link #options()} method. Returned options have custom + * {@link ComputeOptions#retryParams()}: {@link RetryParams#retryMaxAttempts()} is {@code 10}, + * {@link RetryParams#retryMinAttempts()} is {@code 6}, {@link RetryParams#maxRetryDelayMillis()} is + * {@code 30000}, {@link RetryParams#totalRetryPeriodMillis()} is {@code 120000} and + * {@link RetryParams#initialRetryDelayMillis()} is {@code 250}. + * {@link ComputeOptions#connectTimeout()} and {@link ComputeOptions#readTimeout()} are both set to + * {@code 60000}. + */ +public class RemoteComputeHelper { + + private static final Logger log = Logger.getLogger(RemoteComputeHelper.class.getName()); + private final ComputeOptions options; + + private RemoteComputeHelper(ComputeOptions options) { + this.options = options; + } + + /** + * Returns a {@link ComputeOptions} object to be used for testing. + */ + public ComputeOptions options() { + return options; + } + + /** + * Returns a base name for testing resources generated using a random UUID. This base name can be + * prepended to resource names to prevent name clashes. This method always returns a 30 characters + * long prefix. Since Compute Engine resource names can be at most 63 characters long your suffix + * should be no longer than 33 characters. 
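+ * <p>For example, a test could name a disk {@code baseResourceName() + "disk"}; the
+ * {@code "disk"} suffix here is only an illustration.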
+ */ + public static String baseResourceName() { + return "test-" + UUID.randomUUID().toString().replace("-", "").substring(0, 24) + "-"; + } + + /** + * Creates a {@code RemoteComputeHelper} object for the given project id and JSON key input + * stream. + * + * @param projectId id of the project to be used for running the tests + * @param keyStream input stream for a JSON key + * @return A {@code RemoteComputeHelper} object for the provided options + * @throws ComputeHelperException if {@code keyStream} is not a valid JSON key stream + */ + public static RemoteComputeHelper create(String projectId, InputStream keyStream) { + try { + ComputeOptions computeOptions = ComputeOptions.builder() + .authCredentials(AuthCredentials.createForJson(keyStream)) + .projectId(projectId) + .retryParams(retryParams()) + .connectTimeout(60000) + .readTimeout(60000) + .build(); + return new RemoteComputeHelper(computeOptions); + } catch (IOException ex) { + if (log.isLoggable(Level.WARNING)) { + log.log(Level.WARNING, ex.getMessage()); + } + throw ComputeHelperException.translate(ex); + } + } + + /** + * Creates a {@code RemoteComputeHelper} object using default project id and authentication + * credentials. + */ + public static RemoteComputeHelper create() { + ComputeOptions computeOptions = ComputeOptions.builder() + .retryParams(retryParams()) + .connectTimeout(60000) + .readTimeout(60000) + .build(); + return new RemoteComputeHelper(computeOptions); + } + + private static RetryParams retryParams() { + return RetryParams.builder() + .retryMaxAttempts(10) + .retryMinAttempts(6) + .maxRetryDelayMillis(30000) + .totalRetryPeriodMillis(120000) + .initialRetryDelayMillis(250) + .build(); + } + + public static class ComputeHelperException extends RuntimeException { + + private static final long serialVersionUID = -5747977015007639912L; + + public ComputeHelperException(String message, Throwable cause) { + super(message, cause); + } + + public static ComputeHelperException translate(Exception ex) { + return new ComputeHelperException(ex.getMessage(), ex); + } + } +} diff --git a/gcloud-java-compute/src/main/java/com/google/cloud/compute/testing/package-info.java b/gcloud-java-compute/src/main/java/com/google/cloud/compute/testing/package-info.java new file mode 100644 index 000000000000..30da2872a66c --- /dev/null +++ b/gcloud-java-compute/src/main/java/com/google/cloud/compute/testing/package-info.java @@ -0,0 +1,31 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * A testing helper for Google Compute Engine. + * + *
+ * <p>A simple usage example:
+ *
+ * <p>Before the test:
+ *
+ * <pre> {@code
+ * RemoteComputeHelper computeHelper = RemoteComputeHelper.create();
+ * Compute compute = computeHelper.options().service();
+ * } </pre>
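+ *
+ * <p>The options returned by the helper are preconfigured with custom retry settings and
+ * 60-second connect and read timeouts, as documented on {@link RemoteComputeHelper}.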
+ * + * @see + * gcloud-java tools for testing + */ +package com.google.cloud.compute.testing; diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressIdTest.java new file mode 100644 index 000000000000..361012f3a30f --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressIdTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class AddressIdTest { + + private static final String PROJECT = "project"; + private static final String REGION = "region"; + private static final String NAME = "addr"; + private static final String GLOBAL_URL = + "https://www.googleapis.com/compute/v1/projects/project/global/addresses/addr"; + private static final String REGION_URL = + "https://www.googleapis.com/compute/v1/projects/project/regions/region/addresses/addr"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + GlobalAddressId addressId = GlobalAddressId.of(PROJECT, NAME); + assertEquals(PROJECT, addressId.project()); + assertEquals(NAME, addressId.address()); + assertEquals(GLOBAL_URL, addressId.selfLink()); + addressId = GlobalAddressId.of(NAME); + assertNull(addressId.project()); + assertEquals(NAME, addressId.address()); + RegionAddressId regionAddressId = RegionAddressId.of(PROJECT, REGION, NAME); + assertEquals(PROJECT, regionAddressId.project()); + assertEquals(REGION, regionAddressId.region()); + assertEquals(NAME, regionAddressId.address()); + assertEquals(REGION_URL, regionAddressId.selfLink()); + regionAddressId = RegionAddressId.of(RegionId.of(PROJECT, REGION), NAME); + assertEquals(PROJECT, regionAddressId.project()); + assertEquals(REGION, regionAddressId.region()); + assertEquals(NAME, regionAddressId.address()); + assertEquals(REGION_URL, regionAddressId.selfLink()); + regionAddressId = RegionAddressId.of(REGION, NAME); + assertNull(regionAddressId.project()); + assertEquals(REGION, regionAddressId.region()); + assertEquals(NAME, regionAddressId.address()); + } + + @Test + public void testToAndFromUrlGlobal() { + GlobalAddressId addressId = GlobalAddressId.of(PROJECT, NAME); + compareAddressId(addressId, GlobalAddressId.fromUrl(addressId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid global address URL"); + GlobalAddressId.fromUrl("notMatchingUrl"); + } + + @Test + public void testToAndFromUrlRegion() { + RegionAddressId regionAddressId = RegionAddressId.of(PROJECT, REGION, NAME); + 
compareRegionAddressId(regionAddressId, RegionAddressId.fromUrl(regionAddressId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid region address URL"); + RegionAddressId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + GlobalAddressId addressId = GlobalAddressId.of(PROJECT, NAME); + assertSame(addressId, addressId.setProjectId(PROJECT)); + compareAddressId(addressId, GlobalAddressId.of(NAME).setProjectId(PROJECT)); + RegionAddressId regionAddressId = RegionAddressId.of(PROJECT, REGION, NAME); + assertSame(regionAddressId, regionAddressId.setProjectId(PROJECT)); + compareRegionAddressId(regionAddressId, RegionAddressId.of(REGION, NAME).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(GlobalAddressId.matchesUrl(GlobalAddressId.of(PROJECT, NAME).selfLink())); + assertFalse(GlobalAddressId.matchesUrl("notMatchingUrl")); + assertTrue(RegionAddressId.matchesUrl(RegionAddressId.of(PROJECT, REGION, NAME).selfLink())); + assertFalse(RegionAddressId.matchesUrl("notMatchingUrl")); + } + + private void compareAddressId(GlobalAddressId expected, GlobalAddressId value) { + assertEquals(expected, value); + assertEquals(expected.project(), expected.project()); + assertEquals(expected.address(), expected.address()); + assertEquals(expected.selfLink(), expected.selfLink()); + assertEquals(expected.hashCode(), expected.hashCode()); + } + + private void compareRegionAddressId(RegionAddressId expected, RegionAddressId value) { + assertEquals(expected, value); + assertEquals(expected.project(), expected.project()); + assertEquals(expected.region(), expected.region()); + assertEquals(expected.address(), expected.address()); + assertEquals(expected.selfLink(), expected.selfLink()); + assertEquals(expected.hashCode(), expected.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressInfoTest.java new file mode 100644 index 000000000000..1e69608c8d40 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressInfoTest.java @@ -0,0 +1,199 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableList; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; + +public class AddressInfoTest { + + private static final String ADDRESS = "192.168.1.1"; + private static final Long CREATION_TIMESTAMP = 1452602400000L; + private static final String DESCRIPTION = "description"; + private static final String GENERATED_ID = "42"; + private static final GlobalAddressId GLOBAL_ADDRESS_ID = GlobalAddressId.of("project", "address"); + private static final RegionAddressId REGION_ADDRESS_ID = + RegionAddressId.of("project", "region", "address"); + private static final AddressInfo.Status STATUS = AddressInfo.Status.RESERVED; + private static final List GLOBAL_FORWARDING_RULES = + ImmutableList.of(GlobalForwardingRuleId.of("project", "forwardingRule1"), + GlobalForwardingRuleId.of("project", "forwardingRule2")); + private static final List REGION_FORWARDING_RULES = + ImmutableList.of(RegionForwardingRuleId.of("project", "region", "forwardingRule1"), + RegionForwardingRuleId.of("project", "region", "forwardingRule2")); + private static final AddressInfo.InstanceUsage INSTANCE_USAGE = + new AddressInfo.InstanceUsage(InstanceId.of("project", "zone", "instance1")); + private static final AddressInfo.GlobalForwardingUsage GLOBAL_FORWARDING_USAGE = + new AddressInfo.GlobalForwardingUsage(GLOBAL_FORWARDING_RULES); + private static final AddressInfo.RegionForwardingUsage REGION_FORWARDING_USAGE = + new AddressInfo.RegionForwardingUsage(REGION_FORWARDING_RULES); + private static final AddressInfo INSTANCE_ADDRESS_INFO = AddressInfo.builder(REGION_ADDRESS_ID) + .address(ADDRESS) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(INSTANCE_USAGE) + .build(); + private static final AddressInfo GLOBAL_FORWARDING_ADDRESS_INFO = + AddressInfo.builder(GLOBAL_ADDRESS_ID) + .address(ADDRESS) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(GLOBAL_FORWARDING_USAGE) + .build(); + private static final AddressInfo REGION_FORWARDING_ADDRESS_INFO = + AddressInfo.builder(REGION_ADDRESS_ID) + .address(ADDRESS) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(REGION_FORWARDING_USAGE) + .build(); + + @Test + public void testToBuilder() { + compareAddressInfo(INSTANCE_ADDRESS_INFO, INSTANCE_ADDRESS_INFO.toBuilder().build()); + AddressInfo addressInfo = INSTANCE_ADDRESS_INFO.toBuilder() + .address("192.168.1.2") + .description("description2") + .build(); + assertEquals("description2", addressInfo.description()); + assertEquals("192.168.1.2", addressInfo.address()); + addressInfo = addressInfo.toBuilder() + .address("192.168.1.1") + .description("description") + .build(); + compareAddressInfo(INSTANCE_ADDRESS_INFO, addressInfo); + } + + @Test + public void testToBuilderIncomplete() { + AddressInfo addressInfo = AddressInfo.builder(GLOBAL_ADDRESS_ID).build(); + assertEquals(addressInfo, addressInfo.toBuilder().build()); + addressInfo = AddressInfo.builder(REGION_ADDRESS_ID).build(); + assertEquals(addressInfo, addressInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(ADDRESS, INSTANCE_ADDRESS_INFO.address()); + assertEquals(CREATION_TIMESTAMP, 
INSTANCE_ADDRESS_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, INSTANCE_ADDRESS_INFO.description()); + assertEquals(GENERATED_ID, INSTANCE_ADDRESS_INFO.generatedId()); + assertEquals(REGION_ADDRESS_ID, INSTANCE_ADDRESS_INFO.addressId()); + assertEquals(STATUS, INSTANCE_ADDRESS_INFO.status()); + assertEquals(INSTANCE_USAGE, INSTANCE_ADDRESS_INFO.usage()); + Assert.assertEquals(INSTANCE_USAGE.instance(), + INSTANCE_ADDRESS_INFO.usage().instance()); + assertEquals(ADDRESS, REGION_FORWARDING_ADDRESS_INFO.address()); + assertEquals(CREATION_TIMESTAMP, REGION_FORWARDING_ADDRESS_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, REGION_FORWARDING_ADDRESS_INFO.description()); + assertEquals(GENERATED_ID, REGION_FORWARDING_ADDRESS_INFO.generatedId()); + assertEquals(REGION_ADDRESS_ID, REGION_FORWARDING_ADDRESS_INFO.addressId()); + assertEquals(STATUS, REGION_FORWARDING_ADDRESS_INFO.status()); + assertEquals(REGION_FORWARDING_USAGE, REGION_FORWARDING_ADDRESS_INFO.usage()); + Assert.assertEquals(REGION_FORWARDING_RULES, + REGION_FORWARDING_ADDRESS_INFO.usage().forwardingRules()); + assertEquals(ADDRESS, GLOBAL_FORWARDING_ADDRESS_INFO.address()); + assertEquals(CREATION_TIMESTAMP, GLOBAL_FORWARDING_ADDRESS_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, GLOBAL_FORWARDING_ADDRESS_INFO.description()); + assertEquals(GENERATED_ID, GLOBAL_FORWARDING_ADDRESS_INFO.generatedId()); + assertEquals(GLOBAL_ADDRESS_ID, GLOBAL_FORWARDING_ADDRESS_INFO.addressId()); + assertEquals(STATUS, GLOBAL_FORWARDING_ADDRESS_INFO.status()); + assertEquals(GLOBAL_FORWARDING_USAGE, GLOBAL_FORWARDING_ADDRESS_INFO.usage()); + Assert.assertEquals(GLOBAL_FORWARDING_RULES, + GLOBAL_FORWARDING_ADDRESS_INFO.usage().forwardingRules()); + } + + @Test + public void testOf() { + AddressInfo addressInfo = AddressInfo.of("address"); + assertEquals(GlobalAddressId.of("address"), addressInfo.addressId()); + assertNull(addressInfo.address()); + assertNull(addressInfo.creationTimestamp()); + assertNull(addressInfo.description()); + assertNull(addressInfo.generatedId()); + assertNull(addressInfo.status()); + assertNull(addressInfo.usage()); + addressInfo = AddressInfo.of(GLOBAL_ADDRESS_ID); + assertEquals(GLOBAL_ADDRESS_ID, addressInfo.addressId()); + assertNull(addressInfo.address()); + assertNull(addressInfo.creationTimestamp()); + assertNull(addressInfo.description()); + assertNull(addressInfo.generatedId()); + assertNull(addressInfo.status()); + assertNull(addressInfo.usage()); + addressInfo = AddressInfo.of("region", "address"); + assertEquals(RegionAddressId.of("region", "address"), addressInfo.addressId()); + assertNull(addressInfo.address()); + assertNull(addressInfo.creationTimestamp()); + assertNull(addressInfo.description()); + assertNull(addressInfo.generatedId()); + assertNull(addressInfo.status()); + assertNull(addressInfo.usage()); + addressInfo = AddressInfo.of(RegionId.of("region"), "address"); + assertEquals(RegionAddressId.of("region", "address"), addressInfo.addressId()); + assertNull(addressInfo.address()); + assertNull(addressInfo.creationTimestamp()); + assertNull(addressInfo.description()); + assertNull(addressInfo.generatedId()); + assertNull(addressInfo.status()); + assertNull(addressInfo.usage()); + } + + @Test + public void testToPbAndFromPb() { + compareAddressInfo(INSTANCE_ADDRESS_INFO, AddressInfo.fromPb(INSTANCE_ADDRESS_INFO.toPb())); + compareAddressInfo(REGION_FORWARDING_ADDRESS_INFO, + AddressInfo.fromPb(REGION_FORWARDING_ADDRESS_INFO.toPb())); + 
compareAddressInfo(GLOBAL_FORWARDING_ADDRESS_INFO, + AddressInfo.fromPb(GLOBAL_FORWARDING_ADDRESS_INFO.toPb())); + AddressInfo addressInfo = AddressInfo.builder(GLOBAL_ADDRESS_ID).build(); + compareAddressInfo(addressInfo, AddressInfo.fromPb(addressInfo.toPb())); + } + + @Test + public void testSetProjectId() { + AddressInfo addressInfo = GLOBAL_FORWARDING_ADDRESS_INFO.toBuilder() + .addressId(GlobalAddressId.of(GLOBAL_ADDRESS_ID.address())) + .build(); + compareAddressInfo(GLOBAL_FORWARDING_ADDRESS_INFO, addressInfo.setProjectId("project")); + } + + private void compareAddressInfo(AddressInfo expected, AddressInfo value) { + assertEquals(expected, value); + assertEquals(expected.address(), value.address()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.addressId(), value.addressId()); + assertEquals(expected.usage(), value.usage()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressTest.java new file mode 100644 index 000000000000..914d7e510b3a --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AddressTest.java @@ -0,0 +1,294 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; + +import org.junit.After; +import org.junit.Test; + +import java.util.List; + +public class AddressTest { + + private static final String ADDRESS = "192.168.1.1"; + private static final Long CREATION_TIMESTAMP = 1452602400000L; + private static final String DESCRIPTION = "description"; + private static final String GENERATED_ID = "42"; + private static final GlobalAddressId GLOBAL_ADDRESS_ID = GlobalAddressId.of("project", "address"); + private static final RegionAddressId REGION_ADDRESS_ID = + RegionAddressId.of("project", "region", "address"); + private static final AddressInfo.Status STATUS = AddressInfo.Status.RESERVED; + private static final List GLOBAL_FORWARDING_RULES = + ImmutableList.of(GlobalForwardingRuleId.of("project", "forwardingRule1"), + GlobalForwardingRuleId.of("project", "forwardingRule2")); + private static final List REGION_FORWARDING_RULES = + ImmutableList.of(RegionForwardingRuleId.of("project", "region", "forwardingRule1"), + RegionForwardingRuleId.of("project", "region", "forwardingRule2")); + private static final AddressInfo.InstanceUsage INSTANCE_USAGE = + new AddressInfo.InstanceUsage(InstanceId.of("project", "zone", "instance1")); + private static final AddressInfo.GlobalForwardingUsage GLOBAL_FORWARDING_USAGE = + new AddressInfo.GlobalForwardingUsage(GLOBAL_FORWARDING_RULES); + private static final AddressInfo.RegionForwardingUsage REGION_FORWARDING_USAGE = + new AddressInfo.RegionForwardingUsage(REGION_FORWARDING_RULES); + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Address globalForwardingAddress; + private Address instanceAddress; + private Address regionForwardingAddress; + private Address address; + + private void initializeExpectedAddress(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + instanceAddress = new Address.Builder(serviceMockReturnsOptions, REGION_ADDRESS_ID) + .address(ADDRESS) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(INSTANCE_USAGE) + .build(); + globalForwardingAddress = new Address.Builder(serviceMockReturnsOptions, GLOBAL_ADDRESS_ID) + .address(ADDRESS) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(GLOBAL_FORWARDING_USAGE) + .build(); + regionForwardingAddress = new Address.Builder(serviceMockReturnsOptions, REGION_ADDRESS_ID) + .address(ADDRESS) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(REGION_FORWARDING_USAGE) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeAddress() { + address = new Address.Builder(compute, REGION_ADDRESS_ID) + .address(ADDRESS) + 
.creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .generatedId(GENERATED_ID) + .status(STATUS) + .usage(REGION_FORWARDING_USAGE) + .build(); + } + + @After + public void tearDown() throws Exception { + verify(serviceMockReturnsOptions); + } + + @Test + public void testBuilder() { + initializeExpectedAddress(6); + assertEquals(ADDRESS, instanceAddress.address()); + assertEquals(CREATION_TIMESTAMP, instanceAddress.creationTimestamp()); + assertEquals(DESCRIPTION, instanceAddress.description()); + assertEquals(GENERATED_ID, instanceAddress.generatedId()); + assertEquals(REGION_ADDRESS_ID, instanceAddress.addressId()); + assertEquals(STATUS, instanceAddress.status()); + assertEquals(INSTANCE_USAGE, instanceAddress.usage()); + assertSame(serviceMockReturnsOptions, instanceAddress.compute()); + assertEquals(ADDRESS, regionForwardingAddress.address()); + assertEquals(CREATION_TIMESTAMP, regionForwardingAddress.creationTimestamp()); + assertEquals(DESCRIPTION, regionForwardingAddress.description()); + assertEquals(GENERATED_ID, regionForwardingAddress.generatedId()); + assertEquals(REGION_ADDRESS_ID, regionForwardingAddress.addressId()); + assertEquals(STATUS, regionForwardingAddress.status()); + assertEquals(REGION_FORWARDING_USAGE, regionForwardingAddress.usage()); + assertSame(serviceMockReturnsOptions, regionForwardingAddress.compute()); + assertEquals(ADDRESS, globalForwardingAddress.address()); + assertEquals(CREATION_TIMESTAMP, globalForwardingAddress.creationTimestamp()); + assertEquals(DESCRIPTION, globalForwardingAddress.description()); + assertEquals(GENERATED_ID, globalForwardingAddress.generatedId()); + assertEquals(GLOBAL_ADDRESS_ID, globalForwardingAddress.addressId()); + assertEquals(STATUS, globalForwardingAddress.status()); + assertEquals(GLOBAL_FORWARDING_USAGE, globalForwardingAddress.usage()); + assertSame(serviceMockReturnsOptions, globalForwardingAddress.compute()); + Address address = new Address.Builder(serviceMockReturnsOptions, GLOBAL_ADDRESS_ID).build(); + assertEquals(GLOBAL_ADDRESS_ID, address.addressId()); + assertSame(serviceMockReturnsOptions, address.compute()); + assertNull(address.address()); + assertNull(address.creationTimestamp()); + assertNull(address.description()); + assertNull(address.generatedId()); + assertNull(address.status()); + assertNull(address.usage()); + address = new Address.Builder(serviceMockReturnsOptions, REGION_ADDRESS_ID).build(); + assertEquals(REGION_ADDRESS_ID, address.addressId()); + assertSame(serviceMockReturnsOptions, address.compute()); + assertNull(address.address()); + assertNull(address.creationTimestamp()); + assertNull(address.description()); + assertNull(address.generatedId()); + assertNull(address.status()); + assertNull(address.usage()); + address = new Address.Builder(serviceMockReturnsOptions, REGION_ADDRESS_ID) + .addressId(GLOBAL_ADDRESS_ID) + .build(); + assertEquals(GLOBAL_ADDRESS_ID, address.addressId()); + assertSame(serviceMockReturnsOptions, address.compute()); + assertNull(address.address()); + assertNull(address.creationTimestamp()); + assertNull(address.description()); + assertNull(address.generatedId()); + assertNull(address.status()); + assertNull(address.usage()); + } + + @Test + public void testToBuilder() { + initializeExpectedAddress(16); + compareAddress(instanceAddress, instanceAddress.toBuilder().build()); + compareAddress(globalForwardingAddress, globalForwardingAddress.toBuilder().build()); + compareAddress(regionForwardingAddress, regionForwardingAddress.toBuilder().build()); + 
Address newAddress = instanceAddress.toBuilder().description("newDescription").build(); + assertEquals("newDescription", newAddress.description()); + newAddress = newAddress.toBuilder().description("description").build(); + compareAddress(instanceAddress, newAddress); + } + + @Test + public void testToAndFromPb() { + initializeExpectedAddress(20); + compareAddress(globalForwardingAddress, + Address.fromPb(serviceMockReturnsOptions, globalForwardingAddress.toPb())); + compareAddress(regionForwardingAddress, + Address.fromPb(serviceMockReturnsOptions, regionForwardingAddress.toPb())); + compareAddress(instanceAddress, + Address.fromPb(serviceMockReturnsOptions, instanceAddress.toPb())); + Address address = new Address.Builder(serviceMockReturnsOptions, GLOBAL_ADDRESS_ID).build(); + compareAddress(address, Address.fromPb(serviceMockReturnsOptions, address.toPb())); + address = new Address.Builder(serviceMockReturnsOptions, REGION_ADDRESS_ID).build(); + compareAddress(address, Address.fromPb(serviceMockReturnsOptions, address.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedAddress(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + expect(compute.deleteAddress(REGION_ADDRESS_ID)).andReturn(operation); + replay(compute); + initializeAddress(); + assertSame(operation, address.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedAddress(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteAddress(REGION_ADDRESS_ID)).andReturn(null); + replay(compute); + initializeAddress(); + assertNull(address.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedAddress(3); + Compute.AddressOption[] expectedOptions = {Compute.AddressOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getAddress(REGION_ADDRESS_ID, expectedOptions)).andReturn(regionForwardingAddress); + replay(compute); + initializeAddress(); + assertTrue(address.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedAddress(3); + Compute.AddressOption[] expectedOptions = {Compute.AddressOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getAddress(REGION_ADDRESS_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeAddress(); + assertFalse(address.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedAddress(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getAddress(REGION_ADDRESS_ID)).andReturn(regionForwardingAddress); + replay(compute); + initializeAddress(); + Address updatedAddress = address.reload(); + compareAddress(regionForwardingAddress, updatedAddress); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedAddress(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getAddress(REGION_ADDRESS_ID)).andReturn(null); + replay(compute); + initializeAddress(); + assertNull(address.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedAddress(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getAddress(REGION_ADDRESS_ID, Compute.AddressOption.fields())) + .andReturn(regionForwardingAddress); + 
replay(compute); + initializeAddress(); + Address updatedAddress = address.reload(Compute.AddressOption.fields()); + compareAddress(regionForwardingAddress, updatedAddress); + verify(compute); + } + + private void compareAddress(Address expected, Address value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.address(), value.address()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.addressId(), value.addressId()); + assertEquals(expected.usage(), value.usage()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/AttachedDiskTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AttachedDiskTest.java new file mode 100644 index 000000000000..6e68e18590c2 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/AttachedDiskTest.java @@ -0,0 +1,392 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.compute.AttachedDisk.AttachedDiskConfiguration.InterfaceType; +import com.google.cloud.compute.AttachedDisk.AttachedDiskConfiguration.Type; +import com.google.cloud.compute.AttachedDisk.CreateDiskConfiguration; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.AttachedDisk.ScratchDiskConfiguration; +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class AttachedDiskTest { + + private static final Boolean AUTO_DELETE = true; + private static final Boolean BOOT = true; + private static final Integer INDEX = 0; + private static final String DEVICE_NAME = "deviceName"; + private static final String DISK_NAME = "diskName"; + private static final DiskTypeId DISK_TYPE_ID = DiskTypeId.of("project", "zone", "diskType"); + private static final Long DISK_SIZE_GB = 42L; + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final ImageId IMAGE_ID = ImageId.of("project", "image"); + private static final InterfaceType INTERFACE_TYPE = InterfaceType.NVME; + private static final PersistentDiskConfiguration.Mode MODE = + PersistentDiskConfiguration.Mode.READ_ONLY; + private static final PersistentDiskConfiguration PERSISTENT_DISK_CONFIGURATION = + PersistentDiskConfiguration.builder(DISK_ID) + .boot(BOOT) + .autoDelete(AUTO_DELETE) + .mode(MODE) + .build(); + private static final ScratchDiskConfiguration SCRATCH_DISK_CONFIGURATION = + ScratchDiskConfiguration.builder(DISK_TYPE_ID).interfaceType(INTERFACE_TYPE).build(); + private static final CreateDiskConfiguration CREATE_DISK_CONFIGURATION = + CreateDiskConfiguration.builder(IMAGE_ID) + .autoDelete(AUTO_DELETE) + .diskName(DISK_NAME) + .diskType(DISK_TYPE_ID) + .diskSizeGb(DISK_SIZE_GB) + .sourceImage(IMAGE_ID) + .build(); + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final AttachedDisk PERSISTENT_DISK = + AttachedDisk.builder(PERSISTENT_DISK_CONFIGURATION) + .deviceName(DEVICE_NAME) + .index(INDEX) + .licenses(LICENSES) + .build(); + private static final AttachedDisk SCRATCH_DISK = + AttachedDisk.builder(SCRATCH_DISK_CONFIGURATION) + .deviceName(DEVICE_NAME) + .index(INDEX) + .licenses(LICENSES) + .build(); + private static final AttachedDisk CREATED_DISK = + AttachedDisk.builder(CREATE_DISK_CONFIGURATION) + .deviceName(DEVICE_NAME) + .index(INDEX) + .licenses(LICENSES) + .build(); + + @Test + public void testConfigurationToBuilder() { + comparePersistentDiskConfiguration(PERSISTENT_DISK_CONFIGURATION, + PERSISTENT_DISK_CONFIGURATION.toBuilder().build()); + compareScratchDiskConfiguration(SCRATCH_DISK_CONFIGURATION, + SCRATCH_DISK_CONFIGURATION.toBuilder().build()); + compareCreateDiskConfiguration(CREATE_DISK_CONFIGURATION, + CREATE_DISK_CONFIGURATION.toBuilder().build()); + PersistentDiskConfiguration persistentDiskConfiguration = + PERSISTENT_DISK_CONFIGURATION.toBuilder().autoDelete(false).build(); + assertFalse(persistentDiskConfiguration.autoDelete()); + persistentDiskConfiguration = + persistentDiskConfiguration.toBuilder().autoDelete(AUTO_DELETE).build(); + assertEquals(PERSISTENT_DISK_CONFIGURATION, persistentDiskConfiguration); + 
ScratchDiskConfiguration scratchDiskConfiguration = + SCRATCH_DISK_CONFIGURATION.toBuilder().interfaceType(InterfaceType.SCSI).build(); + assertEquals(InterfaceType.SCSI, scratchDiskConfiguration.interfaceType()); + scratchDiskConfiguration = + scratchDiskConfiguration.toBuilder().interfaceType(INTERFACE_TYPE).build(); + assertEquals(SCRATCH_DISK_CONFIGURATION, scratchDiskConfiguration); + CreateDiskConfiguration createDiskConfiguration = + CREATE_DISK_CONFIGURATION.toBuilder().autoDelete(false).build(); + assertFalse(createDiskConfiguration.autoDelete()); + createDiskConfiguration = createDiskConfiguration.toBuilder().autoDelete(AUTO_DELETE).build(); + assertEquals(CREATE_DISK_CONFIGURATION, createDiskConfiguration); + } + + @Test + public void testConfigurationToBuilderIncomplete() { + PersistentDiskConfiguration persistentConfiguration = PersistentDiskConfiguration.of(DISK_ID); + comparePersistentDiskConfiguration(persistentConfiguration, + AttachedDisk.AttachedDiskConfiguration.fromPb( + persistentConfiguration.toPb())); + ScratchDiskConfiguration scratchDiskConfiguration = ScratchDiskConfiguration.of(DISK_TYPE_ID); + compareScratchDiskConfiguration(scratchDiskConfiguration, + AttachedDisk.AttachedDiskConfiguration.fromPb( + scratchDiskConfiguration.toPb())); + CreateDiskConfiguration createDiskConfiguration = CreateDiskConfiguration.of(IMAGE_ID); + compareCreateDiskConfiguration(createDiskConfiguration, + AttachedDisk.AttachedDiskConfiguration.fromPb( + createDiskConfiguration.toPb())); + } + + @Test + public void testToBuilder() { + compareAttachedDisk(PERSISTENT_DISK, PERSISTENT_DISK.toBuilder().build()); + compareAttachedDisk(SCRATCH_DISK, SCRATCH_DISK.toBuilder().build()); + compareAttachedDisk(CREATED_DISK, CREATED_DISK.toBuilder().build()); + AttachedDisk attachedDisk = PERSISTENT_DISK.toBuilder().deviceName("newDeviceName").build(); + assertEquals("newDeviceName", attachedDisk.deviceName()); + attachedDisk = attachedDisk.toBuilder().deviceName(DEVICE_NAME).build(); + compareAttachedDisk(PERSISTENT_DISK, attachedDisk); + } + + @Test + public void testToBuilderIncomplete() { + AttachedDisk attachedDisk = AttachedDisk.of(PERSISTENT_DISK_CONFIGURATION); + assertEquals(attachedDisk, attachedDisk.toBuilder().build()); + attachedDisk = AttachedDisk.of(SCRATCH_DISK_CONFIGURATION); + assertEquals(attachedDisk, attachedDisk.toBuilder().build()); + attachedDisk = AttachedDisk.of(CREATE_DISK_CONFIGURATION); + assertEquals(attachedDisk, attachedDisk.toBuilder().build()); + } + + @Test + public void testConfigurationBuilder() { + assertTrue(CREATE_DISK_CONFIGURATION.boot()); + assertEquals(AUTO_DELETE, CREATE_DISK_CONFIGURATION.autoDelete()); + assertNull(CREATE_DISK_CONFIGURATION.interfaceType()); + assertEquals(Type.PERSISTENT, CREATE_DISK_CONFIGURATION.type()); + assertEquals(IMAGE_ID, CREATE_DISK_CONFIGURATION.sourceImage()); + assertEquals(DISK_NAME, CREATE_DISK_CONFIGURATION.diskName()); + assertEquals(DISK_TYPE_ID, CREATE_DISK_CONFIGURATION.diskType()); + assertEquals(DISK_SIZE_GB, CREATE_DISK_CONFIGURATION.diskSizeGb()); + assertEquals(IMAGE_ID, CREATE_DISK_CONFIGURATION.sourceImage()); + + assertEquals(BOOT, PERSISTENT_DISK_CONFIGURATION.boot()); + assertEquals(AUTO_DELETE, PERSISTENT_DISK_CONFIGURATION.autoDelete()); + assertNull(PERSISTENT_DISK_CONFIGURATION.interfaceType()); + assertEquals(Type.PERSISTENT, PERSISTENT_DISK_CONFIGURATION.type()); + assertEquals(MODE, PERSISTENT_DISK_CONFIGURATION.mode()); + assertEquals(DISK_ID, PERSISTENT_DISK_CONFIGURATION.sourceDisk()); + + 
assertFalse(SCRATCH_DISK_CONFIGURATION.boot()); + assertTrue(SCRATCH_DISK_CONFIGURATION.autoDelete()); + assertEquals(INTERFACE_TYPE, SCRATCH_DISK_CONFIGURATION.interfaceType()); + assertEquals(Type.SCRATCH, SCRATCH_DISK_CONFIGURATION.type()); + assertEquals(DISK_TYPE_ID, SCRATCH_DISK_CONFIGURATION.diskType()); + } + + @Test + public void testBuilder() { + assertEquals(PERSISTENT_DISK_CONFIGURATION, PERSISTENT_DISK.configuration()); + assertEquals(DEVICE_NAME, PERSISTENT_DISK.deviceName()); + assertEquals(INDEX, PERSISTENT_DISK.index()); + assertEquals(LICENSES, PERSISTENT_DISK.licenses()); + assertEquals(SCRATCH_DISK_CONFIGURATION, SCRATCH_DISK.configuration()); + assertEquals(DEVICE_NAME, SCRATCH_DISK.deviceName()); + assertEquals(INDEX, SCRATCH_DISK.index()); + assertEquals(LICENSES, SCRATCH_DISK.licenses()); + assertEquals(CREATE_DISK_CONFIGURATION, CREATED_DISK.configuration()); + assertEquals(DEVICE_NAME, CREATED_DISK.deviceName()); + assertEquals(INDEX, CREATED_DISK.index()); + assertEquals(LICENSES, CREATED_DISK.licenses()); + } + + @Test + public void testConfigurationOf() { + PersistentDiskConfiguration persistentConfiguration = PersistentDiskConfiguration.of(DISK_ID); + assertEquals(DISK_ID, persistentConfiguration.sourceDisk()); + assertEquals(Type.PERSISTENT, persistentConfiguration.type()); + assertNull(persistentConfiguration.autoDelete()); + assertNull(persistentConfiguration.boot()); + assertNull(persistentConfiguration.interfaceType()); + ScratchDiskConfiguration scratchDiskConfiguration = ScratchDiskConfiguration.of(DISK_TYPE_ID); + assertEquals(DISK_TYPE_ID, scratchDiskConfiguration.diskType()); + assertNull(scratchDiskConfiguration.interfaceType()); + assertEquals(Type.SCRATCH, scratchDiskConfiguration.type()); + assertTrue(scratchDiskConfiguration.autoDelete()); + assertFalse(scratchDiskConfiguration.boot()); + assertNull(scratchDiskConfiguration.interfaceType()); + CreateDiskConfiguration createDiskConfiguration = CreateDiskConfiguration.of(IMAGE_ID); + assertEquals(IMAGE_ID, createDiskConfiguration.sourceImage()); + assertNull(createDiskConfiguration.diskType()); + assertNull(createDiskConfiguration.diskName()); + assertNull(createDiskConfiguration.diskSizeGb()); + assertNull(createDiskConfiguration.interfaceType()); + assertEquals(Type.PERSISTENT, createDiskConfiguration.type()); + assertNull(createDiskConfiguration.autoDelete()); + assertTrue(createDiskConfiguration.boot()); + assertNull(createDiskConfiguration.interfaceType()); + } + + @Test + public void testOf() { + AttachedDisk attachedDisk = AttachedDisk.of(DEVICE_NAME, PERSISTENT_DISK_CONFIGURATION); + assertEquals(PERSISTENT_DISK_CONFIGURATION, attachedDisk.configuration()); + assertEquals(DEVICE_NAME, attachedDisk.deviceName()); + assertNull(attachedDisk.index()); + assertNull(attachedDisk.licenses()); + attachedDisk = AttachedDisk.of(PERSISTENT_DISK_CONFIGURATION); + assertEquals(PERSISTENT_DISK_CONFIGURATION, attachedDisk.configuration()); + assertNull(attachedDisk.deviceName()); + assertNull(attachedDisk.index()); + assertNull(attachedDisk.licenses()); + attachedDisk = AttachedDisk.of(DEVICE_NAME, SCRATCH_DISK_CONFIGURATION); + assertEquals(SCRATCH_DISK_CONFIGURATION, attachedDisk.configuration()); + assertEquals(DEVICE_NAME, attachedDisk.deviceName()); + assertNull(attachedDisk.index()); + assertNull(attachedDisk.licenses()); + attachedDisk = AttachedDisk.of(SCRATCH_DISK_CONFIGURATION); + assertEquals(SCRATCH_DISK_CONFIGURATION, attachedDisk.configuration()); + assertNull(attachedDisk.deviceName()); + 
assertNull(attachedDisk.index()); + assertNull(attachedDisk.licenses()); + attachedDisk = AttachedDisk.of(DEVICE_NAME, CREATE_DISK_CONFIGURATION); + assertEquals(CREATE_DISK_CONFIGURATION, attachedDisk.configuration()); + assertEquals(DEVICE_NAME, attachedDisk.deviceName()); + assertNull(attachedDisk.index()); + assertNull(attachedDisk.licenses()); + attachedDisk = AttachedDisk.of(CREATE_DISK_CONFIGURATION); + assertEquals(CREATE_DISK_CONFIGURATION, attachedDisk.configuration()); + assertNull(attachedDisk.deviceName()); + assertNull(attachedDisk.index()); + assertNull(attachedDisk.licenses()); + } + + @Test + public void testConfigurationToAndFromPb() { + PersistentDiskConfiguration persistentConfiguration = + PersistentDiskConfiguration.of(DISK_ID); + comparePersistentDiskConfiguration(persistentConfiguration, + AttachedDisk.AttachedDiskConfiguration.fromPb( + persistentConfiguration.toPb())); + comparePersistentDiskConfiguration(PERSISTENT_DISK_CONFIGURATION, + AttachedDisk.AttachedDiskConfiguration.fromPb( + PERSISTENT_DISK_CONFIGURATION.toPb())); + ScratchDiskConfiguration scratchDiskConfiguration = + ScratchDiskConfiguration.of(DISK_TYPE_ID); + compareScratchDiskConfiguration(scratchDiskConfiguration, + AttachedDisk.AttachedDiskConfiguration.fromPb( + scratchDiskConfiguration.toPb())); + compareScratchDiskConfiguration(SCRATCH_DISK_CONFIGURATION, + AttachedDisk.AttachedDiskConfiguration.fromPb( + SCRATCH_DISK_CONFIGURATION.toPb())); + CreateDiskConfiguration createDiskConfiguration = + CreateDiskConfiguration.of(IMAGE_ID); + compareCreateDiskConfiguration(createDiskConfiguration, + AttachedDisk.AttachedDiskConfiguration.fromPb( + createDiskConfiguration.toPb())); + compareCreateDiskConfiguration(CREATE_DISK_CONFIGURATION, + AttachedDisk.AttachedDiskConfiguration.fromPb( + CREATE_DISK_CONFIGURATION.toPb())); + } + + @Test + public void testToAndFromPb() { + AttachedDisk attachedDisk = AttachedDisk.fromPb(PERSISTENT_DISK.toPb()); + compareAttachedDisk(PERSISTENT_DISK, attachedDisk); + attachedDisk = AttachedDisk.fromPb(SCRATCH_DISK.toPb()); + compareAttachedDisk(SCRATCH_DISK, attachedDisk); + attachedDisk = AttachedDisk.fromPb(CREATED_DISK.toPb()); + compareAttachedDisk(CREATED_DISK, attachedDisk); + attachedDisk = AttachedDisk.of(DEVICE_NAME, PERSISTENT_DISK_CONFIGURATION); + compareAttachedDisk(attachedDisk, AttachedDisk.fromPb(attachedDisk.toPb())); + attachedDisk = AttachedDisk.of(PERSISTENT_DISK_CONFIGURATION); + compareAttachedDisk(attachedDisk, AttachedDisk.fromPb(attachedDisk.toPb())); + attachedDisk = AttachedDisk.of(DEVICE_NAME, SCRATCH_DISK_CONFIGURATION); + compareAttachedDisk(attachedDisk, AttachedDisk.fromPb(attachedDisk.toPb())); + attachedDisk = AttachedDisk.of(SCRATCH_DISK_CONFIGURATION); + compareAttachedDisk(attachedDisk, AttachedDisk.fromPb(attachedDisk.toPb())); + attachedDisk = AttachedDisk.of(DEVICE_NAME, CREATE_DISK_CONFIGURATION); + compareAttachedDisk(attachedDisk, AttachedDisk.fromPb(attachedDisk.toPb())); + attachedDisk = AttachedDisk.of(CREATE_DISK_CONFIGURATION); + compareAttachedDisk(attachedDisk, AttachedDisk.fromPb(attachedDisk.toPb())); + } + + @Test + public void testConfigurationSetProjectId() { + PersistentDiskConfiguration persistentConfiguration = + PersistentDiskConfiguration.of(DiskId.of("zone", "disk")); + comparePersistentDiskConfiguration( + PersistentDiskConfiguration.of(DiskId.of("project", "zone", "disk")), + persistentConfiguration.setProjectId("project")); + ScratchDiskConfiguration scratchDiskConfiguration = + 
ScratchDiskConfiguration.of(DiskTypeId.of("zone", "diskType")); + compareScratchDiskConfiguration( + ScratchDiskConfiguration.of(DiskTypeId.of("project", "zone", "diskType")), + scratchDiskConfiguration.setProjectId("project")); + CreateDiskConfiguration createDiskConfiguration = CREATE_DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of("zone", "diskType")) + .sourceImage(ImageId.of("image")) + .build(); + compareCreateDiskConfiguration(CREATE_DISK_CONFIGURATION, + createDiskConfiguration.setProjectId("project")); + } + + @Test + public void testSetProjectId() { + PersistentDiskConfiguration persistentConfiguration = + PersistentDiskConfiguration.of(DiskId.of("zone", "disk")); + PersistentDiskConfiguration persistentConfigurationWithProject = + PersistentDiskConfiguration.of(DiskId.of("project", "zone", "disk")); + AttachedDisk attachedDisk = AttachedDisk.of(persistentConfiguration); + compareAttachedDisk(AttachedDisk.of(persistentConfigurationWithProject), + attachedDisk.setProjectId("project")); + ScratchDiskConfiguration scratchDiskConfiguration = + ScratchDiskConfiguration.of(DiskTypeId.of("zone", "diskType")); + ScratchDiskConfiguration scratchDiskConfigurationWithProject = + ScratchDiskConfiguration.of(DiskTypeId.of("project", "zone", "diskType")); + compareAttachedDisk(AttachedDisk.of(scratchDiskConfigurationWithProject), + AttachedDisk.of(scratchDiskConfiguration).setProjectId("project")); + CreateDiskConfiguration createDiskConfiguration = + CreateDiskConfiguration.of(ImageId.of("image")); + CreateDiskConfiguration createDiskConfigurationWithProject = + CreateDiskConfiguration.of(ImageId.of("project", "image")); + compareAttachedDisk(AttachedDisk.of(createDiskConfigurationWithProject), + AttachedDisk.of(createDiskConfiguration).setProjectId("project")); + createDiskConfiguration = CREATE_DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of("zone", "diskType")) + .sourceImage(ImageId.of("image")) + .build(); + compareAttachedDisk(AttachedDisk.of(CREATE_DISK_CONFIGURATION), + AttachedDisk.of(createDiskConfiguration).setProjectId("project")); + } + + public void compareAttachedDiskConfiguration(AttachedDisk.AttachedDiskConfiguration expected, + AttachedDisk.AttachedDiskConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.type(), value.type()); + assertEquals(expected.interfaceType(), value.interfaceType()); + assertEquals(expected.boot(), value.boot()); + assertEquals(expected.autoDelete(), value.autoDelete()); + assertEquals(expected.hashCode(), value.hashCode()); + } + + public void comparePersistentDiskConfiguration(PersistentDiskConfiguration expected, + PersistentDiskConfiguration value) { + compareAttachedDiskConfiguration(expected, value); + assertEquals(expected.mode(), value.mode()); + assertEquals(expected.sourceDisk(), value.sourceDisk()); + } + + public void compareCreateDiskConfiguration(CreateDiskConfiguration expected, + CreateDiskConfiguration value) { + compareAttachedDiskConfiguration(expected, value); + assertEquals(expected.diskName(), value.diskName()); + assertEquals(expected.diskType(), value.diskType()); + assertEquals(expected.diskSizeGb(), value.diskSizeGb()); + assertEquals(expected.sourceImage(), value.sourceImage()); + } + + public void compareScratchDiskConfiguration(ScratchDiskConfiguration expected, + ScratchDiskConfiguration value) { + compareAttachedDiskConfiguration(expected, value); + assertEquals(expected.diskType(), value.diskType()); + } + + public void compareAttachedDisk(AttachedDisk expected, AttachedDisk 
value) {
+    assertEquals(expected, value);
+    assertEquals(expected.deviceName(), value.deviceName());
+    assertEquals(expected.index(), value.index());
+    assertEquals(expected.configuration(), value.configuration());
+    assertEquals(expected.licenses(), value.licenses());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ComputeExceptionTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ComputeExceptionTest.java
new file mode 100644
index 000000000000..2705a739814b
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ComputeExceptionTest.java
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.compute;
+
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import com.google.cloud.BaseServiceException;
+import com.google.cloud.RetryHelper.RetryHelperException;
+
+import org.junit.Test;
+
+import java.io.IOException;
+import java.net.SocketTimeoutException;
+
+public class ComputeExceptionTest {
+
+  @Test
+  public void testComputeException() {
+    ComputeException exception = new ComputeException(500, "message");
+    assertEquals(500, exception.code());
+    assertEquals("message", exception.getMessage());
+    assertNull(exception.reason());
+    assertTrue(exception.retryable());
+    assertTrue(exception.idempotent());
+
+    exception = new ComputeException(403, "message");
+    assertEquals(403, exception.code());
+    assertEquals("message", exception.getMessage());
+    assertNull(exception.reason());
+    assertFalse(exception.retryable());
+    assertTrue(exception.idempotent());
+
+    IOException cause = new SocketTimeoutException();
+    exception = new ComputeException(cause);
+    assertNull(exception.reason());
+    assertNull(exception.getMessage());
+    assertTrue(exception.retryable());
+    assertTrue(exception.idempotent());
+    assertSame(cause, exception.getCause());
+
+    exception = new ComputeException(403, "message", cause);
+    assertEquals(403, exception.code());
+    assertEquals("message", exception.getMessage());
+    assertNull(exception.reason());
+    assertFalse(exception.retryable());
+    assertTrue(exception.idempotent());
+    assertSame(cause, exception.getCause());
+  }
+
+  @Test
+  public void testTranslateAndThrow() throws Exception {
+    Exception cause = new ComputeException(500, "message");
+    RetryHelperException exceptionMock = createMock(RetryHelperException.class);
+    expect(exceptionMock.getCause()).andReturn(cause).times(2);
+    replay(exceptionMock);
+    try {
+      ComputeException.translateAndThrow(exceptionMock);
+    } catch (BaseServiceException ex) {
+      assertEquals(500, ex.code());
+      assertEquals("message", ex.getMessage());
+      assertTrue(ex.retryable());
+      assertTrue(ex.idempotent());
+    } finally {
+      verify(exceptionMock);
+    }
+    cause = new IllegalArgumentException("message");
+    exceptionMock = createMock(RetryHelperException.class);
+    expect(exceptionMock.getMessage()).andReturn("message").times(1);
+    expect(exceptionMock.getCause()).andReturn(cause).times(2);
+    replay(exceptionMock);
+    try {
+      ComputeException.translateAndThrow(exceptionMock);
+    } catch (BaseServiceException ex) {
+      assertEquals(ComputeException.UNKNOWN_CODE, ex.code());
+      assertEquals("message", ex.getMessage());
+      assertFalse(ex.retryable());
+      assertTrue(ex.idempotent());
+      assertSame(cause, ex.getCause());
+    } finally {
+      verify(exceptionMock);
+    }
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ComputeImplTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ComputeImplTest.java
new file mode 100644
index 000000000000..6d1226df1a08
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ComputeImplTest.java
@@ -0,0 +1,3966 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.capture; +import static org.easymock.EasyMock.eq; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Page; +import com.google.cloud.RetryParams; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.cloud.compute.spi.ComputeRpc; +import com.google.cloud.compute.spi.ComputeRpcFactory; +import com.google.common.base.Function; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Iterables; + +import org.easymock.Capture; +import org.easymock.EasyMock; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import java.util.List; +import java.util.Map; + +public class ComputeImplTest { + + private static final String PROJECT = "project"; + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final String VALID_DISK_SIZE = "10GB-10TB"; + private static final Long DEFAULT_DISK_SIZE_GB = 10L; + private static final DiskTypeId DISK_TYPE_ID = DiskTypeId.of("project", "zone", "diskType"); + private static final DiskType DISK_TYPE = DiskType.builder() + .generatedId(GENERATED_ID) + .diskTypeId(DISK_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .validDiskSize(VALID_DISK_SIZE) + .defaultDiskSizeGb(DEFAULT_DISK_SIZE_GB) + .build(); + private static final MachineTypeId MACHINE_TYPE_ID = MachineTypeId.of("project", "zone", "type"); + private static final Integer GUEST_CPUS = 1; + private static final Integer MEMORY_MB = 2; + private static final List SCRATCH_DISKS = ImmutableList.of(3); + private static final Integer MAXIMUM_PERSISTENT_DISKS = 4; + private static final Long MAXIMUM_PERSISTENT_DISKS_SIZE_GB = 5L; + private static final MachineType MACHINE_TYPE = MachineType.builder() + .generatedId(GENERATED_ID) + .machineTypeId(MACHINE_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .cpus(GUEST_CPUS) + .memoryMb(MEMORY_MB) + .scratchDisksSizeGb(SCRATCH_DISKS) + .maximumPersistentDisks(MAXIMUM_PERSISTENT_DISKS) + .maximumPersistentDisksSizeGb(MAXIMUM_PERSISTENT_DISKS_SIZE_GB) + .build(); + private static final RegionId REGION_ID = RegionId.of("project", "region"); + private static final Region.Status REGION_STATUS = Region.Status.DOWN; + private static final ZoneId ZONE_ID1 = ZoneId.of("project", "zone1"); + private static final ZoneId ZONE_ID2 = ZoneId.of("project", "zone2"); + private static final List ZONES = ImmutableList.of(ZONE_ID1, ZONE_ID2); + private static final Region.Quota QUOTA1 = + new Region.Quota("METRIC1", 2, 1); + private static final Region.Quota QUOTA2 = + new Region.Quota("METRIC2", 4, 3); + private static final List QUOTAS = ImmutableList.of(QUOTA1, QUOTA2); + private static final Region REGION = Region.builder() + .regionId(REGION_ID) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(REGION_STATUS) + .zones(ZONES) + .quotas(QUOTAS) + .build(); + private static final ZoneId ZONE_ID = ZoneId.of("project", 
"zone"); + private static final Zone.Status ZONE_STATUS = Zone.Status.DOWN; + private static final Zone ZONE = Zone.builder() + .zoneId(ZONE_ID) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(ZONE_STATUS) + .region(REGION_ID) + .build(); + private static final LicenseId LICENSE_ID = LicenseId.of("project", "license"); + private static final Boolean CHARGES_USE_FEE = true; + private static final License LICENSE = new License(LICENSE_ID, CHARGES_USE_FEE); + private static final Operation.OperationError OPERATION_ERROR1 = + new Operation.OperationError("code1", "location1", "message1"); + private static final Operation.OperationError OPERATION_ERROR2 = + new Operation.OperationError("code2", "location2", "message2"); + private static final Operation.OperationWarning OPERATION_WARNING1 = + new Operation.OperationWarning("code1", "message1", ImmutableMap.of("k1", "v1")); + private static final Operation.OperationWarning OPERATION_WARNING2 = + new Operation.OperationWarning("code2", "location2", ImmutableMap.of("k2", "v2")); + private static final String CLIENT_OPERATION_ID = "clientOperationId"; + private static final String OPERATION_TYPE = "delete"; + private static final String TARGET_LINK = "targetLink"; + private static final String TARGET_ID = "42"; + private static final Operation.Status STATUS = Operation.Status.DONE; + private static final String STATUS_MESSAGE = "statusMessage"; + private static final String USER = "user"; + private static final Integer PROGRESS = 100; + private static final Long INSERT_TIME = 1453293540000L; + private static final Long START_TIME = 1453293420000L; + private static final Long END_TIME = 1453293480000L; + private static final List ERRORS = + ImmutableList.of(OPERATION_ERROR1, OPERATION_ERROR2); + private static final List WARNINGS = + ImmutableList.of(OPERATION_WARNING1, OPERATION_WARNING2); + private static final Integer HTTP_ERROR_STATUS_CODE = 404; + private static final String HTTP_ERROR_MESSAGE = "NOT FOUND"; + private static final GlobalOperationId GLOBAL_OPERATION_ID = + GlobalOperationId.of("project", "op"); + private static final ZoneOperationId ZONE_OPERATION_ID = + ZoneOperationId.of("project", "zone", "op"); + private static final RegionOperationId REGION_OPERATION_ID = + RegionOperationId.of("project", "region", "op"); + private static final RegionAddressId REGION_ADDRESS_ID = + RegionAddressId.of("project", "region", "address"); + private static final GlobalAddressId GLOBAL_ADDRESS_ID = + GlobalAddressId.of("project", "address"); + private static final AddressInfo REGION_ADDRESS = AddressInfo.builder(REGION_ADDRESS_ID).build(); + private static final AddressInfo GLOBAL_ADDRESS = AddressInfo.builder(GLOBAL_ADDRESS_ID).build(); + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final SnapshotId SNAPSHOT_ID = SnapshotId.of("project", "snapshot"); + private static final SnapshotInfo SNAPSHOT = SnapshotInfo.of(SNAPSHOT_ID, DISK_ID); + private static final ImageId IMAGE_ID = ImageId.of("project", "image"); + private static final ImageInfo IMAGE = ImageInfo.of(IMAGE_ID, DiskImageConfiguration.of(DISK_ID)); + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.builder(DeprecationStatus.Status.DEPRECATED, IMAGE_ID).build(); + private static final DiskInfo DISK = + DiskInfo.of(DISK_ID, StandardDiskConfiguration.of(DISK_TYPE_ID)); + private static final NetworkId NETWORK_ID = NetworkId.of("project", "network"); + 
private static final SubnetworkId SUBNETWORK_ID = SubnetworkId.of("project", "region", "network"); + private static final SubnetworkInfo SUBNETWORK = + SubnetworkInfo.of(SUBNETWORK_ID, NETWORK_ID, "192.168.0.0/16"); + private static final NetworkInfo NETWORK = + NetworkInfo.of(NETWORK_ID, StandardNetworkConfiguration.of("192.168.0.0/16")); + private static final InstanceId INSTANCE_ID = InstanceId.of("project", "zone", "instance"); + private static final AttachedDisk.PersistentDiskConfiguration PERSISTENT_DISK_CONFIGURATION = + AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + private static final AttachedDisk ATTACHED_DISK = + AttachedDisk.of("device", PERSISTENT_DISK_CONFIGURATION); + private static final NetworkInterface NETWORK_INTERFACE = NetworkInterface.of(NETWORK_ID); + private static final InstanceInfo INSTANCE = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE_ID, ATTACHED_DISK, NETWORK_INTERFACE); + + // Empty ComputeRpc options + private static final Map EMPTY_RPC_OPTIONS = ImmutableMap.of(); + + // DiskType options + private static final Compute.DiskTypeOption DISK_TYPE_OPTION_FIELDS = + Compute.DiskTypeOption.fields(Compute.DiskTypeField.ID, Compute.DiskTypeField.DESCRIPTION); + + // DiskType list options + private static final Compute.DiskTypeFilter DISK_TYPE_FILTER = + Compute.DiskTypeFilter.equals(Compute.DiskTypeField.DESCRIPTION, "someDescription"); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_PAGE_TOKEN = + Compute.DiskTypeListOption.pageToken("cursor"); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_PAGE_SIZE = + Compute.DiskTypeListOption.pageSize(42L); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_FILTER = + Compute.DiskTypeListOption.filter(DISK_TYPE_FILTER); + private static final Map DISK_TYPE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "description eq someDescription"); + + // DiskType aggregated list options + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_PAGE_TOKEN = + Compute.DiskTypeAggregatedListOption.pageToken("cursor"); + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_PAGE_SIZE = + Compute.DiskTypeAggregatedListOption.pageSize(42L); + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_FILTER = + Compute.DiskTypeAggregatedListOption.filter(DISK_TYPE_FILTER); + + // MachineType options + private static final Compute.MachineTypeOption MACHINE_TYPE_OPTION_FIELDS = + Compute.MachineTypeOption.fields(Compute.MachineTypeField.ID, + Compute.MachineTypeField.DESCRIPTION); + + // MachineType list options + private static final Compute.MachineTypeFilter MACHINE_TYPE_FILTER = + Compute.MachineTypeFilter.notEquals(Compute.MachineTypeField.MAXIMUM_PERSISTENT_DISKS, 42L); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_PAGE_TOKEN = + Compute.MachineTypeListOption.pageToken("cursor"); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_PAGE_SIZE = + Compute.MachineTypeListOption.pageSize(42L); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_FILTER = + Compute.MachineTypeListOption.filter(MACHINE_TYPE_FILTER); + private static final Map MACHINE_TYPE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "maximumPersistentDisks ne 42"); + + // MachineType aggregated 
list options + private static final Compute.MachineTypeAggregatedListOption MACHINE_TYPE_AGGREGATED_LIST_PAGE_TOKEN = + Compute.MachineTypeAggregatedListOption.pageToken("cursor"); + private static final Compute.MachineTypeAggregatedListOption MACHINE_TYPE_AGGREGATED_LIST_PAGE_SIZE = + Compute.MachineTypeAggregatedListOption.pageSize(42L); + private static final Compute.MachineTypeAggregatedListOption MACHINE_TYPE_AGGREGATED_LIST_FILTER = + Compute.MachineTypeAggregatedListOption.filter(MACHINE_TYPE_FILTER); + + // Region options + private static final Compute.RegionOption REGION_OPTION_FIELDS = + Compute.RegionOption.fields(Compute.RegionField.ID, Compute.RegionField.DESCRIPTION); + + // Region list options + private static final Compute.RegionFilter REGION_FILTER = + Compute.RegionFilter.equals(Compute.RegionField.ID, "someId"); + private static final Compute.RegionListOption REGION_LIST_PAGE_TOKEN = + Compute.RegionListOption.pageToken("cursor"); + private static final Compute.RegionListOption REGION_LIST_PAGE_SIZE = + Compute.RegionListOption.pageSize(42L); + private static final Compute.RegionListOption REGION_LIST_FILTER = + Compute.RegionListOption.filter(REGION_FILTER); + private static final Map REGION_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "id eq someId"); + + // Zone options + private static final Compute.ZoneOption ZONE_OPTION_FIELDS = + Compute.ZoneOption.fields(Compute.ZoneField.ID, Compute.ZoneField.DESCRIPTION); + + // Zone list options + private static final Compute.ZoneFilter ZONE_FILTER = + Compute.ZoneFilter.notEquals(Compute.ZoneField.NAME, "someName"); + private static final Compute.ZoneListOption ZONE_LIST_PAGE_TOKEN = + Compute.ZoneListOption.pageToken("cursor"); + private static final Compute.ZoneListOption ZONE_LIST_PAGE_SIZE = Compute.ZoneListOption.pageSize(42L); + private static final Compute.ZoneListOption ZONE_LIST_FILTER = Compute.ZoneListOption.filter(ZONE_FILTER); + private static final Map ZONE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "name ne someName"); + + // License options + private static final Compute.LicenseOption LICENSE_OPTION_FIELDS = + Compute.LicenseOption.fields(Compute.LicenseField.CHARGES_USE_FEE); + + // Operation options + private static final Compute.OperationOption OPERATION_OPTION_FIELDS = + Compute.OperationOption.fields(Compute.OperationField.ID, Compute.OperationField.DESCRIPTION); + + // Operation list options + private static final Compute.OperationFilter OPERATION_FILTER = + Compute.OperationFilter.notEquals(Compute.OperationField.PROGRESS, 0); + private static final Compute.OperationListOption OPERATION_LIST_PAGE_TOKEN = + Compute.OperationListOption.pageToken("cursor"); + private static final Compute.OperationListOption OPERATION_LIST_PAGE_SIZE = + Compute.OperationListOption.pageSize(42L); + private static final Compute.OperationListOption OPERATION_LIST_FILTER = + Compute.OperationListOption.filter(OPERATION_FILTER); + private static final Map OPERATION_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "progress ne 0"); + + // Address options + private static final Compute.AddressOption ADDRESS_OPTION_FIELDS = + Compute.AddressOption.fields(Compute.AddressField.ID, Compute.AddressField.DESCRIPTION); + + // Address list options + private 
static final Compute.AddressFilter ADDRESS_FILTER = + Compute.AddressFilter.notEquals(Compute.AddressField.REGION, "someRegion"); + private static final Compute.AddressListOption ADDRESS_LIST_PAGE_TOKEN = + Compute.AddressListOption.pageToken("cursor"); + private static final Compute.AddressListOption ADDRESS_LIST_PAGE_SIZE = Compute.AddressListOption.pageSize(42L); + private static final Compute.AddressListOption ADDRESS_LIST_FILTER = + Compute.AddressListOption.filter(ADDRESS_FILTER); + private static final Map ADDRESS_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "region ne someRegion"); + + // Address aggregated list options + private static final Compute.AddressAggregatedListOption ADDRESS_AGGREGATED_LIST_PAGE_TOKEN = + Compute.AddressAggregatedListOption.pageToken("cursor"); + private static final Compute.AddressAggregatedListOption ADDRESS_AGGREGATED_LIST_PAGE_SIZE = + Compute.AddressAggregatedListOption.pageSize(42L); + private static final Compute.AddressAggregatedListOption ADDRESS_AGGREGATED_LIST_FILTER = + Compute.AddressAggregatedListOption.filter(ADDRESS_FILTER); + + // Snapshot options + private static final Compute.SnapshotOption SNAPSHOT_OPTION_FIELDS = + Compute.SnapshotOption.fields(Compute.SnapshotField.ID, Compute.SnapshotField.DESCRIPTION); + + // Snapshot list options + private static final Compute.SnapshotFilter SNAPSHOT_FILTER = + Compute.SnapshotFilter.equals(Compute.SnapshotField.DISK_SIZE_GB, 500L); + private static final Compute.SnapshotListOption SNAPSHOT_LIST_PAGE_TOKEN = + Compute.SnapshotListOption.pageToken("cursor"); + private static final Compute.SnapshotListOption SNAPSHOT_LIST_PAGE_SIZE = + Compute.SnapshotListOption.pageSize(42L); + private static final Compute.SnapshotListOption SNAPSHOT_LIST_FILTER = + Compute.SnapshotListOption.filter(SNAPSHOT_FILTER); + private static final Map SNAPSHOT_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "diskSizeGb eq 500"); + + // Image options + private static final Compute.ImageOption IMAGE_OPTION_FIELDS = + Compute.ImageOption.fields(Compute.ImageField.ID, Compute.ImageField.DESCRIPTION); + + // Image list options + private static final Compute.ImageFilter IMAGE_FILTER = + Compute.ImageFilter.notEquals(Compute.ImageField.DISK_SIZE_GB, 500L); + private static final Compute.ImageListOption IMAGE_LIST_PAGE_TOKEN = Compute.ImageListOption.pageToken("cursor"); + private static final Compute.ImageListOption IMAGE_LIST_PAGE_SIZE = Compute.ImageListOption.pageSize(42L); + private static final Compute.ImageListOption IMAGE_LIST_FILTER = Compute.ImageListOption.filter(IMAGE_FILTER); + private static final Map IMAGE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "diskSizeGb ne 500"); + + // Disk options + private static final Compute.DiskOption DISK_OPTION_FIELDS = + Compute.DiskOption.fields(Compute.DiskField.ID, Compute.DiskField.DESCRIPTION); + + // Disk list options + private static final Compute.DiskFilter DISK_FILTER = Compute.DiskFilter.notEquals(Compute.DiskField.SIZE_GB, 500L); + private static final Compute.DiskListOption DISK_LIST_PAGE_TOKEN = Compute.DiskListOption.pageToken("cursor"); + private static final Compute.DiskListOption DISK_LIST_PAGE_SIZE = Compute.DiskListOption.pageSize(42L); + private static final Compute.DiskListOption 
DISK_LIST_FILTER = Compute.DiskListOption.filter(DISK_FILTER); + private static final Map DISK_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "sizeGb ne 500"); + + // Disk aggregated list options + private static final Compute.DiskAggregatedListOption DISK_AGGREGATED_LIST_PAGE_TOKEN = + Compute.DiskAggregatedListOption.pageToken("cursor"); + private static final Compute.DiskAggregatedListOption DISK_AGGREGATED_LIST_PAGE_SIZE = + Compute.DiskAggregatedListOption.pageSize(42L); + private static final Compute.DiskAggregatedListOption DISK_AGGREGATED_LIST_FILTER = + Compute.DiskAggregatedListOption.filter(DISK_FILTER); + + // Subnetwork options + private static final Compute.SubnetworkOption SUBNETWORK_OPTION_FIELDS = + Compute.SubnetworkOption.fields(Compute.SubnetworkField.ID, Compute.SubnetworkField.DESCRIPTION); + + // Subnetwork list options + private static final Compute.SubnetworkFilter SUBNETWORK_FILTER = + Compute.SubnetworkFilter.equals(Compute.SubnetworkField.IP_CIDR_RANGE, "192.168.0.0/16"); + private static final Compute.SubnetworkListOption SUBNETWORK_LIST_PAGE_TOKEN = + Compute.SubnetworkListOption.pageToken("cursor"); + private static final Compute.SubnetworkListOption SUBNETWORK_LIST_PAGE_SIZE = + Compute.SubnetworkListOption.pageSize(42L); + private static final Compute.SubnetworkListOption SUBNETWORK_LIST_FILTER = + Compute.SubnetworkListOption.filter(SUBNETWORK_FILTER); + private static final Map SUBNETWORK_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "ipCidrRange eq 192.168.0.0/16"); + + // Subnetwork aggregated list options + private static final Compute.SubnetworkAggregatedListOption SUBNETWORK_AGGREGATED_LIST_PAGE_TOKEN = + Compute.SubnetworkAggregatedListOption.pageToken("cursor"); + private static final Compute.SubnetworkAggregatedListOption SUBNETWORK_AGGREGATED_LIST_PAGE_SIZE = + Compute.SubnetworkAggregatedListOption.pageSize(42L); + private static final Compute.SubnetworkAggregatedListOption SUBNETWORK_AGGREGATED_LIST_FILTER = + Compute.SubnetworkAggregatedListOption.filter(SUBNETWORK_FILTER); + + // Network options + private static final Compute.NetworkOption NETWORK_OPTION_FIELDS = + Compute.NetworkOption.fields(Compute.NetworkField.ID, Compute.NetworkField.DESCRIPTION); + + // Network list options + private static final Compute.NetworkFilter NETWORK_FILTER = + Compute.NetworkFilter.equals(Compute.NetworkField.IPV4_RANGE, "192.168.0.0/16"); + private static final Compute.NetworkListOption NETWORK_LIST_PAGE_TOKEN = + Compute.NetworkListOption.pageToken("cursor"); + private static final Compute.NetworkListOption NETWORK_LIST_PAGE_SIZE = + Compute.NetworkListOption.pageSize(42L); + private static final Compute.NetworkListOption NETWORK_LIST_FILTER = + Compute.NetworkListOption.filter(NETWORK_FILTER); + private static final Map NETWORK_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "IPv4Range eq 192.168.0.0/16"); + + // Instance options + private static final Compute.InstanceOption INSTANCE_OPTION_FIELDS = + Compute.InstanceOption.fields(Compute.InstanceField.ID, Compute.InstanceField.DESCRIPTION); + + // Instance list options + private static final Compute.InstanceFilter INSTANCE_FILTER = + Compute.InstanceFilter.equals(Compute.InstanceField.CAN_IP_FORWARD, true); + private static final 
Compute.InstanceListOption INSTANCE_LIST_PAGE_TOKEN = + Compute.InstanceListOption.pageToken("cursor"); + private static final Compute.InstanceListOption INSTANCE_LIST_PAGE_SIZE = + Compute.InstanceListOption.pageSize(42L); + private static final Compute.InstanceListOption INSTANCE_LIST_FILTER = + Compute.InstanceListOption.filter(INSTANCE_FILTER); + private static final Map INSTANCE_LIST_OPTIONS = ImmutableMap.of( + ComputeRpc.Option.PAGE_TOKEN, "cursor", + ComputeRpc.Option.MAX_RESULTS, 42L, + ComputeRpc.Option.FILTER, "canIpForward eq true"); + + // Instance aggregated list options + private static final Compute.InstanceAggregatedListOption INSTANCE_AGGREGATED_LIST_PAGE_TOKEN = + Compute.InstanceAggregatedListOption.pageToken("cursor"); + private static final Compute.InstanceAggregatedListOption INSTANCE_AGGREGATED_LIST_PAGE_SIZE = + Compute.InstanceAggregatedListOption.pageSize(42L); + private static final Compute.InstanceAggregatedListOption INSTANCE_AGGREGATED_LIST_FILTER = + Compute.InstanceAggregatedListOption.filter(INSTANCE_FILTER); + + private static final Function + OPERATION_TO_PB_FUNCTION = new Function() { + @Override + public com.google.api.services.compute.model.Operation apply(Operation operation) { + return operation.toPb(); + } + }; + + private ComputeOptions options; + private ComputeRpcFactory rpcFactoryMock; + private ComputeRpc computeRpcMock; + private Compute compute; + + private Operation globalOperation; + private Operation zoneOperation; + private Operation regionOperation; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Before + public void setUp() { + rpcFactoryMock = EasyMock.createMock(ComputeRpcFactory.class); + computeRpcMock = EasyMock.createMock(ComputeRpc.class); + EasyMock.expect(rpcFactoryMock.create(EasyMock.anyObject(ComputeOptions.class))) + .andReturn(computeRpcMock).times(2); + EasyMock.replay(rpcFactoryMock); + options = ComputeOptions.builder() + .projectId(PROJECT) + .serviceRpcFactory(rpcFactoryMock) + .retryParams(RetryParams.noRetries()) + .build(); + Compute otherService = options.toBuilder().build().service(); + globalOperation = new Operation.Builder(otherService) + .generatedId(GENERATED_ID) + .operationId(GLOBAL_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + zoneOperation = new Operation.Builder(otherService) + .generatedId(GENERATED_ID) + .operationId(ZONE_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + regionOperation = new Operation.Builder(otherService) + .generatedId(GENERATED_ID) + .operationId(REGION_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + 
.statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + } + + @After + public void tearDown() { + EasyMock.verify(rpcFactoryMock, computeRpcMock); + } + + @Test + public void testGetOptions() { + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertSame(options, compute.options()); + } + + @Test + public void testGetDiskType() { + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskType diskType = compute.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type()); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testGetDiskType_Null() { + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type())); + } + + @Test + public void testGetDiskTypeFromDiskTypeId() { + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskType diskType = compute.getDiskType(DISK_TYPE_ID); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testGetDiskTypeWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.getDiskType( + eq(DISK_TYPE_ID.zone()), eq(DISK_TYPE_ID.type()), capture(capturedOptions))) + .andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskType diskType = + compute.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), DISK_TYPE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(DISK_TYPE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testListDiskTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testListDiskTypesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + ImmutableList nextDiskTypeList = ImmutableList.of(DISK_TYPE); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextDiskTypeList, 
DiskType.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), nextOptions)) + .andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextDiskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testListEmptyDiskTypes() { + ImmutableList diskTypes = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, diskTypes); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(diskTypes.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testListDiskTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_ID.zone(), DISK_TYPE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_ID.zone(), DISK_TYPE_LIST_PAGE_SIZE, + DISK_TYPE_LIST_PAGE_TOKEN, DISK_TYPE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListDiskTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListDiskTypesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + ImmutableList nextDiskTypeList = ImmutableList.of(DISK_TYPE); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextDiskTypeList, DiskType.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listDiskTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listDiskTypes(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(); + assertEquals(cursor, page.nextPageCursor()); + 
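+    // Verify the first page, then advance; the next page must be requested with the cursor as its page token.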
assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextDiskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListEmptyDiskTypes() { + ImmutableList diskTypes = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, diskTypes); + EasyMock.expect(computeRpcMock.listDiskTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listDiskTypes(); + assertNull(page.nextPageCursor()); + assertArrayEquals(diskTypes.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testAggregatedListDiskTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskTypeList = ImmutableList.of(DISK_TYPE, DISK_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskTypeList, DiskType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDiskTypes(DISK_TYPE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDiskTypes(DISK_TYPE_AGGREGATED_LIST_PAGE_SIZE, + DISK_TYPE_AGGREGATED_LIST_PAGE_TOKEN, DISK_TYPE_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskTypeList.toArray(), Iterables.toArray(page.values(), DiskType.class)); + } + + @Test + public void testGetMachineType() { + EasyMock.expect( + computeRpcMock.getMachineType( + MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andReturn(MACHINE_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + MachineType machineType = + compute.getMachineType(MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.type()); + assertEquals(MACHINE_TYPE, machineType); + } + + @Test + public void testGetMachineType_Null() { + EasyMock.expect( + computeRpcMock.getMachineType( + MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getMachineType(MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.type())); + } + + @Test + public void testGetMachineTypeFromMachineTypeId() { + EasyMock.expect(computeRpcMock.getMachineType( + MACHINE_TYPE_ID.zone(), MACHINE_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andReturn(MACHINE_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + MachineType machineType = compute.getMachineType(MACHINE_TYPE_ID); + assertEquals(MACHINE_TYPE, machineType); + } + + @Test + public void testGetMachineTypeWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.getMachineType(eq(MACHINE_TYPE_ID.zone()), eq(MACHINE_TYPE_ID.type()), + capture(capturedOptions))) + .andReturn(MACHINE_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + MachineType machineType = compute.getMachineType(MACHINE_TYPE_ID.zone(), + MACHINE_TYPE_ID.type(), MACHINE_TYPE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(DISK_TYPE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(MACHINE_TYPE, machineType); + } + + @Test + public void testListMachineTypes() { + String cursor = 
"cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), Iterables.toArray(page.values(), + MachineType.class)); + } + + @Test + public void testListMachineTypesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + ImmutableList nextMachineTypeList = ImmutableList.of(MACHINE_TYPE); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextMachineTypeList, MachineType.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), nextOptions)) + .andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextMachineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testListEmptyMachineTypes() { + ImmutableList machineTypes = + ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + machineTypes); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(machineTypes.toArray(), Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testListMachineTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listMachineTypes(MACHINE_TYPE_ID.zone(), MACHINE_TYPE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_ID.zone(), + MACHINE_TYPE_LIST_PAGE_SIZE, MACHINE_TYPE_LIST_PAGE_TOKEN, MACHINE_TYPE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testAggregatedListMachineTypes() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, 
Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listMachineTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), Iterables.toArray(page.values(), + MachineType.class)); + } + + @Test + public void testAggregatedListMachineTypesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + ImmutableList nextMachineTypeList = ImmutableList.of(MACHINE_TYPE); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextMachineTypeList, MachineType.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listMachineTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listMachineTypes(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextMachineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testAggregatedListEmptyMachineTypes() { + ImmutableList machineTypes = + ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + machineTypes); + EasyMock.expect(computeRpcMock.listMachineTypes(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listMachineTypes(); + assertNull(page.nextPageCursor()); + assertArrayEquals(machineTypes.toArray(), Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testAggregatedListMachineTypesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList machineTypeList = ImmutableList.of(MACHINE_TYPE, MACHINE_TYPE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(machineTypeList, MachineType.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listMachineTypes(MACHINE_TYPE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listMachineTypes(MACHINE_TYPE_AGGREGATED_LIST_PAGE_SIZE, + MACHINE_TYPE_AGGREGATED_LIST_PAGE_TOKEN, MACHINE_TYPE_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(machineTypeList.toArray(), + Iterables.toArray(page.values(), MachineType.class)); + } + + @Test + public void testGetRegion() { + EasyMock.expect(computeRpcMock.getRegion(REGION_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(REGION.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Region region = compute.getRegion(REGION_ID.region()); + assertEquals(REGION, region); + } + + @Test + public void testGetRegion_Null() { + EasyMock.expect(computeRpcMock.getRegion(REGION_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getRegion(REGION_ID.region())); + } + + @Test + public void 
testGetRegionWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getRegion(eq(REGION_ID.region()), capture(capturedOptions))) + .andReturn(REGION.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Region region = compute.getRegion(REGION_ID.region(), REGION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(REGION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(REGION, region); + } + + @Test + public void testListRegions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList regionList = ImmutableList.of(REGION, REGION); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(regionList, Region.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listRegions(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegions(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(regionList.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void testListRegionsNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList regionList = ImmutableList.of(REGION, REGION); + ImmutableList nextRegionList = ImmutableList.of(REGION); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(regionList, Region.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextRegionList, Region.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listRegions(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listRegions(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegions(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(regionList.toArray(), Iterables.toArray(page.values(), Region.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextRegionList.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void testListEmptyRegions() { + ImmutableList regions = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + regions); + EasyMock.expect(computeRpcMock.listRegions(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listRegions(); + assertNull(page.nextPageCursor()); + assertArrayEquals(regions.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void testListRegionsWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList regionList = ImmutableList.of(REGION, REGION); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(regionList, Region.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listRegions(REGION_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegions(REGION_LIST_PAGE_SIZE, REGION_LIST_PAGE_TOKEN, + REGION_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(regionList.toArray(), Iterables.toArray(page.values(), Region.class)); + } + + @Test + public void 
testGetZone() { + EasyMock.expect(computeRpcMock.getZone(ZONE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(ZONE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Zone zone = compute.getZone(ZONE_ID.zone()); + assertEquals(ZONE, zone); + } + + @Test + public void testGetZone_Null() { + EasyMock.expect(computeRpcMock.getZone(ZONE_ID.zone(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getZone(ZONE_ID.zone())); + } + + @Test + public void testGetZoneWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getZone(eq(ZONE_ID.zone()), capture(capturedOptions))) + .andReturn(ZONE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Zone zone = compute.getZone(ZONE_ID.zone(), ZONE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(ZONE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(ZONE, zone); + } + + @Test + public void testListZones() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList zoneList = ImmutableList.of(ZONE, ZONE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(zoneList, Zone.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listZones(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listZones(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(zoneList.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testListZonesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList zoneList = ImmutableList.of(ZONE, ZONE); + ImmutableList nextZoneList = ImmutableList.of(ZONE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(zoneList, Zone.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextZoneList, Zone.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listZones(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listZones(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listZones(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(zoneList.toArray(), Iterables.toArray(page.values(), Zone.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextZoneList.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testListEmptyZones() { + ImmutableList zones = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, zones); + EasyMock.expect(computeRpcMock.listZones(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listZones(); + assertNull(page.nextPageCursor()); + assertArrayEquals(zones.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testListZonesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList zoneList = ImmutableList.of(ZONE, ZONE); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, 
Iterables.transform(zoneList, Zone.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listZones(ZONE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = + compute.listZones(ZONE_LIST_PAGE_SIZE, ZONE_LIST_PAGE_TOKEN, ZONE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(zoneList.toArray(), Iterables.toArray(page.values(), Zone.class)); + } + + @Test + public void testGetLicenseFromString() { + EasyMock.expect(computeRpcMock.getLicense(PROJECT, LICENSE_ID.license(), EMPTY_RPC_OPTIONS)) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(LICENSE_ID.license()); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromString_Null() { + EasyMock.expect(computeRpcMock.getLicense(PROJECT, LICENSE_ID.license(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getLicense(LICENSE_ID.license())); + } + + @Test + public void testGetLicenseFromStringWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.getLicense(eq(PROJECT), eq(LICENSE_ID.license()), capture(capturedOptions))) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(LICENSE_ID.license(), LICENSE_OPTION_FIELDS); + assertEquals(LICENSE, license); + String selector = (String) capturedOptions.getValue().get(LICENSE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("chargesUseFee")); + assertEquals(22, selector.length()); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromIdWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + LicenseId licenseId = LicenseId.of("project2", "license2"); + EasyMock.expect(computeRpcMock.getLicense(eq(licenseId.project()), eq(licenseId.license()), + capture(capturedOptions))) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(licenseId, LICENSE_OPTION_FIELDS); + assertEquals(LICENSE, license); + String selector = (String) capturedOptions.getValue().get(LICENSE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("chargesUseFee")); + assertEquals(22, selector.length()); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromId() { + LicenseId licenseId = LicenseId.of("project2", "license2"); + EasyMock.expect( + computeRpcMock.getLicense(licenseId.project(), licenseId.license(), EMPTY_RPC_OPTIONS)) + .andReturn(LICENSE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + License license = compute.getLicense(licenseId); + assertEquals(LICENSE, license); + } + + @Test + public void testGetLicenseFromId_Null() { + LicenseId licenseId = LicenseId.of("project2", "license2"); + EasyMock.expect( + computeRpcMock.getLicense(licenseId.project(), licenseId.license(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getLicense(licenseId)); + } + + @Test + public void testGetGlobalOperation() { + EasyMock.expect( + computeRpcMock.getGlobalOperation(GLOBAL_OPERATION_ID.operation(), EMPTY_RPC_OPTIONS)) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = 
options.service(); + assertEquals(globalOperation, compute.getOperation(GLOBAL_OPERATION_ID)); + } + + @Test + public void testGetGlobalOperation_Null() { + EasyMock.expect( + computeRpcMock.getGlobalOperation(GLOBAL_OPERATION_ID.operation(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getOperation(GLOBAL_OPERATION_ID)); + } + + @Test + public void testGetGlobalOperationWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getGlobalOperation( + eq(GLOBAL_OPERATION_ID.operation()), capture(capturedOptions))) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.getOperation(GLOBAL_OPERATION_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testListGlobalOperations() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(globalOperation, globalOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listGlobalOperations(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listGlobalOperations(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListGlobalOperationsNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(globalOperation, globalOperation); + ImmutableList nextOperationList = ImmutableList.of(globalOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextOperationList, OPERATION_TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listGlobalOperations(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listGlobalOperations(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listGlobalOperations(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextOperationList.toArray(), + Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListEmptyGlobalOperations() { + ImmutableList operations = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + operations); + EasyMock.expect(computeRpcMock.listGlobalOperations(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listGlobalOperations(); + assertNull(page.nextPageCursor()); + assertArrayEquals(operations.toArray(), Iterables.toArray(page.values(), Operation.class)); 
+ }
+
+ @Test
+ public void testListGlobalOperationsWithOptions() {
+ String cursor = "cursor";
+ compute = options.service();
+ ImmutableList<Operation> operationList = ImmutableList.of(globalOperation, globalOperation);
+ ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Operation>> result =
+ ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION));
+ EasyMock.expect(computeRpcMock.listGlobalOperations(OPERATION_LIST_OPTIONS)).andReturn(result);
+ EasyMock.replay(computeRpcMock);
+ Page<Operation> page = compute.listGlobalOperations(OPERATION_LIST_PAGE_SIZE,
+ OPERATION_LIST_PAGE_TOKEN, OPERATION_LIST_FILTER);
+ assertEquals(cursor, page.nextPageCursor());
+ assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class));
+ }
+
+ @Test
+ public void testDeleteGlobalOperation_True() {
+ EasyMock.expect(computeRpcMock.deleteGlobalOperation(GLOBAL_OPERATION_ID.operation()))
+ .andReturn(true);
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertTrue(compute.deleteOperation(GLOBAL_OPERATION_ID));
+ }
+
+ @Test
+ public void testDeleteGlobalOperation_False() {
+ EasyMock.expect(computeRpcMock.deleteGlobalOperation(GLOBAL_OPERATION_ID.operation()))
+ .andReturn(false);
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertFalse(compute.deleteOperation(GLOBAL_OPERATION_ID));
+ }
+
+ @Test
+ public void testGetRegionOperation() {
+ EasyMock.expect(computeRpcMock.getRegionOperation(REGION_OPERATION_ID.region(),
+ REGION_OPERATION_ID.operation(), EMPTY_RPC_OPTIONS))
+ .andReturn(regionOperation.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ Operation operation = compute.getOperation(REGION_OPERATION_ID);
+ assertEquals(regionOperation, operation);
+ }
+
+ @Test
+ public void testGetRegionOperation_Null() {
+ EasyMock.expect(computeRpcMock.getRegionOperation(REGION_OPERATION_ID.region(),
+ REGION_OPERATION_ID.operation(), EMPTY_RPC_OPTIONS))
+ .andReturn(null);
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertNull(compute.getOperation(REGION_OPERATION_ID));
+ }
+
+ @Test
+ public void testGetRegionOperationWithSelectedFields() {
+ Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance();
+ EasyMock.expect(computeRpcMock.getRegionOperation(eq(REGION_OPERATION_ID.region()),
+ eq(REGION_OPERATION_ID.operation()), capture(capturedOptions)))
+ .andReturn(regionOperation.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ Operation operation = compute.getOperation(REGION_OPERATION_ID, OPERATION_OPTION_FIELDS);
+ String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption());
+ assertTrue(selector.contains("selfLink"));
+ assertTrue(selector.contains("id"));
+ assertTrue(selector.contains("description"));
+ assertEquals(23, selector.length());
+ assertEquals(regionOperation, operation);
+ }
+
+ @Test
+ public void testListRegionOperations() {
+ String cursor = "cursor";
+ compute = options.service();
+ ImmutableList<Operation> operationList = ImmutableList.of(regionOperation, regionOperation);
+ ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Operation>> result =
+ ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION));
+ EasyMock.expect(
+ computeRpcMock.listRegionOperations(REGION_OPERATION_ID.region(), EMPTY_RPC_OPTIONS))
+ .andReturn(result);
+ EasyMock.replay(computeRpcMock);
+ Page<Operation> page = compute.listRegionOperations(REGION_OPERATION_ID.region());
+ assertEquals(cursor, page.nextPageCursor());
assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListRegionOperationsNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(regionOperation, regionOperation); + ImmutableList nextOperationList = ImmutableList.of(regionOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextOperationList, OPERATION_TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listRegionOperations(REGION_OPERATION_ID.region(), + EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listRegionOperations(REGION_OPERATION_ID.region(), + nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegionOperations(REGION_OPERATION_ID.region()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextOperationList.toArray(), + Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListEmptyRegionOperations() { + ImmutableList operations = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + operations); + EasyMock.expect( + computeRpcMock.listRegionOperations(REGION_OPERATION_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listRegionOperations(REGION_OPERATION_ID.region()); + assertNull(page.nextPageCursor()); + assertArrayEquals(operations.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListRegionOperationsWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(regionOperation, regionOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listRegionOperations(REGION_OPERATION_ID.region(), OPERATION_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listRegionOperations(REGION_OPERATION_ID.region(), + OPERATION_LIST_PAGE_SIZE, OPERATION_LIST_PAGE_TOKEN, OPERATION_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testDeleteRegionOperation_True() { + EasyMock.expect(computeRpcMock.deleteRegionOperation(REGION_OPERATION_ID.region(), + REGION_OPERATION_ID.operation())).andReturn(true); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertTrue(compute.deleteOperation(REGION_OPERATION_ID)); + } + + @Test + public void testDeleteRegionOperation_False() { + EasyMock.expect(computeRpcMock.deleteRegionOperation(REGION_OPERATION_ID.region(), + REGION_OPERATION_ID.operation())).andReturn(false); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertFalse(compute.deleteOperation(REGION_OPERATION_ID)); + } + + @Test + public void testGetZoneOperation() { + 
EasyMock.expect(computeRpcMock.getZoneOperation(ZONE_OPERATION_ID.zone(), + ZONE_OPERATION_ID.operation(), EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.getOperation(ZONE_OPERATION_ID); + assertEquals(zoneOperation, operation); + } + + @Test + public void testGetZoneOperation_Null() { + EasyMock.expect(computeRpcMock.getZoneOperation(ZONE_OPERATION_ID.zone(), + ZONE_OPERATION_ID.operation(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getOperation(ZONE_OPERATION_ID)); + } + + @Test + public void testGetZoneOperationWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getZoneOperation(eq(ZONE_OPERATION_ID.zone()), + eq(ZONE_OPERATION_ID.operation()), capture(capturedOptions))) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.getOperation(ZONE_OPERATION_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testListZoneOperations() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(zoneOperation, zoneOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listZoneOperations(ZONE_OPERATION_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listZoneOperations(ZONE_OPERATION_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListZoneOperationsNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(zoneOperation, zoneOperation); + ImmutableList nextOperationList = ImmutableList.of(zoneOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextOperationList, OPERATION_TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listZoneOperations(ZONE_OPERATION_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.expect(computeRpcMock.listZoneOperations(ZONE_OPERATION_ID.zone(), nextOptions)) + .andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listZoneOperations(ZONE_OPERATION_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextOperationList.toArray(), + Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListEmptyZoneOperations() { + ImmutableList operations = ImmutableList.of(); + ComputeRpc.Tuple> result = 
+ ComputeRpc.Tuple.>of(null, + operations); + EasyMock.expect( + computeRpcMock.listZoneOperations(ZONE_OPERATION_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page page = compute.listZoneOperations(ZONE_OPERATION_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(operations.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testListZoneOperationsWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList operationList = ImmutableList.of(zoneOperation, zoneOperation); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(operationList, OPERATION_TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listZoneOperations(ZONE_OPERATION_ID.zone(), OPERATION_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listZoneOperations(ZONE_OPERATION_ID.zone(), + OPERATION_LIST_PAGE_SIZE, OPERATION_LIST_PAGE_TOKEN, OPERATION_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(operationList.toArray(), Iterables.toArray(page.values(), Operation.class)); + } + + @Test + public void testDeleteZoneOperation_True() { + EasyMock.expect(computeRpcMock.deleteZoneOperation(ZONE_OPERATION_ID.zone(), + ZONE_OPERATION_ID.operation())).andReturn(true); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertTrue(compute.deleteOperation(ZONE_OPERATION_ID)); + } + + @Test + public void testDeleteZoneOperation_False() { + EasyMock.expect(computeRpcMock.deleteZoneOperation(ZONE_OPERATION_ID.zone(), + ZONE_OPERATION_ID.operation())).andReturn(false); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertFalse(compute.deleteOperation(ZONE_OPERATION_ID)); + } + + @Test + public void testGetGlobalAddress() { + EasyMock.expect(computeRpcMock.getGlobalAddress(GLOBAL_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS)) + .andReturn(GLOBAL_ADDRESS.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Address address = compute.getAddress(GLOBAL_ADDRESS_ID); + assertEquals(new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS)), address); + } + + @Test + public void testGetGlobalAddress_Null() { + EasyMock.expect(computeRpcMock.getGlobalAddress(GLOBAL_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getAddress(GLOBAL_ADDRESS_ID)); + } + + @Test + public void testGetGlobalAddressWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getGlobalAddress( + eq(GLOBAL_ADDRESS_ID.address()), capture(capturedOptions))) + .andReturn(GLOBAL_ADDRESS.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Address address = compute.getAddress(GLOBAL_ADDRESS_ID, ADDRESS_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(ADDRESS_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS)), address); + } + + @Test + public void testGetRegionAddress() { + EasyMock.expect(computeRpcMock.getRegionAddress(REGION_ADDRESS_ID.region(), + REGION_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS)).andReturn(REGION_ADDRESS.toPb()); + 
EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ Address address = compute.getAddress(REGION_ADDRESS_ID);
+ assertEquals(new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), address);
+ }
+
+ @Test
+ public void testGetRegionAddress_Null() {
+ EasyMock.expect(computeRpcMock.getRegionAddress(REGION_ADDRESS_ID.region(),
+ REGION_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS)).andReturn(null);
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertNull(compute.getAddress(REGION_ADDRESS_ID));
+ }
+
+ @Test
+ public void testGetRegionAddressWithSelectedFields() {
+ Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance();
+ EasyMock.expect(computeRpcMock.getRegionAddress(eq(REGION_ADDRESS_ID.region()),
+ eq(REGION_ADDRESS_ID.address()), capture(capturedOptions)))
+ .andReturn(REGION_ADDRESS.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ Address address = compute.getAddress(REGION_ADDRESS_ID, ADDRESS_OPTION_FIELDS);
+ String selector = (String) capturedOptions.getValue().get(ADDRESS_OPTION_FIELDS.rpcOption());
+ assertTrue(selector.contains("selfLink"));
+ assertTrue(selector.contains("id"));
+ assertTrue(selector.contains("description"));
+ assertEquals(23, selector.length());
+ assertEquals(new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), address);
+ }
+
+ @Test
+ public void testDeleteGlobalAddress_Operation() {
+ EasyMock
+ .expect(computeRpcMock.deleteGlobalAddress(GLOBAL_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS))
+ .andReturn(globalOperation.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertEquals(globalOperation, compute.deleteAddress(GLOBAL_ADDRESS_ID));
+ }
+
+ @Test
+ public void testDeleteGlobalAddressWithSelectedFields_Operation() {
+ Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance();
+ EasyMock.expect(computeRpcMock.deleteGlobalAddress(eq(GLOBAL_ADDRESS_ID.address()),
+ capture(capturedOptions))).andReturn(globalOperation.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ Operation operation = compute.deleteAddress(GLOBAL_ADDRESS_ID, OPERATION_OPTION_FIELDS);
+ String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption());
+ assertTrue(selector.contains("selfLink"));
+ assertTrue(selector.contains("id"));
+ assertTrue(selector.contains("description"));
+ assertEquals(23, selector.length());
+ assertEquals(globalOperation, operation);
+ }
+
+ @Test
+ public void testDeleteGlobalAddress_Null() {
+ EasyMock
+ .expect(computeRpcMock.deleteGlobalAddress(GLOBAL_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS))
+ .andReturn(null);
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertNull(compute.deleteAddress(GLOBAL_ADDRESS_ID));
+ }
+
+ @Test
+ public void testDeleteRegionAddress_Operation() {
+ EasyMock.expect(computeRpcMock.deleteRegionAddress(REGION_ADDRESS_ID.region(),
+ REGION_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS)).andReturn(regionOperation.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertEquals(regionOperation, compute.deleteAddress(REGION_ADDRESS_ID));
+ }
+
+ @Test
+ public void testDeleteRegionAddressWithSelectedFields_Operation() {
+ Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance();
+ EasyMock.expect(computeRpcMock.deleteRegionAddress(eq(REGION_ADDRESS_ID.region()),
+ eq(REGION_ADDRESS_ID.address()), capture(capturedOptions)))
+ .andReturn(globalOperation.toPb());
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ Operation operation = compute.deleteAddress(REGION_ADDRESS_ID, OPERATION_OPTION_FIELDS);
+ String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption());
+ assertTrue(selector.contains("selfLink"));
+ assertTrue(selector.contains("id"));
+ assertTrue(selector.contains("description"));
+ assertEquals(23, selector.length());
+ assertEquals(globalOperation, operation);
+ }
+
+ @Test
+ public void testDeleteRegionAddress_Null() {
+ EasyMock.expect(computeRpcMock.deleteRegionAddress(REGION_ADDRESS_ID.region(),
+ REGION_ADDRESS_ID.address(), EMPTY_RPC_OPTIONS)).andReturn(null);
+ EasyMock.replay(computeRpcMock);
+ compute = options.service();
+ assertNull(compute.deleteAddress(REGION_ADDRESS_ID));
+ }
+
+ @Test
+ public void testListGlobalAddresses() {
+ String cursor = "cursor";
+ compute = options.service();
+ ImmutableList<Address> addressList = ImmutableList.of(
+ new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS)),
+ new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS)));
+ ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Address>> result =
+ ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION));
+ EasyMock.expect(computeRpcMock.listGlobalAddresses(EMPTY_RPC_OPTIONS)).andReturn(result);
+ EasyMock.replay(computeRpcMock);
+ Page<Address>
page = compute.listGlobalAddresses(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListGlobalAddressesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS))); + ImmutableList
nextAddressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextAddressList, AddressInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listGlobalAddresses(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listGlobalAddresses(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page
page = compute.listGlobalAddresses(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextAddressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListEmptyGlobalAddresses() { + ImmutableList addresses = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, addresses); + EasyMock.expect(computeRpcMock.listGlobalAddresses(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page
page = compute.listGlobalAddresses(); + assertNull(page.nextPageCursor()); + assertArrayEquals(addresses.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListGlobalAddressesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(GLOBAL_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listGlobalAddresses(ADDRESS_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page
page = compute.listGlobalAddresses(ADDRESS_LIST_PAGE_SIZE, + ADDRESS_LIST_PAGE_TOKEN, ADDRESS_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListRegionAddresses() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listRegionAddresses(REGION_ADDRESS_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page
page = compute.listRegionAddresses(REGION_ADDRESS_ID.region()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListRegionAddressesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ImmutableList
nextAddressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextAddressList, AddressInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect( + computeRpcMock.listRegionAddresses(REGION_ADDRESS_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.expect( + computeRpcMock.listRegionAddresses(REGION_ADDRESS_ID.region(), nextOptions)) + .andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page
page = compute.listRegionAddresses(REGION_ADDRESS_ID.region()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextAddressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListEmptyRegionAddresses() { + ImmutableList addresses = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, addresses); + EasyMock.expect( + computeRpcMock.listRegionAddresses(REGION_ADDRESS_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page
page = compute.listRegionAddresses(REGION_ADDRESS_ID.region()); + assertNull(page.nextPageCursor()); + assertArrayEquals(addresses.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testListRegionAddressesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + EasyMock.expect( + computeRpcMock.listRegionAddresses(REGION_ADDRESS_ID.region(), ADDRESS_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page
page = compute.listRegionAddresses(REGION_ADDRESS_ID.region(), + ADDRESS_LIST_PAGE_SIZE, ADDRESS_LIST_PAGE_TOKEN, ADDRESS_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testAggregatedListAddresses() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listAddresses(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page
page = compute.listAddresses(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testAggregatedListAddressesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ImmutableList
nextAddressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextAddressList, AddressInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listAddresses(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listAddresses(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page
page = compute.listAddresses(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextAddressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testAggregatedListEmptyAddresses() { + ImmutableList addresses = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, addresses); + EasyMock.expect(computeRpcMock.listAddresses(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Page
page = compute.listAddresses(); + assertNull(page.nextPageCursor()); + assertArrayEquals(addresses.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testAggregatedListAddressesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList
addressList = ImmutableList.of( + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS)), + new Address(compute, new AddressInfo.BuilderImpl(REGION_ADDRESS))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(addressList, AddressInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listAddresses(ADDRESS_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page
page = compute.listAddresses(ADDRESS_AGGREGATED_LIST_PAGE_SIZE, + ADDRESS_AGGREGATED_LIST_PAGE_TOKEN, ADDRESS_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(addressList.toArray(), Iterables.toArray(page.values(), Address.class)); + } + + @Test + public void testCreateGlobalAddress() { + EasyMock.expect(computeRpcMock.createGlobalAddress(GLOBAL_ADDRESS.toPb(), EMPTY_RPC_OPTIONS)) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + AddressId incompleteId = GlobalAddressId.of("address"); + Operation operation = + compute.create(GLOBAL_ADDRESS.toBuilder().addressId(incompleteId).build()); + assertEquals(globalOperation, operation); + } + + @Test + public void testCreateGlobalAddressWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect( + computeRpcMock.createGlobalAddress(eq(GLOBAL_ADDRESS.toPb()), capture(capturedOptions))) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(GLOBAL_ADDRESS, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testCreateRegionAddress() { + EasyMock.expect(computeRpcMock.createRegionAddress(REGION_ADDRESS_ID.region(), + REGION_ADDRESS.toPb(), EMPTY_RPC_OPTIONS)).andReturn(regionOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + AddressId incompleteId = RegionAddressId.of("region", "address"); + Operation operation = + compute.create(REGION_ADDRESS.toBuilder().addressId(incompleteId).build()); + assertEquals(regionOperation, operation); + } + + @Test + public void testCreateRegionAddressWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createRegionAddress(eq(REGION_ADDRESS_ID.region()), + eq(REGION_ADDRESS.toPb()), capture(capturedOptions))).andReturn(regionOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(REGION_ADDRESS, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(regionOperation, operation); + } + + @Test + public void testCreateSnapshot() { + EasyMock.expect(computeRpcMock.createSnapshot(DISK_ID.zone(), DISK_ID.disk(), + SNAPSHOT_ID.snapshot(), null, EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(SNAPSHOT); + assertEquals(zoneOperation, operation); + } + + @Test + public void testCreateSnapshotWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createSnapshot(eq(DISK_ID.zone()), eq(DISK_ID.disk()), + eq(SNAPSHOT_ID.snapshot()), EasyMock.isNull(), capture(capturedOptions))) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(SNAPSHOT, OPERATION_OPTION_FIELDS); + String 
selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testGetSnapshot() { + EasyMock.expect(computeRpcMock.getSnapshot(SNAPSHOT_ID.snapshot(), EMPTY_RPC_OPTIONS)) + .andReturn(SNAPSHOT.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Snapshot snapshot = compute.getSnapshot(SNAPSHOT_ID.snapshot()); + assertEquals(new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT)), snapshot); + } + + @Test + public void testGetSnapshot_Null() { + EasyMock.expect(computeRpcMock.getSnapshot(SNAPSHOT_ID.snapshot(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getSnapshot(SNAPSHOT_ID.snapshot())); + } + + @Test + public void testGetSnapshotWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getSnapshot(eq(SNAPSHOT_ID.snapshot()), + capture(capturedOptions))).andReturn(SNAPSHOT.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Snapshot snapshot = compute.getSnapshot(SNAPSHOT_ID.snapshot(), SNAPSHOT_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(SNAPSHOT_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT)), snapshot); + } + + @Test + public void testDeleteSnapshot_Operation() { + EasyMock.expect(computeRpcMock.deleteSnapshot(SNAPSHOT_ID.snapshot(), EMPTY_RPC_OPTIONS)) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(globalOperation, compute.deleteSnapshot(SNAPSHOT_ID.snapshot())); + } + + @Test + public void testDeleteSnapshotWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteSnapshot(eq(SNAPSHOT_ID.snapshot()), + capture(capturedOptions))).andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteSnapshot(SNAPSHOT_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testDeleteSnapshot_Null() { + EasyMock.expect(computeRpcMock.deleteSnapshot(SNAPSHOT_ID.snapshot(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deleteSnapshot(SNAPSHOT_ID)); + } + + @Test + public void testListSnapshots() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList snapshotList = ImmutableList.of( + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT)), + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(snapshotList, SnapshotInfo.TO_PB_FUNCTION)); + 
EasyMock.expect(computeRpcMock.listSnapshots(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSnapshots(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(snapshotList.toArray(), Iterables.toArray(page.values(), Snapshot.class)); + } + + @Test + public void testListSnapshotsNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList snapshotList = ImmutableList.of( + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT)), + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT))); + ImmutableList nextSnapshotList = ImmutableList.of( + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(snapshotList, SnapshotInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextSnapshotList, SnapshotInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listSnapshots(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listSnapshots(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listSnapshots(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(snapshotList.toArray(), Iterables.toArray(page.values(), Snapshot.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextSnapshotList.toArray(), Iterables.toArray(page.values(), Snapshot.class)); + } + + @Test + public void testListEmptySnapshots() { + compute = options.service(); + ImmutableList snapshots = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, snapshots); + EasyMock.expect(computeRpcMock.listSnapshots(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSnapshots(); + assertNull(page.nextPageCursor()); + assertArrayEquals(snapshots.toArray(), Iterables.toArray(page.values(), Snapshot.class)); + } + + @Test + public void testListSnapshotsWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList snapshotList = ImmutableList.of( + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT)), + new Snapshot(compute, new SnapshotInfo.BuilderImpl(SNAPSHOT))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(snapshotList, SnapshotInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listSnapshots(SNAPSHOT_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSnapshots(SNAPSHOT_LIST_PAGE_SIZE, SNAPSHOT_LIST_PAGE_TOKEN, + SNAPSHOT_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(snapshotList.toArray(), Iterables.toArray(page.values(), Snapshot.class)); + } + + @Test + public void testCreateImage() { + EasyMock.expect(computeRpcMock.createImage(IMAGE.toPb(), EMPTY_RPC_OPTIONS)) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(IMAGE); + assertEquals(globalOperation, operation); + } + + @Test + public void testCreateImageWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createImage(eq(IMAGE.toPb()), capture(capturedOptions))) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = 
options.service(); + Operation operation = compute.create(IMAGE, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testGetImage() { + EasyMock.expect( + computeRpcMock.getImage(IMAGE_ID.project(), IMAGE_ID.image(), EMPTY_RPC_OPTIONS)) + .andReturn(IMAGE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Image image = compute.getImage(IMAGE_ID); + assertEquals(new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), image); + } + + @Test + public void testGetImage_Null() { + EasyMock.expect( + computeRpcMock.getImage(IMAGE_ID.project(), IMAGE_ID.image(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getImage(IMAGE_ID)); + } + + @Test + public void testGetImageWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getImage(eq(IMAGE_ID.project()), eq(IMAGE_ID.image()), + capture(capturedOptions))).andReturn(IMAGE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Image image = compute.getImage(IMAGE_ID, IMAGE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(IMAGE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("sourceDisk")); + assertTrue(selector.contains("rawDisk")); + assertTrue(selector.contains("description")); + assertEquals(42, selector.length()); + assertEquals(new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), image); + } + + @Test + public void testDeleteImage_Operation() { + EasyMock.expect(computeRpcMock.deleteImage(IMAGE_ID.project(), IMAGE_ID.image(), + EMPTY_RPC_OPTIONS)).andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(globalOperation, compute.deleteImage(IMAGE_ID)); + } + + @Test + public void testDeleteImageWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteImage(eq(PROJECT), eq(IMAGE_ID.image()), + capture(capturedOptions))).andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteImage(ImageId.of("image"), OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testDeleteImage_Null() { + EasyMock.expect(computeRpcMock.deleteImage(IMAGE_ID.project(), IMAGE_ID.image(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deleteImage(IMAGE_ID)); + } + + @Test + public void testDeprecateImage_Operation() { + EasyMock.expect(computeRpcMock.deprecateImage(IMAGE_ID.project(), IMAGE_ID.image(), + DEPRECATION_STATUS.toPb(), EMPTY_RPC_OPTIONS)).andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(globalOperation, 
compute.deprecate(IMAGE_ID, DEPRECATION_STATUS)); + } + + @Test + public void testDeprecateImageWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deprecateImage(eq(PROJECT), eq(IMAGE_ID.image()), + eq(DEPRECATION_STATUS.toPb()), capture(capturedOptions))).andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = + compute.deprecate(ImageId.of("image"), DEPRECATION_STATUS, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testDeprecateImage_Null() { + EasyMock.expect(computeRpcMock.deprecateImage(IMAGE_ID.project(), IMAGE_ID.image(), + DEPRECATION_STATUS.toPb(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deprecate(IMAGE_ID, DEPRECATION_STATUS)); + } + + @Test + public void testListImages() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList imageList = ImmutableList.of( + new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), + new Image(compute, new ImageInfo.BuilderImpl(IMAGE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(imageList, ImageInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listImages(PROJECT, EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(imageList.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testListImagesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList imageList = ImmutableList.of( + new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), + new Image(compute, new ImageInfo.BuilderImpl(IMAGE))); + ImmutableList nextImageList = ImmutableList.of( + new Image(compute, new ImageInfo.BuilderImpl(IMAGE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(imageList, ImageInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextImageList, ImageInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listImages(PROJECT, EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listImages(PROJECT, nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(imageList.toArray(), Iterables.toArray(page.values(), Image.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextImageList.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testListImagesForProject() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList imageList = ImmutableList.of( + new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), + new Image(compute, new ImageInfo.BuilderImpl(IMAGE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(imageList, 
ImageInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listImages("otherProject", EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages("otherProject"); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(imageList.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testListEmptyImages() { + compute = options.service(); + ImmutableList images = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, images); + EasyMock.expect(computeRpcMock.listImages(PROJECT, EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages(); + assertNull(page.nextPageCursor()); + assertArrayEquals(images.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testListEmptyImagesForProject() { + compute = options.service(); + ImmutableList images = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, images); + EasyMock.expect(computeRpcMock.listImages("otherProject", EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages("otherProject"); + assertNull(page.nextPageCursor()); + assertArrayEquals(images.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testListImagesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList imageList = ImmutableList.of( + new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), + new Image(compute, new ImageInfo.BuilderImpl(IMAGE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(imageList, ImageInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listImages(PROJECT, IMAGE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages(IMAGE_LIST_PAGE_SIZE, IMAGE_LIST_PAGE_TOKEN, + IMAGE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(imageList.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testListImagesForProjectWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList imageList = ImmutableList.of( + new Image(compute, new ImageInfo.BuilderImpl(IMAGE)), + new Image(compute, new ImageInfo.BuilderImpl(IMAGE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(imageList, ImageInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listImages("other", IMAGE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listImages("other", IMAGE_LIST_PAGE_SIZE, IMAGE_LIST_PAGE_TOKEN, + IMAGE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(imageList.toArray(), Iterables.toArray(page.values(), Image.class)); + } + + @Test + public void testGetDisk() { + EasyMock.expect(computeRpcMock.getDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(DISK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Disk disk = compute.getDisk(DISK_ID); + assertEquals(new Disk(compute, new DiskInfo.BuilderImpl(DISK)), disk); + } + + @Test + public void testGetDisk_Null() { + EasyMock.expect(computeRpcMock.getDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getDisk(DISK_ID)); + } + + @Test + public void testGetDiskWithSelectedFields() { + 
Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getDisk(eq(DISK_ID.zone()), eq(DISK_ID.disk()), + capture(capturedOptions))).andReturn(DISK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Disk disk = compute.getDisk(DISK_ID, DISK_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(DISK_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("type")); + assertTrue(selector.contains("sourceImage")); + assertTrue(selector.contains("sourceSnapshot")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(55, selector.length()); + assertEquals(new Disk(compute, new DiskInfo.BuilderImpl(DISK)), disk); + } + + @Test + public void testDeleteDisk_Operation() { + EasyMock.expect(computeRpcMock.deleteDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.deleteDisk(DISK_ID)); + } + + @Test + public void testDeleteDiskWithSelectedFields_Operation() { + Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteDisk(eq(DISK_ID.zone()), eq(DISK_ID.disk()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteDisk(DISK_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testDeleteDisk_Null() { + EasyMock.expect(computeRpcMock.deleteDisk(DISK_ID.zone(), DISK_ID.disk(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deleteDisk(DISK_ID)); + } + + @Test + public void testListDisks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList<Disk> diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Disk>> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page<Disk> page = compute.listDisks(DISK_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testListDisksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList<Disk> diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ImmutableList<Disk> nextDiskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Disk>> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Disk>> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextDiskList, DiskInfo.TO_PB_FUNCTION)); + Map<ComputeRpc.Option, ?> nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); +
EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextDiskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testListEmptyDisks() { + compute = options.service(); + ImmutableList disks = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, disks); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(disks.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testListDisksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(DISK_ID.zone(), DISK_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(DISK_ID.zone(), DISK_LIST_PAGE_SIZE, DISK_LIST_PAGE_TOKEN, + DISK_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListDisks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListDisksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ImmutableList nextDiskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextDiskList, DiskInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listDisks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listDisks(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listDisks(); + assertEquals(cursor, page.nextPageCursor()); + 
assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextDiskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListEmptyDisks() { + compute = options.service(); + ImmutableList<com.google.api.services.compute.model.Disk> diskList = ImmutableList.of(); + ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Disk>> result = + ComputeRpc.Tuple.<String, Iterable<com.google.api.services.compute.model.Disk>>of(null, diskList); + EasyMock.expect(computeRpcMock.listDisks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page<Disk> page = compute.listDisks(); + assertNull(page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testAggregatedListDisksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList<Disk> diskList = ImmutableList.of( + new Disk(compute, new DiskInfo.BuilderImpl(DISK)), + new Disk(compute, new DiskInfo.BuilderImpl(DISK))); + ComputeRpc.Tuple<String, Iterable<com.google.api.services.compute.model.Disk>> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(diskList, DiskInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listDisks(DISK_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page<Disk> page = compute.listDisks(DISK_AGGREGATED_LIST_PAGE_SIZE, + DISK_AGGREGATED_LIST_PAGE_TOKEN, DISK_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(diskList.toArray(), Iterables.toArray(page.values(), Disk.class)); + } + + @Test + public void testCreateDisk() { + EasyMock.expect(computeRpcMock.createDisk(DISK_ID.zone(), DISK.toPb(), EMPTY_RPC_OPTIONS)) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + DiskId diskId = DiskId.of("zone", "disk"); + DiskTypeId diskTypeId = DiskTypeId.of("zone", "diskType"); + DiskInfo disk = DISK.toBuilder() + .diskId(diskId) + .configuration(StandardDiskConfiguration.of(diskTypeId)) + .build(); + Operation operation = compute.create(disk); + assertEquals(zoneOperation, operation); + } + + @Test + public void testCreateDiskWithOptions() { + Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createDisk(eq(DISK_ID.zone()), eq(DISK.toPb()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(DISK, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testResizeDisk_Operation() { + EasyMock.expect(computeRpcMock.resizeDisk(DISK_ID.zone(), DISK_ID.disk(), 42L, + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.resize(DISK_ID, 42L)); + } + + @Test + public void testResizeDiskWithSelectedFields_Operation() { + Capture<Map<ComputeRpc.Option, ?>> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.resizeDisk(eq(DISK_ID.zone()), eq(DISK_ID.disk()), eq(42L), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.resize(DISK_ID, 42L, OPERATION_OPTION_FIELDS); + String selector = (String)
capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testResizeDisk_Null() { + EasyMock.expect(computeRpcMock.resizeDisk(DISK_ID.zone(), DISK_ID.disk(), 42L, + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.resize(DISK_ID, 42L)); + } + + @Test + public void testGetSubnetwork() { + EasyMock.expect(computeRpcMock.getSubnetwork(SUBNETWORK_ID.region(), SUBNETWORK_ID.subnetwork(), + EMPTY_RPC_OPTIONS)).andReturn(SUBNETWORK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Subnetwork subnetwork = compute.getSubnetwork(SUBNETWORK_ID); + assertEquals(new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), subnetwork); + } + + @Test + public void testGetSubnetwork_Null() { + EasyMock.expect(computeRpcMock.getSubnetwork(SUBNETWORK_ID.region(), SUBNETWORK_ID.subnetwork(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getSubnetwork(SUBNETWORK_ID)); + } + + @Test + public void testGetSubnetworkWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getSubnetwork(eq(SUBNETWORK_ID.region()), + eq(SUBNETWORK_ID.subnetwork()), capture(capturedOptions))).andReturn(SUBNETWORK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Subnetwork subnetwork = compute.getSubnetwork(SUBNETWORK_ID, SUBNETWORK_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(SUBNETWORK_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), subnetwork); + } + + @Test + public void testDeleteSubnetwork_Operation() { + EasyMock.expect(computeRpcMock.deleteSubnetwork(SUBNETWORK_ID.region(), + SUBNETWORK_ID.subnetwork(), EMPTY_RPC_OPTIONS)).andReturn(regionOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(regionOperation, compute.deleteSubnetwork(SUBNETWORK_ID)); + } + + @Test + public void testDeleteSubnetworkWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteSubnetwork(eq(SUBNETWORK_ID.region()), + eq(SUBNETWORK_ID.subnetwork()), capture(capturedOptions))) + .andReturn(regionOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteSubnetwork(SUBNETWORK_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(regionOperation, operation); + } + + @Test + public void testDeleteSubnetwork_Null() { + EasyMock.expect(computeRpcMock.deleteSubnetwork(SUBNETWORK_ID.region(), + SUBNETWORK_ID.subnetwork(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + 
assertNull(compute.deleteSubnetwork(SUBNETWORK_ID)); + } + + @Test + public void testListSubnetworks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList subnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(subnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listSubnetworks(SUBNETWORK_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(SUBNETWORK_ID.region()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(subnetworkList.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testListSubnetworksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList subnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ImmutableList nextSubnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(subnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, + Iterables.transform(nextSubnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listSubnetworks(SUBNETWORK_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.expect(computeRpcMock.listSubnetworks(SUBNETWORK_ID.region(), nextOptions)) + .andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(SUBNETWORK_ID.region()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(subnetworkList.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextSubnetworkList.toArray(), + Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testListEmptySubnetworks() { + compute = options.service(); + ImmutableList subnetworks = + ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + subnetworks); + EasyMock.expect(computeRpcMock.listSubnetworks(SUBNETWORK_ID.region(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(SUBNETWORK_ID.region()); + assertNull(page.nextPageCursor()); + assertArrayEquals(subnetworks.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testListSubnetworksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList subnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(subnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listSubnetworks(SUBNETWORK_ID.region(), SUBNETWORK_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(SUBNETWORK_ID.region(), + 
SUBNETWORK_LIST_PAGE_SIZE, SUBNETWORK_LIST_PAGE_TOKEN, SUBNETWORK_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(subnetworkList.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testAggregatedListSubnetworks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList subnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(subnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listSubnetworks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(subnetworkList.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testAggregatedListSubnetworksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList subnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ImmutableList nextSubnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(subnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, + Iterables.transform(nextSubnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listSubnetworks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listSubnetworks(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(subnetworkList.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextSubnetworkList.toArray(), + Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testAggregatedListEmptySubnetworks() { + compute = options.service(); + ImmutableList subnetworks = + ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + subnetworks); + EasyMock.expect(computeRpcMock.listSubnetworks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listSubnetworks(); + assertNull(page.nextPageCursor()); + assertArrayEquals(subnetworks.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testAggregatedListSubnetworksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList subnetworkList = ImmutableList.of( + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK)), + new Subnetwork(compute, new SubnetworkInfo.BuilderImpl(SUBNETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(subnetworkList, SubnetworkInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listSubnetworks(SUBNETWORK_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = 
compute.listSubnetworks(SUBNETWORK_AGGREGATED_LIST_PAGE_SIZE, + SUBNETWORK_AGGREGATED_LIST_PAGE_TOKEN, SUBNETWORK_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(subnetworkList.toArray(), Iterables.toArray(page.values(), Subnetwork.class)); + } + + @Test + public void testCreateSubnetwork() { + EasyMock.expect(computeRpcMock.createSubnetwork(SUBNETWORK_ID.region(), SUBNETWORK.toPb(), + EMPTY_RPC_OPTIONS)).andReturn(regionOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + SubnetworkId subnetworkId = SubnetworkId.of("region", "network"); + NetworkId networkId = NetworkId.of("network"); + SubnetworkInfo subnetwork = SubnetworkInfo.of(subnetworkId, networkId, "192.168.0.0/16"); + Operation operation = compute.create(subnetwork); + assertEquals(regionOperation, operation); + } + + @Test + public void testCreateSubnetworkWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createSubnetwork(eq(SUBNETWORK_ID.region()), + eq(SUBNETWORK.toPb()), capture(capturedOptions))).andReturn(regionOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(SUBNETWORK, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(regionOperation, operation); + } + + @Test + public void testGetNetwork() { + EasyMock.expect(computeRpcMock.getNetwork(NETWORK_ID.network(), EMPTY_RPC_OPTIONS)) + .andReturn(NETWORK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Network network = compute.getNetwork(NETWORK_ID.network()); + assertEquals(new Network(compute, new NetworkInfo.BuilderImpl(NETWORK)), network); + } + + @Test + public void testGetNetwork_Null() { + EasyMock.expect(computeRpcMock.getNetwork(NETWORK_ID.network(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getNetwork(NETWORK_ID.network())); + } + + @Test + public void testGetNetworkWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getNetwork(eq(NETWORK_ID.network()), capture(capturedOptions))) + .andReturn(NETWORK.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Network network = compute.getNetwork(NETWORK_ID.network(), NETWORK_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(NETWORK_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertTrue(selector.contains("IPv4Range")); + assertTrue(selector.contains("autoCreateSubnetworks")); + assertEquals(55, selector.length()); + assertEquals(new Network(compute, new NetworkInfo.BuilderImpl(NETWORK)), network); + } + + @Test + public void testDeleteNetwork_Operation() { + EasyMock.expect(computeRpcMock.deleteNetwork(NETWORK_ID.network(), EMPTY_RPC_OPTIONS)) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(globalOperation, compute.deleteNetwork(NETWORK_ID)); + } + + @Test + public void testDeleteNetworkWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); 
+ EasyMock.expect(computeRpcMock.deleteNetwork(eq(NETWORK_ID.network()), + capture(capturedOptions))).andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteNetwork(NETWORK_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testDeleteNetwork_Null() { + EasyMock.expect(computeRpcMock.deleteNetwork(NETWORK_ID.network(), EMPTY_RPC_OPTIONS)) + .andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deleteNetwork(NETWORK_ID)); + } + + @Test + public void testListNetworks() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList networkList = ImmutableList.of( + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK)), + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(networkList, NetworkInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listNetworks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listNetworks(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(networkList.toArray(), Iterables.toArray(page.values(), Network.class)); + } + + @Test + public void testListNetworksNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList networkList = ImmutableList.of( + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK)), + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK))); + ImmutableList nextNetworkList = ImmutableList.of( + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(networkList, NetworkInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextNetworkList, NetworkInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listNetworks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listNetworks(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listNetworks(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(networkList.toArray(), Iterables.toArray(page.values(), Network.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextNetworkList.toArray(), Iterables.toArray(page.values(), Network.class)); + } + + @Test + public void testListEmptyNetworks() { + compute = options.service(); + ImmutableList networks = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, networks); + EasyMock.expect(computeRpcMock.listNetworks(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listNetworks(); + assertNull(page.nextPageCursor()); + assertArrayEquals(networks.toArray(), Iterables.toArray(page.values(), Network.class)); + } + + @Test + public void testListNetworksWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList networkList = 
ImmutableList.of( + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK)), + new Network(compute, new NetworkInfo.BuilderImpl(NETWORK))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(networkList, NetworkInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listNetworks(NETWORK_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listNetworks(NETWORK_LIST_PAGE_SIZE, NETWORK_LIST_PAGE_TOKEN, + NETWORK_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(networkList.toArray(), Iterables.toArray(page.values(), Network.class)); + } + + @Test + public void testCreateNetwork() { + EasyMock.expect(computeRpcMock.createNetwork(NETWORK.toPb(), EMPTY_RPC_OPTIONS)) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + NetworkInfo network = + NetworkInfo.of(NetworkId.of("network"), StandardNetworkConfiguration.of("192.168.0.0/16")); + Operation operation = compute.create(network); + assertEquals(globalOperation, operation); + } + + @Test + public void testCreateNetworkWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createNetwork(eq(NETWORK.toPb()), capture(capturedOptions))) + .andReturn(globalOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(NETWORK, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(globalOperation, operation); + } + + @Test + public void testGetInstance() { + EasyMock.expect(computeRpcMock.getInstance(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(INSTANCE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Instance instance = compute.getInstance(INSTANCE_ID); + assertEquals(new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), instance); + } + + @Test + public void testGetInstance_Null() { + EasyMock.expect(computeRpcMock.getInstance(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getInstance(INSTANCE_ID)); + } + + @Test + public void testGetInstanceWithSelectedFields() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.getInstance(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + capture(capturedOptions))).andReturn(INSTANCE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Instance instance = compute.getInstance(INSTANCE_ID, INSTANCE_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(INSTANCE_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), instance); + } + + @Test + public void testDeleteInstance_Operation() { + EasyMock.expect(computeRpcMock.deleteInstance(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, 
compute.deleteInstance(INSTANCE_ID)); + } + + @Test + public void testDeleteInstanceWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteInstance(eq(INSTANCE_ID.zone()), + eq(INSTANCE_ID.instance()), capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteInstance(INSTANCE_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testDeleteInstance_Null() { + EasyMock.expect(computeRpcMock.deleteInstance(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deleteInstance(INSTANCE_ID)); + } + + @Test + public void testListInstances() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList instanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(instanceList, InstanceInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listInstances(INSTANCE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(INSTANCE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testListInstancesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList instanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ImmutableList nextInstanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(instanceList, InstanceInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextInstanceList, InstanceInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listInstances(INSTANCE_ID.zone(), EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.expect(computeRpcMock.listInstances(INSTANCE_ID.zone(), nextOptions)) + .andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(INSTANCE_ID.zone()); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextInstanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testListEmptyInstances() { + compute = options.service(); + ImmutableList instances = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, instances); + EasyMock.expect(computeRpcMock.listInstances(INSTANCE_ID.zone(), 
EMPTY_RPC_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(INSTANCE_ID.zone()); + assertNull(page.nextPageCursor()); + assertArrayEquals(instances.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testListInstancesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList instanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(instanceList, InstanceInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listInstances(INSTANCE_ID.zone(), INSTANCE_LIST_OPTIONS)) + .andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(INSTANCE_ID.zone(), INSTANCE_LIST_PAGE_SIZE, + INSTANCE_LIST_PAGE_TOKEN, INSTANCE_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testAggregatedListInstances() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList instanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(instanceList, InstanceInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listInstances(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testAggregatedListInstancesNextPage() { + String cursor = "cursor"; + String nextCursor = "nextCursor"; + compute = options.service(); + ImmutableList instanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ImmutableList nextInstanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(instanceList, InstanceInfo.TO_PB_FUNCTION)); + ComputeRpc.Tuple> nextResult = + ComputeRpc.Tuple.of(nextCursor, Iterables.transform(nextInstanceList, InstanceInfo.TO_PB_FUNCTION)); + Map nextOptions = ImmutableMap.of(ComputeRpc.Option.PAGE_TOKEN, cursor); + EasyMock.expect(computeRpcMock.listInstances(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.expect(computeRpcMock.listInstances(nextOptions)).andReturn(nextResult); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + page = page.nextPage(); + assertEquals(nextCursor, page.nextPageCursor()); + assertArrayEquals(nextInstanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testAggregatedListEmptyInstances() { + compute = options.service(); + ImmutableList instanceList = ImmutableList.of(); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.>of(null, + instanceList); + EasyMock.expect(computeRpcMock.listInstances(EMPTY_RPC_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = 
compute.listInstances(); + assertNull(page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testAggregatedListInstancesWithOptions() { + String cursor = "cursor"; + compute = options.service(); + ImmutableList instanceList = ImmutableList.of( + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE)), + new Instance(compute, new InstanceInfo.BuilderImpl(INSTANCE))); + ComputeRpc.Tuple> result = + ComputeRpc.Tuple.of(cursor, Iterables.transform(instanceList, InstanceInfo.TO_PB_FUNCTION)); + EasyMock.expect(computeRpcMock.listInstances(INSTANCE_LIST_OPTIONS)).andReturn(result); + EasyMock.replay(computeRpcMock); + Page page = compute.listInstances(INSTANCE_AGGREGATED_LIST_PAGE_SIZE, + INSTANCE_AGGREGATED_LIST_PAGE_TOKEN, INSTANCE_AGGREGATED_LIST_FILTER); + assertEquals(cursor, page.nextPageCursor()); + assertArrayEquals(instanceList.toArray(), Iterables.toArray(page.values(), Instance.class)); + } + + @Test + public void testCreateInstance() { + EasyMock.expect(computeRpcMock.createInstance(INSTANCE_ID.zone(), INSTANCE.toPb(), + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + InstanceInfo instance = InstanceInfo.of(InstanceId.of("zone", "instance"), + MachineTypeId.of("zone", "type"), ATTACHED_DISK, + NetworkInterface.of(NetworkId.of("network"))); + Operation operation = compute.create(instance); + assertEquals(zoneOperation, operation); + } + + @Test + public void testCreateInstanceWithOptions() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.createInstance(eq(INSTANCE_ID.zone()), eq(INSTANCE.toPb()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.create(INSTANCE, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testAddAccessConfig_Operation() { + AccessConfig accessConfig = AccessConfig.of("192.168.1.1"); + EasyMock.expect(computeRpcMock.addAccessConfig(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "networkInterface", accessConfig.toPb(), EMPTY_RPC_OPTIONS)) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, + compute.addAccessConfig(INSTANCE_ID, "networkInterface", accessConfig)); + } + + @Test + public void testAddAccessConfigWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + AccessConfig accessConfig = AccessConfig.of("192.168.1.1"); + EasyMock.expect(computeRpcMock.addAccessConfig(eq(INSTANCE_ID.zone()), + eq(INSTANCE_ID.instance()), eq("networkInterface"), eq(accessConfig.toPb()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.addAccessConfig(INSTANCE_ID, "networkInterface", accessConfig, + OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + 
assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testAddAccessConfig_Null() { + AccessConfig accessConfig = AccessConfig.of("192.168.1.1"); + EasyMock.expect(computeRpcMock.addAccessConfig(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "networkInterface", accessConfig.toPb(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.addAccessConfig(INSTANCE_ID, "networkInterface", accessConfig)); + } + + @Test + public void testAttachDisk_Operation() { + AttachedDisk attachedDisk = AttachedDisk.of(PERSISTENT_DISK_CONFIGURATION); + EasyMock.expect(computeRpcMock.attachDisk(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + attachedDisk.toPb(), EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.attachDisk(INSTANCE_ID, PERSISTENT_DISK_CONFIGURATION)); + } + + @Test + public void testAttachDiskWithSelectedFields_Operation() { + AttachedDisk attachedDisk = AttachedDisk.of(PERSISTENT_DISK_CONFIGURATION); + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.attachDisk(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + eq(attachedDisk.toPb()), capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = + compute.attachDisk(INSTANCE_ID, PERSISTENT_DISK_CONFIGURATION, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testAttachDisk_Null() { + AttachedDisk attachedDisk = AttachedDisk.of(PERSISTENT_DISK_CONFIGURATION); + EasyMock.expect(computeRpcMock.attachDisk(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + attachedDisk.toPb(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.attachDisk(INSTANCE_ID, PERSISTENT_DISK_CONFIGURATION)); + } + + @Test + public void testAttachDiskName_Operation() { + AttachedDisk attachedDisk = AttachedDisk.of("dev0", PERSISTENT_DISK_CONFIGURATION); + EasyMock.expect(computeRpcMock.attachDisk(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + attachedDisk.toPb(), EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, + compute.attachDisk(INSTANCE_ID, "dev0", PERSISTENT_DISK_CONFIGURATION)); + } + + @Test + public void testAttachDiskNameWithSelectedFields_Operation() { + AttachedDisk attachedDisk = AttachedDisk.of("dev0", PERSISTENT_DISK_CONFIGURATION); + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.attachDisk(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + eq(attachedDisk.toPb()), capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.attachDisk(INSTANCE_ID, "dev0", PERSISTENT_DISK_CONFIGURATION, + OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + 
assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testAttachDiskName_Null() { + AttachedDisk attachedDisk = AttachedDisk.of("dev0", PERSISTENT_DISK_CONFIGURATION); + EasyMock.expect(computeRpcMock.attachDisk(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + attachedDisk.toPb(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.attachDisk(INSTANCE_ID, "dev0", PERSISTENT_DISK_CONFIGURATION)); + } + + @Test + public void testDeleteAccessConfig_Operation() { + EasyMock.expect(computeRpcMock.deleteAccessConfig(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "networkInterface", "accessConfig", EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, + compute.deleteAccessConfig(INSTANCE_ID, "networkInterface", "accessConfig")); + } + + @Test + public void testDeleteAccessConfigWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.deleteAccessConfig(eq(INSTANCE_ID.zone()), + eq(INSTANCE_ID.instance()), eq("networkInterface"), eq("accessConfig"), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.deleteAccessConfig(INSTANCE_ID, "networkInterface", + "accessConfig", OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testDeleteAccessConfig_Null() { + EasyMock.expect(computeRpcMock.deleteAccessConfig(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "networkInterface", "accessConfig", EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.deleteAccessConfig(INSTANCE_ID, "networkInterface", "accessConfig")); + } + + @Test + public void testDetachDisk_Operation() { + EasyMock.expect(computeRpcMock.detachDisk(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "device", EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.detachDisk(INSTANCE_ID, "device")); + } + + @Test + public void testDetachDiskWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.detachDisk(eq(INSTANCE_ID.zone()), + eq(INSTANCE_ID.instance()), eq("device"), capture(capturedOptions))) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.detachDisk(INSTANCE_ID, "device", OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testDetachDisk_Null() { + EasyMock.expect(computeRpcMock.detachDisk(INSTANCE_ID.zone(), INSTANCE_ID.instance(), "device", + 
EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.detachDisk(INSTANCE_ID, "device")); + } + + @Test + public void testSerialPortOutputFromPort() { + String output = "output"; + EasyMock.expect(computeRpcMock.getSerialPortOutput(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + 2, EMPTY_RPC_OPTIONS)).andReturn(output); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(output, compute.getSerialPortOutput(INSTANCE_ID, 2)); + } + + @Test + public void testSerialPortOutputDefault() { + String output = "output"; + EasyMock.expect(computeRpcMock.getSerialPortOutput(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + null, EMPTY_RPC_OPTIONS)).andReturn(output); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(output, compute.getSerialPortOutput(INSTANCE_ID)); + } + + @Test + public void testSerialPortOutputFromPort_Null() { + EasyMock.expect(computeRpcMock.getSerialPortOutput(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + 2, EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getSerialPortOutput(INSTANCE_ID, 2)); + } + + @Test + public void testSerialPortOutputDefault_Null() { + EasyMock.expect(computeRpcMock.getSerialPortOutput(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + null, EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.getSerialPortOutput(INSTANCE_ID)); + } + + @Test + public void testResetInstance_Operation() { + EasyMock.expect(computeRpcMock.reset(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.reset(INSTANCE_ID)); + } + + @Test + public void testResetInstanceWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.reset(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.reset(INSTANCE_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testResetInstance_Null() { + EasyMock.expect(computeRpcMock.reset(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.reset(INSTANCE_ID)); + } + + @Test + public void testSetDiskAutodelete_Operation() { + EasyMock.expect(computeRpcMock.setDiskAutoDelete(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "device", true, EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.setDiskAutoDelete(INSTANCE_ID, "device", true)); + } + + @Test + public void testSetDiskAutodeleteWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.setDiskAutoDelete(eq(INSTANCE_ID.zone()), + eq(INSTANCE_ID.instance()), eq("device"), eq(true), 
capture(capturedOptions))) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = + compute.setDiskAutoDelete(INSTANCE_ID, "device", true, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testSetDiskAutodelete_Null() { + EasyMock.expect(computeRpcMock.setDiskAutoDelete(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + "device", false, EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.setDiskAutoDelete(INSTANCE_ID, "device", false)); + } + + @Test + public void testSetMachineType_Operation() { + EasyMock.expect(computeRpcMock.setMachineType(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + MACHINE_TYPE_ID.selfLink(), EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, + compute.setMachineType(INSTANCE_ID, MachineTypeId.of("zone", "type"))); + } + + @Test + public void testSetMachineTypeWithOptions_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.setMachineType(eq(INSTANCE_ID.zone()), + eq(INSTANCE_ID.instance()), eq(MACHINE_TYPE_ID.selfLink()), capture(capturedOptions))) + .andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.setMachineType(INSTANCE_ID, MachineTypeId.of("zone", "type"), + OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testSetMachineType_Null() { + EasyMock.expect(computeRpcMock.setMachineType(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + MACHINE_TYPE_ID.selfLink(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.setMachineType(INSTANCE_ID, MachineTypeId.of("zone", "type"))); + } + + @Test + public void testSetMetadata_Operation() { + Metadata metadata = Metadata.builder() + .add("key", "value") + .fingerprint("fingerprint") + .build(); + EasyMock.expect(computeRpcMock.setMetadata(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + metadata.toPb(), EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.setMetadata(INSTANCE_ID, metadata)); + } + + @Test + public void testSetMetadataWithOptions_Operation() { + Capture> capturedOptions = Capture.newInstance(); + Metadata metadata = Metadata.builder() + .add("key", "value") + .fingerprint("fingerprint") + .build(); + EasyMock.expect(computeRpcMock.setMetadata(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + eq(metadata.toPb()), capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.setMetadata(INSTANCE_ID, metadata, OPERATION_OPTION_FIELDS); + String selector = (String) 
capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testSetMetadata_Null() { + Metadata metadata = Metadata.builder() + .add("key", "value") + .fingerprint("fingerprint") + .build(); + EasyMock.expect(computeRpcMock.setMetadata(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + metadata.toPb(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.setMetadata(INSTANCE_ID, metadata)); + } + + @Test + public void testSetSchedulingOptions_Operation() { + SchedulingOptions schedulingOptions = + SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + EasyMock.expect(computeRpcMock.setScheduling(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + schedulingOptions.toPb(), EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.setSchedulingOptions(INSTANCE_ID, schedulingOptions)); + } + + @Test + public void testSetSchedulingOptionsWithOptions_Operation() { + Capture> capturedOptions = Capture.newInstance(); + SchedulingOptions schedulingOptions = + SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + EasyMock.expect(computeRpcMock.setScheduling(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + eq(schedulingOptions.toPb()), capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = + compute.setSchedulingOptions(INSTANCE_ID, schedulingOptions, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testSetSchedulingOptions_Null() { + SchedulingOptions schedulingOptions = + SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + EasyMock.expect(computeRpcMock.setScheduling(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + schedulingOptions.toPb(), EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.setSchedulingOptions(INSTANCE_ID, schedulingOptions)); + } + + @Test + public void testTags_Operation() { + Tags tags = Tags.of("tag1", "tag2"); + EasyMock.expect(computeRpcMock.setTags(INSTANCE_ID.zone(), INSTANCE_ID.instance(), tags.toPb(), + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.setTags(INSTANCE_ID, tags)); + } + + @Test + public void testSetTagsWithOptions_Operation() { + Tags tags = Tags.of("tag1", "tag2"); + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.setTags(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + eq(tags.toPb()), capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.setTags(INSTANCE_ID, tags, OPERATION_OPTION_FIELDS); + String selector = (String) 
capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testSetTags_Null() { + Tags tags = Tags.of("tag1", "tag2"); + EasyMock.expect(computeRpcMock.setTags(INSTANCE_ID.zone(), INSTANCE_ID.instance(), tags.toPb(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.setTags(INSTANCE_ID, tags)); + } + + @Test + public void testStartInstance_Operation() { + EasyMock.expect(computeRpcMock.start(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.start(INSTANCE_ID)); + } + + @Test + public void testStartInstanceWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.start(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.start(INSTANCE_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testStartInstance_Null() { + EasyMock.expect(computeRpcMock.start(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.start(INSTANCE_ID)); + } + + @Test + public void testStopInstance_Operation() { + EasyMock.expect(computeRpcMock.stop(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertEquals(zoneOperation, compute.stop(INSTANCE_ID)); + } + + @Test + public void testStopInstanceWithSelectedFields_Operation() { + Capture> capturedOptions = Capture.newInstance(); + EasyMock.expect(computeRpcMock.stop(eq(INSTANCE_ID.zone()), eq(INSTANCE_ID.instance()), + capture(capturedOptions))).andReturn(zoneOperation.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.service(); + Operation operation = compute.stop(INSTANCE_ID, OPERATION_OPTION_FIELDS); + String selector = (String) capturedOptions.getValue().get(OPERATION_OPTION_FIELDS.rpcOption()); + assertTrue(selector.contains("selfLink")); + assertTrue(selector.contains("id")); + assertTrue(selector.contains("description")); + assertEquals(23, selector.length()); + assertEquals(zoneOperation, operation); + } + + @Test + public void testStopInstance_Null() { + EasyMock.expect(computeRpcMock.stop(INSTANCE_ID.zone(), INSTANCE_ID.instance(), + EMPTY_RPC_OPTIONS)).andReturn(null); + EasyMock.replay(computeRpcMock); + compute = options.service(); + assertNull(compute.stop(INSTANCE_ID)); + } + + @Test + public void testRetryableException() { + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andThrow(new ComputeException(500, "InternalError")) + 
.andReturn(DISK_TYPE.toPb()); + EasyMock.replay(computeRpcMock); + compute = options.toBuilder().retryParams(RetryParams.defaultInstance()).build().service(); + DiskType diskType = compute.getDiskType(DISK_TYPE_ID); + assertEquals(DISK_TYPE, diskType); + } + + @Test + public void testNonRetryableException() { + String exceptionMessage = "Not Implemented"; + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andThrow(new ComputeException(501, exceptionMessage)); + EasyMock.replay(computeRpcMock); + compute = options.toBuilder().retryParams(RetryParams.defaultInstance()).build().service(); + thrown.expect(ComputeException.class); + thrown.expectMessage(exceptionMessage); + compute.getDiskType(DISK_TYPE_ID); + } + + @Test + public void testRuntimeException() { + String exceptionMessage = "Artificial runtime exception"; + EasyMock.expect( + computeRpcMock.getDiskType(DISK_TYPE_ID.zone(), DISK_TYPE_ID.type(), EMPTY_RPC_OPTIONS)) + .andThrow(new RuntimeException(exceptionMessage)); + EasyMock.replay(computeRpcMock); + compute = options.toBuilder().retryParams(RetryParams.defaultInstance()).build().service(); + thrown.expect(ComputeException.class); + thrown.expectMessage(exceptionMessage); + compute.getDiskType(DISK_TYPE_ID); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DeprecationStatusTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DeprecationStatusTest.java new file mode 100644 index 000000000000..57fc8ba0a786 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DeprecationStatusTest.java @@ -0,0 +1,181 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +import org.joda.time.format.DateTimeFormatter; +import org.joda.time.format.ISODateTimeFormat; +import org.junit.Test; + +public class DeprecationStatusTest { + + private static final DateTimeFormatter TIMESTAMP_FORMATTER = ISODateTimeFormat.dateTime(); + private static final Long DELETED_MILLIS = 1453293540000L; + private static final Long DEPRECATED_MILLIS = 1453293420000L; + private static final Long OBSOLETE_MILLIS = 1453293480000L; + private static final String DELETED = TIMESTAMP_FORMATTER.print(DELETED_MILLIS); + private static final String DEPRECATED = TIMESTAMP_FORMATTER.print(DEPRECATED_MILLIS); + private static final String OBSOLETE = TIMESTAMP_FORMATTER.print(OBSOLETE_MILLIS); + private static final DiskTypeId DISK_TYPE_ID = DiskTypeId.of("project", "zone", "diskType"); + private static final MachineTypeId MACHINE_TYPE_ID = + MachineTypeId.of("project", "zone", "machineType"); + private static final DeprecationStatus.Status STATUS = DeprecationStatus.Status.DELETED; + private static final DeprecationStatus DISK_TYPE_STATUS = + DeprecationStatus.builder(STATUS) + .replacement(DISK_TYPE_ID) + .deprecated(DEPRECATED) + .obsolete(OBSOLETE) + .deleted(DELETED) + .build(); + private static final DeprecationStatus DISK_TYPE_STATUS_MILLIS = + DeprecationStatus.builder(STATUS) + .replacement(DISK_TYPE_ID) + .deprecated(DEPRECATED_MILLIS) + .obsolete(OBSOLETE_MILLIS) + .deleted(DELETED_MILLIS) + .build(); + private static final DeprecationStatus MACHINE_TYPE_STATUS = + DeprecationStatus.builder(STATUS, MACHINE_TYPE_ID) + .deprecated(DEPRECATED) + .obsolete(OBSOLETE) + .deleted(DELETED) + .build(); + + @Test + public void testBuilder() { + compareDeprecationStatus(DISK_TYPE_STATUS, DISK_TYPE_STATUS_MILLIS); + assertEquals(DELETED, DISK_TYPE_STATUS.deleted()); + assertEquals(DEPRECATED, DISK_TYPE_STATUS.deprecated()); + assertEquals(OBSOLETE, DISK_TYPE_STATUS.obsolete()); + assertEquals(DISK_TYPE_ID, DISK_TYPE_STATUS.replacement()); + assertEquals(DEPRECATED_MILLIS, DISK_TYPE_STATUS.deprecatedMillis()); + assertEquals(DELETED_MILLIS, DISK_TYPE_STATUS.deletedMillis()); + assertEquals(OBSOLETE_MILLIS, DISK_TYPE_STATUS.obsoleteMillis()); + assertEquals(STATUS, DISK_TYPE_STATUS.status()); + assertEquals(DELETED, DISK_TYPE_STATUS_MILLIS.deleted()); + assertEquals(DEPRECATED, DISK_TYPE_STATUS_MILLIS.deprecated()); + assertEquals(OBSOLETE, DISK_TYPE_STATUS_MILLIS.obsolete()); + assertEquals(DISK_TYPE_ID, DISK_TYPE_STATUS_MILLIS.replacement()); + assertEquals(DEPRECATED_MILLIS, DISK_TYPE_STATUS_MILLIS.deprecatedMillis()); + assertEquals(DELETED_MILLIS, DISK_TYPE_STATUS_MILLIS.deletedMillis()); + assertEquals(OBSOLETE_MILLIS, DISK_TYPE_STATUS_MILLIS.obsoleteMillis()); + assertEquals(STATUS, DISK_TYPE_STATUS.status()); + assertEquals(DELETED, MACHINE_TYPE_STATUS.deleted()); + assertEquals(DEPRECATED, MACHINE_TYPE_STATUS.deprecated()); + assertEquals(OBSOLETE, MACHINE_TYPE_STATUS.obsolete()); + assertEquals(DEPRECATED_MILLIS, MACHINE_TYPE_STATUS.deprecatedMillis()); + assertEquals(DELETED_MILLIS, MACHINE_TYPE_STATUS.deletedMillis()); + assertEquals(OBSOLETE_MILLIS, MACHINE_TYPE_STATUS.obsoleteMillis()); + assertEquals(MACHINE_TYPE_ID, MACHINE_TYPE_STATUS.replacement()); + assertEquals(STATUS, MACHINE_TYPE_STATUS.status()); + } + + @Test + public void testGettersIllegalArgument() { + DeprecationStatus deprecationStatus = + 
        DeprecationStatus.builder(STATUS, MACHINE_TYPE_ID)
+            .deprecated("deprecated")
+            .obsolete("obsolete")
+            .deleted("delete")
+            .build();
+    assertEquals("deprecated", deprecationStatus.deprecated());
+    try {
+      deprecationStatus.deprecatedMillis();
+      fail("Expected IllegalStateException");
+    } catch (IllegalStateException ex) {
+      // expected
+    }
+    assertEquals("obsolete", deprecationStatus.obsolete());
+    try {
+      deprecationStatus.obsoleteMillis();
+      fail("Expected IllegalStateException");
+    } catch (IllegalStateException ex) {
+      // expected
+    }
+    assertEquals("delete", deprecationStatus.deleted());
+    try {
+      deprecationStatus.deletedMillis();
+      fail("Expected IllegalStateException");
+    } catch (IllegalStateException ex) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testToBuilder() {
+    compareDeprecationStatus(DISK_TYPE_STATUS, DISK_TYPE_STATUS.toBuilder().build());
+    compareDeprecationStatus(MACHINE_TYPE_STATUS, MACHINE_TYPE_STATUS.toBuilder().build());
+    DeprecationStatus deprecationStatus = DISK_TYPE_STATUS.toBuilder()
+        .deleted(DEPRECATED)
+        .build();
+    assertEquals(DEPRECATED, deprecationStatus.deleted());
+    deprecationStatus = deprecationStatus.toBuilder().deleted(DELETED).build();
+    compareDeprecationStatus(DISK_TYPE_STATUS, deprecationStatus);
+  }
+
+  @Test
+  public void testToBuilderIncomplete() {
+    DeprecationStatus diskStatus = DeprecationStatus.of(STATUS, DISK_TYPE_ID);
+    assertEquals(diskStatus, diskStatus.toBuilder().build());
+  }
+
+  @Test
+  public void testOf() {
+    DeprecationStatus diskStatus = DeprecationStatus.of(STATUS, DISK_TYPE_ID);
+    assertNull(diskStatus.deleted());
+    assertNull(diskStatus.deprecated());
+    assertNull(diskStatus.obsolete());
+    assertEquals(DISK_TYPE_ID, diskStatus.replacement());
+    assertEquals(STATUS, diskStatus.status());
+  }
+
+  @Test
+  public void testToAndFromPb() {
+    DeprecationStatus diskStatus =
+        DeprecationStatus.fromPb(DISK_TYPE_STATUS.toPb(), DiskTypeId.FROM_URL_FUNCTION);
+    compareDeprecationStatus(DISK_TYPE_STATUS, diskStatus);
+    DeprecationStatus machineStatus =
+        DeprecationStatus.fromPb(MACHINE_TYPE_STATUS.toPb(), MachineTypeId.FROM_URL_FUNCTION);
+    compareDeprecationStatus(MACHINE_TYPE_STATUS, machineStatus);
+    diskStatus = DeprecationStatus.builder(STATUS, DISK_TYPE_ID).deprecated(DEPRECATED).build();
+    assertEquals(diskStatus,
+        DeprecationStatus.fromPb(diskStatus.toPb(), DiskTypeId.FROM_URL_FUNCTION));
+    machineStatus =
+        DeprecationStatus.builder(STATUS, MACHINE_TYPE_ID).deprecated(DEPRECATED).build();
+    assertEquals(machineStatus,
+        DeprecationStatus.fromPb(machineStatus.toPb(), MachineTypeId.FROM_URL_FUNCTION));
+    diskStatus = DeprecationStatus.of(STATUS, DISK_TYPE_ID);
+    assertEquals(diskStatus,
+        DeprecationStatus.fromPb(diskStatus.toPb(), DiskTypeId.FROM_URL_FUNCTION));
+  }
+
+  private void compareDeprecationStatus(DeprecationStatus expected, DeprecationStatus value) {
+    assertEquals(expected, value);
+    assertEquals(expected.deleted(), value.deleted());
+    assertEquals(expected.deprecated(), value.deprecated());
+    assertEquals(expected.obsolete(), value.obsolete());
+    assertEquals(expected.deletedMillis(), value.deletedMillis());
+    assertEquals(expected.deprecatedMillis(), value.deprecatedMillis());
+    assertEquals(expected.obsoleteMillis(), value.obsoleteMillis());
+    assertEquals(expected.replacement(), value.replacement());
+    assertEquals(expected.status(), value.status());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git
a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskIdTest.java
new file mode 100644
index 000000000000..df5fee9e6ac7
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskIdTest.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.compute;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class DiskIdTest {
+
+  private static final String PROJECT = "project";
+  private static final String ZONE = "zone";
+  private static final String NAME = "disk";
+  private static final String URL =
+      "https://www.googleapis.com/compute/v1/projects/project/zones/zone/disks/disk";
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void testOf() {
+    DiskId diskId = DiskId.of(PROJECT, ZONE, NAME);
+    assertEquals(PROJECT, diskId.project());
+    assertEquals(ZONE, diskId.zone());
+    assertEquals(NAME, diskId.disk());
+    assertEquals(URL, diskId.selfLink());
+    diskId = DiskId.of(ZONE, NAME);
+    assertNull(diskId.project());
+    assertEquals(ZONE, diskId.zone());
+    assertEquals(NAME, diskId.disk());
+    diskId = DiskId.of(ZoneId.of(ZONE), NAME);
+    assertNull(diskId.project());
+    assertEquals(ZONE, diskId.zone());
+    assertEquals(NAME, diskId.disk());
+  }
+
+  @Test
+  public void testToAndFromUrl() {
+    DiskId diskId = DiskId.of(PROJECT, ZONE, NAME);
+    compareDiskId(diskId, DiskId.fromUrl(diskId.selfLink()));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid disk URL");
+    DiskId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testSetProjectId() {
+    DiskId diskId = DiskId.of(PROJECT, ZONE, NAME);
+    assertSame(diskId, diskId.setProjectId(PROJECT));
+    compareDiskId(diskId, DiskId.of(ZONE, NAME).setProjectId(PROJECT));
+  }
+
+  @Test
+  public void testMatchesUrl() {
+    assertTrue(DiskId.matchesUrl(DiskId.of(PROJECT, ZONE, NAME).selfLink()));
+    assertFalse(DiskId.matchesUrl("notMatchingUrl"));
+  }
+
+  private void compareDiskId(DiskId expected, DiskId value) {
+    assertEquals(expected, value);
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.zone(), value.zone());
+    assertEquals(expected.disk(), value.disk());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskImageConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskImageConfigurationTest.java
new file mode 100644
index 000000000000..f5ecf03ddf2a
--- /dev/null
+++ 
b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskImageConfigurationTest.java @@ -0,0 +1,110 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import org.junit.Assert; +import org.junit.Test; + +public class DiskImageConfigurationTest { + + private static final DiskId SOURCE_DISK = DiskId.of("project", "zone", "disk"); + private static final String SOURCE_DISK_ID = "diskId"; + private static final Long ARCHIVE_SIZE_BYTES = 42L; + private static final ImageConfiguration.SourceType SOURCE_TYPE = ImageConfiguration.SourceType.RAW; + private static final DiskImageConfiguration CONFIGURATION = + DiskImageConfiguration.builder(SOURCE_DISK) + .sourceDiskId(SOURCE_DISK_ID) + .sourceType(SOURCE_TYPE) + .archiveSizeBytes(ARCHIVE_SIZE_BYTES) + .build(); + + @Test + public void testToBuilder() { + compareDiskImageConfiguration(CONFIGURATION, CONFIGURATION.toBuilder().build()); + DiskId newDisk = DiskId.of("newProject", "newZone", "newDisk"); + String newDiskId = "newDiskId"; + DiskImageConfiguration configuration = CONFIGURATION.toBuilder() + .sourceDisk(newDisk) + .sourceDiskId(newDiskId) + .build(); + assertEquals(newDisk, configuration.sourceDisk()); + assertEquals(newDiskId, configuration.sourceDiskId()); + configuration = configuration.toBuilder() + .sourceDiskId(SOURCE_DISK_ID) + .sourceDisk(SOURCE_DISK) + .build(); + compareDiskImageConfiguration(CONFIGURATION, configuration); + } + + @Test + public void testToBuilderIncomplete() { + DiskImageConfiguration configuration = DiskImageConfiguration.of(SOURCE_DISK); + compareDiskImageConfiguration(configuration, configuration.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(SOURCE_TYPE, CONFIGURATION.sourceType()); + assertEquals(SOURCE_DISK, CONFIGURATION.sourceDisk()); + assertEquals(SOURCE_DISK_ID, CONFIGURATION.sourceDiskId()); + assertEquals(ARCHIVE_SIZE_BYTES, CONFIGURATION.archiveSizeBytes()); + Assert.assertEquals(ImageConfiguration.Type.DISK, CONFIGURATION.type()); + } + + @Test + public void testToAndFromPb() { + assertTrue(ImageConfiguration.fromPb(CONFIGURATION.toPb()) instanceof DiskImageConfiguration); + compareDiskImageConfiguration(CONFIGURATION, + ImageConfiguration.fromPb(CONFIGURATION.toPb())); + DiskImageConfiguration configuration = DiskImageConfiguration.of(SOURCE_DISK); + compareDiskImageConfiguration(configuration, + DiskImageConfiguration.fromPb(configuration.toPb())); + } + + @Test + public void testOf() { + DiskImageConfiguration configuration = DiskImageConfiguration.of(SOURCE_DISK); + Assert.assertEquals(ImageConfiguration.Type.DISK, configuration.type()); + assertNull(configuration.sourceDiskId()); + assertNull(configuration.sourceType()); + assertNull(configuration.archiveSizeBytes()); + assertEquals(SOURCE_DISK, 
configuration.sourceDisk()); + } + + @Test + public void testSetProjectId() { + DiskImageConfiguration configuration = CONFIGURATION.toBuilder() + .sourceDisk(DiskId.of("zone", "disk")) + .build(); + compareDiskImageConfiguration(CONFIGURATION, configuration.setProjectId("project")); + } + + private void compareDiskImageConfiguration(DiskImageConfiguration expected, + DiskImageConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.type(), value.type()); + assertEquals(expected.archiveSizeBytes(), value.archiveSizeBytes()); + assertEquals(expected.sourceDisk(), value.sourceDisk()); + assertEquals(expected.sourceDiskId(), value.sourceDiskId()); + assertEquals(expected.sourceType(), value.sourceType()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskInfoTest.java new file mode 100644 index 000000000000..7e4bbc31b617 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskInfoTest.java @@ -0,0 +1,267 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.api.services.compute.model.Disk; +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class DiskInfoTest { + + private static final String GENERATED_ID = "42"; + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final DiskInfo.CreationStatus CREATION_STATUS = DiskInfo.CreationStatus.READY; + private static final String DESCRIPTION = "description"; + private static final Long SIZE_GB = 500L; + private static final DiskTypeId TYPE = DiskTypeId.of("project", "zone", "disk"); + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final List ATTACHED_INSTANCES = ImmutableList.of( + InstanceId.of("project", "zone", "instance1"), + InstanceId.of("project", "zone", "instance2")); + private static final SnapshotId SNAPSHOT = SnapshotId.of("project", "snapshot"); + private static final ImageId IMAGE = ImageId.of("project", "image"); + private static final String SNAPSHOT_ID = "snapshotId"; + private static final String IMAGE_ID = "snapshotId"; + private static final Long LAST_ATTACH_TIMESTAMP = 1453293600000L; + private static final Long LAST_DETACH_TIMESTAMP = 1453293660000L; + private static final StandardDiskConfiguration DISK_CONFIGURATION = + StandardDiskConfiguration.builder() + .sizeGb(SIZE_GB) + .diskType(TYPE) + .build(); + private static final SnapshotDiskConfiguration SNAPSHOT_DISK_CONFIGURATION = + SnapshotDiskConfiguration.builder(SNAPSHOT) + 
.sizeGb(SIZE_GB) + .diskType(TYPE) + .sourceSnapshotId(SNAPSHOT_ID) + .build(); + private static final ImageDiskConfiguration IMAGE_DISK_CONFIGURATION = + ImageDiskConfiguration.builder(IMAGE) + .sizeGb(SIZE_GB) + .diskType(TYPE) + .sourceImageId(IMAGE_ID) + .build(); + private static final DiskInfo DISK_INFO = DiskInfo.builder(DISK_ID, DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + private static final DiskInfo SNAPSHOT_DISK_INFO = + DiskInfo.builder(DISK_ID, SNAPSHOT_DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + private static final DiskInfo IMAGE_DISK_INFO = + DiskInfo.builder(DISK_ID, IMAGE_DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + + @Test + public void testToBuilder() { + compareDiskInfo(DISK_INFO, DISK_INFO.toBuilder().build()); + compareDiskInfo(IMAGE_DISK_INFO, IMAGE_DISK_INFO.toBuilder().build()); + compareDiskInfo(SNAPSHOT_DISK_INFO, SNAPSHOT_DISK_INFO.toBuilder().build()); + DiskInfo diskInfo = DISK_INFO.toBuilder().description("newDescription").build(); + assertEquals("newDescription", diskInfo.description()); + diskInfo = diskInfo.toBuilder().description("description").build(); + compareDiskInfo(DISK_INFO, diskInfo); + } + + @Test + public void testToBuilderIncomplete() { + DiskInfo diskInfo = DiskInfo.of(DISK_ID, DISK_CONFIGURATION); + assertEquals(diskInfo, diskInfo.toBuilder().build()); + diskInfo = DiskInfo.of(DISK_ID, SNAPSHOT_DISK_CONFIGURATION); + assertEquals(diskInfo, diskInfo.toBuilder().build()); + diskInfo = DiskInfo.of(DISK_ID, IMAGE_DISK_CONFIGURATION); + assertEquals(diskInfo, diskInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, DISK_INFO.generatedId()); + assertEquals(DISK_ID, DISK_INFO.diskId()); + assertEquals(DISK_CONFIGURATION, DISK_INFO.configuration()); + assertEquals(CREATION_TIMESTAMP, DISK_INFO.creationTimestamp()); + assertEquals(CREATION_STATUS, DISK_INFO.creationStatus()); + assertEquals(DESCRIPTION, DISK_INFO.description()); + assertEquals(LICENSES, DISK_INFO.licenses()); + assertEquals(ATTACHED_INSTANCES, DISK_INFO.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, DISK_INFO.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, DISK_INFO.lastDetachTimestamp()); + assertEquals(GENERATED_ID, IMAGE_DISK_INFO.generatedId()); + assertEquals(DISK_ID, IMAGE_DISK_INFO.diskId()); + assertEquals(IMAGE_DISK_CONFIGURATION, IMAGE_DISK_INFO.configuration()); + assertEquals(CREATION_TIMESTAMP, IMAGE_DISK_INFO.creationTimestamp()); + assertEquals(CREATION_STATUS, IMAGE_DISK_INFO.creationStatus()); + assertEquals(DESCRIPTION, IMAGE_DISK_INFO.description()); + assertEquals(LICENSES, IMAGE_DISK_INFO.licenses()); + assertEquals(ATTACHED_INSTANCES, IMAGE_DISK_INFO.attachedInstances()); 
+ assertEquals(LAST_ATTACH_TIMESTAMP, IMAGE_DISK_INFO.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, IMAGE_DISK_INFO.lastDetachTimestamp()); + assertEquals(GENERATED_ID, SNAPSHOT_DISK_INFO.generatedId()); + assertEquals(DISK_ID, SNAPSHOT_DISK_INFO.diskId()); + assertEquals(SNAPSHOT_DISK_CONFIGURATION, SNAPSHOT_DISK_INFO.configuration()); + assertEquals(CREATION_TIMESTAMP, SNAPSHOT_DISK_INFO.creationTimestamp()); + assertEquals(CREATION_STATUS, SNAPSHOT_DISK_INFO.creationStatus()); + assertEquals(DESCRIPTION, SNAPSHOT_DISK_INFO.description()); + assertEquals(LICENSES, SNAPSHOT_DISK_INFO.licenses()); + assertEquals(ATTACHED_INSTANCES, SNAPSHOT_DISK_INFO.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, SNAPSHOT_DISK_INFO.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, SNAPSHOT_DISK_INFO.lastDetachTimestamp()); + } + + @Test + public void testOf() { + DiskInfo diskInfo = DiskInfo.of(DISK_ID, DISK_CONFIGURATION); + assertNull(diskInfo.generatedId()); + assertEquals(DISK_ID, diskInfo.diskId()); + assertEquals(DISK_CONFIGURATION, diskInfo.configuration()); + assertNull(diskInfo.creationTimestamp()); + assertNull(diskInfo.creationStatus()); + assertNull(diskInfo.description()); + assertNull(diskInfo.licenses()); + assertNull(diskInfo.attachedInstances()); + assertNull(diskInfo.lastAttachTimestamp()); + assertNull(diskInfo.lastDetachTimestamp()); + diskInfo = DiskInfo.of(DISK_ID, IMAGE_DISK_CONFIGURATION); + assertNull(diskInfo.generatedId()); + assertEquals(DISK_ID, diskInfo.diskId()); + assertEquals(IMAGE_DISK_CONFIGURATION, diskInfo.configuration()); + assertNull(diskInfo.creationTimestamp()); + assertNull(diskInfo.creationStatus()); + assertNull(diskInfo.description()); + assertNull(diskInfo.licenses()); + assertNull(diskInfo.attachedInstances()); + assertNull(diskInfo.lastAttachTimestamp()); + assertNull(diskInfo.lastDetachTimestamp()); + diskInfo = DiskInfo.of(DISK_ID, SNAPSHOT_DISK_CONFIGURATION); + assertNull(diskInfo.generatedId()); + assertEquals(DISK_ID, diskInfo.diskId()); + assertEquals(SNAPSHOT_DISK_CONFIGURATION, diskInfo.configuration()); + assertNull(diskInfo.creationTimestamp()); + assertNull(diskInfo.creationStatus()); + assertNull(diskInfo.description()); + assertNull(diskInfo.licenses()); + assertNull(diskInfo.attachedInstances()); + assertNull(diskInfo.lastAttachTimestamp()); + assertNull(diskInfo.lastDetachTimestamp()); + } + + @Test + public void testToAndFromPb() { + DiskInfo diskInfo = DiskInfo.fromPb(DISK_INFO.toPb()); + compareDiskInfo(DISK_INFO, diskInfo); + diskInfo = DiskInfo.fromPb(SNAPSHOT_DISK_INFO.toPb()); + compareDiskInfo(SNAPSHOT_DISK_INFO, diskInfo); + diskInfo = DiskInfo.fromPb(IMAGE_DISK_INFO.toPb()); + compareDiskInfo(IMAGE_DISK_INFO, diskInfo); + Disk disk = new Disk() + .setSelfLink(DISK_ID.selfLink()) + .setType(TYPE.selfLink()) + .setSizeGb(SIZE_GB); + diskInfo = DiskInfo.of(DISK_ID, DISK_CONFIGURATION); + compareDiskInfo(diskInfo, DiskInfo.fromPb(disk)); + disk = new Disk() + .setType(TYPE.selfLink()) + .setSizeGb(SIZE_GB) + .setSelfLink(DISK_ID.selfLink()) + .setSourceSnapshotId(SNAPSHOT_ID) + .setSourceSnapshot(SNAPSHOT.selfLink()); + diskInfo = DiskInfo.of(DISK_ID, SNAPSHOT_DISK_CONFIGURATION); + compareDiskInfo(diskInfo, DiskInfo.fromPb(disk)); + disk = new Disk() + .setType(TYPE.selfLink()) + .setSizeGb(SIZE_GB) + .setSelfLink(DISK_ID.selfLink()) + .setSourceImageId(IMAGE_ID) + .setSourceImage(IMAGE.selfLink()); + diskInfo = DiskInfo.of(DISK_ID, IMAGE_DISK_CONFIGURATION); + compareDiskInfo(diskInfo, 
DiskInfo.fromPb(disk)); + } + + @Test + public void testSetProjectId() { + StandardDiskConfiguration standardDiskConfiguration = DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of(TYPE.zone(), TYPE.type())) + .build(); + DiskInfo diskInfo = DISK_INFO.toBuilder() + .diskId(DiskId.of(DISK_ID.zone(), DISK_ID.disk())) + .configuration(standardDiskConfiguration) + .build(); + compareDiskInfo(DISK_INFO, diskInfo.setProjectId("project")); + SnapshotDiskConfiguration snapshotDiskConfiguration = SNAPSHOT_DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of(TYPE.zone(), TYPE.type())) + .sourceSnapshot(SnapshotId.of(SNAPSHOT.snapshot())) + .build(); + diskInfo = SNAPSHOT_DISK_INFO.toBuilder() + .diskId(DiskId.of(DISK_ID.zone(), DISK_ID.disk())) + .configuration(snapshotDiskConfiguration) + .build(); + compareDiskInfo(SNAPSHOT_DISK_INFO, diskInfo.setProjectId("project")); + ImageDiskConfiguration imageDiskConfiguration = IMAGE_DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of(TYPE.zone(), TYPE.type())) + .sourceImage(ImageId.of(IMAGE.image())) + .build(); + diskInfo = IMAGE_DISK_INFO.toBuilder() + .diskId(DiskId.of(DISK_ID.zone(), DISK_ID.disk())) + .configuration(imageDiskConfiguration) + .build(); + compareDiskInfo(IMAGE_DISK_INFO, diskInfo.setProjectId("project")); + } + + public void compareDiskInfo(DiskInfo expected, DiskInfo value) { + assertEquals(expected, value); + assertEquals(expected.configuration(), value.configuration()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.diskId(), value.diskId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.creationStatus(), value.creationStatus()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.attachedInstances(), value.attachedInstances()); + assertEquals(expected.lastAttachTimestamp(), value.lastAttachTimestamp()); + assertEquals(expected.lastDetachTimestamp(), value.lastDetachTimestamp()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTest.java new file mode 100644 index 000000000000..03182493803c --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTest.java @@ -0,0 +1,474 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class DiskTest { + + private static final String GENERATED_ID = "42"; + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final DiskInfo.CreationStatus CREATION_STATUS = DiskInfo.CreationStatus.READY; + private static final String DESCRIPTION = "description"; + private static final Long SIZE_GB = 500L; + private static final DiskTypeId TYPE = DiskTypeId.of("project", "zone", "disk"); + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final List ATTACHED_INSTANCES = ImmutableList.of( + InstanceId.of("project", "zone", "instance1"), + InstanceId.of("project", "zone", "instance2")); + private static final SnapshotId SNAPSHOT = SnapshotId.of("project", "snapshot"); + private static final ImageId IMAGE = ImageId.of("project", "image"); + private static final String SNAPSHOT_ID = "snapshotId"; + private static final String IMAGE_ID = "imageId"; + private static final Long LAST_ATTACH_TIMESTAMP = 1453293600000L; + private static final Long LAST_DETACH_TIMESTAMP = 1453293660000L; + private static final StandardDiskConfiguration DISK_CONFIGURATION = + StandardDiskConfiguration.builder() + .sizeGb(SIZE_GB) + .diskType(TYPE) + .build(); + private static final SnapshotDiskConfiguration SNAPSHOT_DISK_CONFIGURATION = + SnapshotDiskConfiguration.builder(SNAPSHOT) + .sizeGb(SIZE_GB) + .diskType(TYPE) + .sourceSnapshotId(SNAPSHOT_ID) + .build(); + private static final ImageDiskConfiguration IMAGE_DISK_CONFIGURATION = + ImageDiskConfiguration.builder(IMAGE) + .sizeGb(SIZE_GB) + .diskType(TYPE) + .sourceImageId(IMAGE_ID) + .build(); + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Disk disk; + private Disk standardDisk; + private Disk snapshotDisk; + private Disk imageDisk; + + private void initializeExpectedDisk(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + standardDisk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + snapshotDisk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, SNAPSHOT_DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + 
.lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + imageDisk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, IMAGE_DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .lastAttachTimestamp(LAST_ATTACH_TIMESTAMP) + .lastDetachTimestamp(LAST_DETACH_TIMESTAMP) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeDisk() { + disk = new Disk.Builder(compute, DISK_ID, DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .creationStatus(CREATION_STATUS) + .description(DESCRIPTION) + .licenses(LICENSES) + .attachedInstances(ATTACHED_INSTANCES) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedDisk(16); + compareDisk(standardDisk, standardDisk.toBuilder().build()); + compareDisk(imageDisk, imageDisk.toBuilder().build()); + compareDisk(snapshotDisk, snapshotDisk.toBuilder().build()); + Disk newDisk = standardDisk.toBuilder().description("newDescription").build(); + assertEquals("newDescription", newDisk.description()); + newDisk = newDisk.toBuilder().description("description").build(); + compareDisk(standardDisk, newDisk); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedDisk(18); + DiskInfo diskInfo = DiskInfo.of(DISK_ID, DISK_CONFIGURATION); + Disk disk = new Disk(serviceMockReturnsOptions, new DiskInfo.BuilderImpl(diskInfo)); + compareDisk(disk, disk.toBuilder().build()); + diskInfo = DiskInfo.of(DISK_ID, SNAPSHOT_DISK_CONFIGURATION); + disk = new Disk(serviceMockReturnsOptions, new DiskInfo.BuilderImpl(diskInfo)); + compareDisk(disk, disk.toBuilder().build()); + diskInfo = DiskInfo.of(DISK_ID, IMAGE_DISK_CONFIGURATION); + disk = new Disk(serviceMockReturnsOptions, new DiskInfo.BuilderImpl(diskInfo)); + compareDisk(disk, disk.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedDisk(4); + assertEquals(DISK_ID, standardDisk.diskId()); + assertEquals(GENERATED_ID, standardDisk.generatedId()); + assertEquals(DISK_CONFIGURATION, standardDisk.configuration()); + assertEquals(CREATION_TIMESTAMP, standardDisk.creationTimestamp()); + assertEquals(CREATION_STATUS, standardDisk.creationStatus()); + assertEquals(DESCRIPTION, standardDisk.description()); + assertEquals(LICENSES, standardDisk.licenses()); + assertEquals(ATTACHED_INSTANCES, standardDisk.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, standardDisk.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, standardDisk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, standardDisk.compute()); + assertEquals(DISK_ID, imageDisk.diskId()); + assertEquals(GENERATED_ID, imageDisk.generatedId()); + assertEquals(IMAGE_DISK_CONFIGURATION, imageDisk.configuration()); + assertEquals(CREATION_TIMESTAMP, imageDisk.creationTimestamp()); + assertEquals(CREATION_STATUS, imageDisk.creationStatus()); + assertEquals(DESCRIPTION, imageDisk.description()); + assertEquals(LICENSES, imageDisk.licenses()); + assertEquals(ATTACHED_INSTANCES, imageDisk.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, imageDisk.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, imageDisk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, imageDisk.compute()); + assertEquals(DISK_ID, snapshotDisk.diskId()); + assertEquals(GENERATED_ID, 
snapshotDisk.generatedId()); + assertEquals(SNAPSHOT_DISK_CONFIGURATION, snapshotDisk.configuration()); + assertEquals(CREATION_TIMESTAMP, snapshotDisk.creationTimestamp()); + assertEquals(CREATION_STATUS, snapshotDisk.creationStatus()); + assertEquals(DESCRIPTION, snapshotDisk.description()); + assertEquals(LICENSES, snapshotDisk.licenses()); + assertEquals(ATTACHED_INSTANCES, snapshotDisk.attachedInstances()); + assertEquals(LAST_ATTACH_TIMESTAMP, snapshotDisk.lastAttachTimestamp()); + assertEquals(LAST_DETACH_TIMESTAMP, snapshotDisk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, snapshotDisk.compute()); + Disk disk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, DISK_CONFIGURATION) + .diskId(DiskId.of("newProject", "newZone")) + .configuration(SNAPSHOT_DISK_CONFIGURATION) + .build(); + assertEquals(DiskId.of("newProject", "newZone"), disk.diskId()); + assertNull(disk.generatedId()); + assertEquals(SNAPSHOT_DISK_CONFIGURATION, disk.configuration()); + assertNull(disk.creationTimestamp()); + assertNull(disk.creationStatus()); + assertNull(disk.description()); + assertNull(disk.licenses()); + assertNull(disk.attachedInstances()); + assertNull(disk.lastAttachTimestamp()); + assertNull(disk.lastDetachTimestamp()); + assertSame(serviceMockReturnsOptions, disk.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedDisk(24); + compareDisk(standardDisk, Disk.fromPb(serviceMockReturnsOptions, standardDisk.toPb())); + compareDisk(imageDisk, Disk.fromPb(serviceMockReturnsOptions, imageDisk.toPb())); + compareDisk(snapshotDisk, Disk.fromPb(serviceMockReturnsOptions, snapshotDisk.toPb())); + Disk disk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, DISK_CONFIGURATION).build(); + compareDisk(disk, Disk.fromPb(serviceMockReturnsOptions, disk.toPb())); + disk = + new Disk.Builder(serviceMockReturnsOptions, DISK_ID, SNAPSHOT_DISK_CONFIGURATION).build(); + compareDisk(disk, Disk.fromPb(serviceMockReturnsOptions, disk.toPb())); + disk = new Disk.Builder(serviceMockReturnsOptions, DISK_ID, IMAGE_DISK_CONFIGURATION).build(); + compareDisk(disk, Disk.fromPb(serviceMockReturnsOptions, disk.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + expect(compute.deleteDisk(DISK_ID)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedDisk(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteDisk(DISK_ID)).andReturn(null); + replay(compute); + initializeDisk(); + assertNull(disk.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedDisk(3); + Compute.DiskOption[] expectedOptions = {Compute.DiskOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getDisk(DISK_ID, expectedOptions)).andReturn(imageDisk); + replay(compute); + initializeDisk(); + assertTrue(disk.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedDisk(3); + Compute.DiskOption[] expectedOptions = {Compute.DiskOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getDisk(DISK_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeDisk(); + 
assertFalse(disk.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedDisk(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getDisk(DISK_ID)).andReturn(imageDisk); + replay(compute); + initializeDisk(); + Disk updatedDisk = disk.reload(); + compareDisk(imageDisk, updatedDisk); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedDisk(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getDisk(DISK_ID)).andReturn(null); + replay(compute); + initializeDisk(); + assertNull(disk.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedDisk(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getDisk(DISK_ID, Compute.DiskOption.fields())).andReturn(imageDisk); + replay(compute); + initializeDisk(); + Disk updatedDisk = disk.reload(Compute.DiskOption.fields()); + compareDisk(imageDisk, updatedDisk); + verify(compute); + } + + @Test + public void testCreateSnapshot() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID).build(); + expect(compute.create(snapshot)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createSnapshot(SNAPSHOT.snapshot())); + } + + @Test + public void testCreateSnapshotWithDescription() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID) + .description("description") + .build(); + expect(compute.create(snapshot)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createSnapshot(SNAPSHOT.snapshot(), "description")); + } + + @Test + public void testCreateSnapshotWithOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID).build(); + expect(compute.create(snapshot, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, + disk.createSnapshot(SNAPSHOT.snapshot(), Compute.OperationOption.fields())); + } + + @Test + public void testCreateSnapshotWithDescriptionAndOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + SnapshotId snapshotId = SnapshotId.of(SNAPSHOT.snapshot()); + SnapshotInfo snapshot = SnapshotInfo.builder(snapshotId, DISK_ID) + .description("description") + .build(); + expect(compute.create(snapshot, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + 
assertSame(operation, + disk.createSnapshot(SNAPSHOT.snapshot(), "description", Compute.OperationOption.fields())); + } + + @Test + public void testCreateImage() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.of(imageId, DiskImageConfiguration.of(DISK_ID)); + expect(compute.create(image)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createImage(IMAGE.image())); + } + + @Test + public void testCreateImageWithDescription() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.builder(imageId, DiskImageConfiguration.of(DISK_ID)) + .description("description") + .build(); + expect(compute.create(image)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createImage(IMAGE.image(), "description")); + } + + @Test + public void testCreateImageWithOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.of(imageId, DiskImageConfiguration.of(DISK_ID)); + expect(compute.create(image, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.createImage(IMAGE.image(), Compute.OperationOption.fields())); + } + + @Test + public void testCreateImageWithDescriptionAndOptions() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + ImageId imageId = ImageId.of(IMAGE.image()); + ImageInfo image = ImageInfo.builder(imageId, DiskImageConfiguration.of(DISK_ID)) + .description("description") + .build(); + expect(compute.create(image, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, + disk.createImage(IMAGE.image(), "description", Compute.OperationOption.fields())); + } + + @Test + public void testResizeOperation() { + initializeExpectedDisk(4); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "zone", "op")) + .build(); + expect(compute.resize(DISK_ID, 42L)).andReturn(operation); + replay(compute); + initializeDisk(); + assertSame(operation, disk.resize(42L)); + } + + @Test + public void testResizeNull() { + initializeExpectedDisk(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.resize(DISK_ID, 42L)).andReturn(null); + replay(compute); + initializeDisk(); + assertNull(disk.resize(42L)); + } + + public void compareDisk(Disk expected, Disk value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.diskId(), value.diskId()); + assertEquals(expected.configuration(), 
value.configuration()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.creationStatus(), value.creationStatus()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.attachedInstances(), value.attachedInstances()); + assertEquals(expected.lastAttachTimestamp(), value.lastAttachTimestamp()); + assertEquals(expected.lastDetachTimestamp(), value.lastDetachTimestamp()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTypeIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTypeIdTest.java new file mode 100644 index 000000000000..ac6788329a32 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTypeIdTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class DiskTypeIdTest { + + private static final String PROJECT = "project"; + private static final String ZONE = "zone"; + private static final String DISK_TYPE = "diskType"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + DiskTypeId diskTypeId = DiskTypeId.of(PROJECT, ZONE, DISK_TYPE); + assertEquals(PROJECT, diskTypeId.project()); + assertEquals(ZONE, diskTypeId.zone()); + assertEquals(DISK_TYPE, diskTypeId.type()); + assertEquals(URL, diskTypeId.selfLink()); + diskTypeId = DiskTypeId.of(ZONE, DISK_TYPE); + assertNull(diskTypeId.project()); + assertEquals(ZONE, diskTypeId.zone()); + assertEquals(DISK_TYPE, diskTypeId.type()); + } + + @Test + public void testToAndFromUrl() { + DiskTypeId diskTypeId = DiskTypeId.of(PROJECT, ZONE, DISK_TYPE); + assertSame(diskTypeId, diskTypeId.setProjectId(PROJECT)); + compareDiskTypeId(diskTypeId, DiskTypeId.fromUrl(diskTypeId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid disk type URL"); + DiskTypeId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + DiskTypeId diskTypeId = DiskTypeId.of(PROJECT, ZONE, DISK_TYPE); + assertSame(diskTypeId, diskTypeId.setProjectId(PROJECT)); + compareDiskTypeId(diskTypeId, DiskTypeId.of(ZONE, DISK_TYPE).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + 
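+    // matchesUrl() should accept the canonical selfLink built above and reject any string
+    // that does not follow the zonal disk type URL pattern.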
assertTrue(DiskTypeId.matchesUrl(DiskTypeId.of(PROJECT, ZONE, DISK_TYPE).selfLink())); + assertFalse(DiskTypeId.matchesUrl("notMatchingUrl")); + } + + private void compareDiskTypeId(DiskTypeId expected, DiskTypeId value) { + assertEquals(expected, value); + assertEquals(expected.project(), expected.project()); + assertEquals(expected.zone(), expected.zone()); + assertEquals(expected.type(), expected.type()); + assertEquals(expected.selfLink(), expected.selfLink()); + assertEquals(expected.hashCode(), expected.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTypeTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTypeTest.java new file mode 100644 index 000000000000..691d1fcd336d --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/DiskTypeTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class DiskTypeTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final String VALID_DISK_SIZE = "10GB-10TB"; + private static final Long DEFAULT_DISK_SIZE_GB = 10L; + private static final DiskTypeId DISK_TYPE_ID = DiskTypeId.of("project", "zone", "diskType"); + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.of(DeprecationStatus.Status.DELETED, DISK_TYPE_ID); + private static final DiskType DISK_TYPE = DiskType.builder() + .generatedId(GENERATED_ID) + .diskTypeId(DISK_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .validDiskSize(VALID_DISK_SIZE) + .defaultDiskSizeGb(DEFAULT_DISK_SIZE_GB) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, DISK_TYPE.generatedId()); + assertEquals(DISK_TYPE_ID, DISK_TYPE.diskTypeId()); + assertEquals(CREATION_TIMESTAMP, DISK_TYPE.creationTimestamp()); + assertEquals(DESCRIPTION, DISK_TYPE.description()); + assertEquals(VALID_DISK_SIZE, DISK_TYPE.validDiskSize()); + assertEquals(DEFAULT_DISK_SIZE_GB, DISK_TYPE.defaultDiskSizeGb()); + assertEquals(DEPRECATION_STATUS, DISK_TYPE.deprecationStatus()); + } + + @Test + public void testToPbAndFromPb() { + compareDiskTypes(DISK_TYPE, DiskType.fromPb(DISK_TYPE.toPb())); + DiskType diskType = DiskType.builder().diskTypeId(DISK_TYPE_ID).build(); + compareDiskTypes(diskType, DiskType.fromPb(diskType.toPb())); + } + + private void compareDiskTypes(DiskType expected, DiskType value) { + assertEquals(expected, value); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.diskTypeId(), value.diskTypeId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), 
value.description()); + assertEquals(expected.validDiskSize(), value.validDiskSize()); + assertEquals(expected.defaultDiskSizeGb(), value.defaultDiskSizeGb()); + assertEquals(expected.deprecationStatus(), value.deprecationStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ForwardingRuleIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ForwardingRuleIdTest.java new file mode 100644 index 000000000000..3afc0c187c05 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ForwardingRuleIdTest.java @@ -0,0 +1,139 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class ForwardingRuleIdTest { + + private static final String PROJECT = "project"; + private static final String REGION = "region"; + private static final String NAME = "rule"; + private static final String GLOBAL_URL = + "https://www.googleapis.com/compute/v1/projects/project/global/forwardingRules/rule"; + private static final String REGION_URL = "https://www.googleapis.com/compute/v1/projects/" + + "project/regions/region/forwardingRules/rule"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + GlobalForwardingRuleId forwardingRuleId = GlobalForwardingRuleId.of(PROJECT, NAME); + assertEquals(PROJECT, forwardingRuleId.project()); + assertEquals(NAME, forwardingRuleId.rule()); + assertEquals(GLOBAL_URL, forwardingRuleId.selfLink()); + assertEquals(ForwardingRuleId.Type.GLOBAL, forwardingRuleId.type()); + forwardingRuleId = GlobalForwardingRuleId.of(NAME); + assertNull(forwardingRuleId.project()); + assertEquals(NAME, forwardingRuleId.rule()); + assertEquals(ForwardingRuleId.Type.GLOBAL, forwardingRuleId.type()); + RegionForwardingRuleId regionForwardingRuleId = + RegionForwardingRuleId.of(PROJECT, REGION, NAME); + assertEquals(PROJECT, regionForwardingRuleId.project()); + assertEquals(REGION, regionForwardingRuleId.region()); + assertEquals(NAME, regionForwardingRuleId.rule()); + assertEquals(REGION_URL, regionForwardingRuleId.selfLink()); + assertEquals(ForwardingRuleId.Type.REGION, regionForwardingRuleId.type()); + regionForwardingRuleId = RegionForwardingRuleId.of(RegionId.of(PROJECT, REGION), NAME); + assertEquals(PROJECT, regionForwardingRuleId.project()); + assertEquals(REGION, regionForwardingRuleId.region()); + assertEquals(NAME, regionForwardingRuleId.rule()); + assertEquals(REGION_URL, regionForwardingRuleId.selfLink()); + assertEquals(ForwardingRuleId.Type.REGION, regionForwardingRuleId.type()); + 
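+    // The two-argument factory leaves the project unset; it is expected to be filled in later
+    // via setProjectId(), as testSetProjectId() below verifies.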
regionForwardingRuleId = RegionForwardingRuleId.of(REGION, NAME); + assertNull(regionForwardingRuleId.project()); + assertEquals(REGION, regionForwardingRuleId.region()); + assertEquals(NAME, regionForwardingRuleId.rule()); + assertEquals(ForwardingRuleId.Type.REGION, regionForwardingRuleId.type()); + } + + @Test + public void testToAndFromUrlGlobal() { + GlobalForwardingRuleId forwardingRuleId = GlobalForwardingRuleId.of(PROJECT, NAME); + compareGlobalForwardingRuleId(forwardingRuleId, + GlobalForwardingRuleId.fromUrl(forwardingRuleId.selfLink())); + RegionForwardingRuleId regionForwardingRuleId = + RegionForwardingRuleId.of(PROJECT, REGION, NAME); + compareRegionForwardingRuleId(regionForwardingRuleId, + RegionForwardingRuleId.fromUrl(regionForwardingRuleId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid global forwarding rule URL"); + GlobalForwardingRuleId.fromUrl("notMatchingUrl"); + } + + @Test + public void testToAndFromUrlRegion() { + RegionForwardingRuleId regionForwardingRuleId = + RegionForwardingRuleId.of(PROJECT, REGION, NAME); + compareRegionForwardingRuleId(regionForwardingRuleId, + RegionForwardingRuleId.fromUrl(regionForwardingRuleId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid region forwarding rule URL"); + RegionForwardingRuleId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + GlobalForwardingRuleId forwardingRuleId = GlobalForwardingRuleId.of(PROJECT, NAME); + assertSame(forwardingRuleId, forwardingRuleId.setProjectId(PROJECT)); + compareGlobalForwardingRuleId(forwardingRuleId, + GlobalForwardingRuleId.of(NAME).setProjectId(PROJECT)); + RegionForwardingRuleId regionForwardingRuleId = + RegionForwardingRuleId.of(PROJECT, REGION, NAME); + assertSame(regionForwardingRuleId, regionForwardingRuleId.setProjectId(PROJECT)); + compareRegionForwardingRuleId(regionForwardingRuleId, + RegionForwardingRuleId.of(REGION, NAME).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(GlobalForwardingRuleId.matchesUrl( + GlobalForwardingRuleId.of(PROJECT, NAME).selfLink())); + assertFalse(GlobalForwardingRuleId.matchesUrl("notMatchingUrl")); + assertTrue(RegionForwardingRuleId.matchesUrl( + RegionForwardingRuleId.of(PROJECT, REGION, NAME).selfLink())); + assertFalse(RegionForwardingRuleId.matchesUrl("notMatchingUrl")); + } + + private void compareGlobalForwardingRuleId(GlobalForwardingRuleId expected, + GlobalForwardingRuleId value) { + assertEquals(expected, value); + assertEquals(expected.project(), expected.project()); + assertEquals(expected.rule(), expected.rule()); + assertEquals(expected.selfLink(), expected.selfLink()); + assertEquals(expected.hashCode(), expected.hashCode()); + } + + private void compareRegionForwardingRuleId(RegionForwardingRuleId expected, + RegionForwardingRuleId value) { + assertEquals(expected, value); + assertEquals(expected.project(), expected.project()); + assertEquals(expected.region(), expected.region()); + assertEquals(expected.rule(), expected.rule()); + assertEquals(expected.selfLink(), expected.selfLink()); + assertEquals(expected.hashCode(), expected.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageDiskConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageDiskConfigurationTest.java new file mode 100644 index 000000000000..db8974ddeee4 --- /dev/null +++ 
b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageDiskConfigurationTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.compute.DiskConfiguration.Type; + +import org.junit.Test; + +public class ImageDiskConfigurationTest { + + private static final Long SIZE = 42L; + private static final DiskTypeId DISK_TYPE = DiskTypeId.of("project", "zone", "type"); + private static final ImageId IMAGE = ImageId.of("project", "image"); + private static final String IMAGE_ID = "imageId"; + private static final ImageDiskConfiguration DISK_CONFIGURATION = + ImageDiskConfiguration.builder(IMAGE) + .sizeGb(SIZE) + .diskType(DISK_TYPE) + .sourceImageId(IMAGE_ID) + .build(); + + @Test + public void testToBuilder() { + compareImageDiskConfiguration(DISK_CONFIGURATION, DISK_CONFIGURATION.toBuilder().build()); + ImageId newImageId = ImageId.of("newProject", "newImage"); + ImageDiskConfiguration diskConfiguration = DISK_CONFIGURATION.toBuilder() + .sizeGb(24L) + .sourceImage(newImageId) + .sourceImageId("newImageId") + .build(); + assertEquals(24L, diskConfiguration.sizeGb().longValue()); + assertEquals(newImageId, diskConfiguration.sourceImage()); + assertEquals("newImageId", diskConfiguration.sourceImageId()); + diskConfiguration = diskConfiguration.toBuilder() + .sizeGb(SIZE) + .sourceImage(IMAGE) + .sourceImageId(IMAGE_ID) + .build(); + compareImageDiskConfiguration(DISK_CONFIGURATION, diskConfiguration); + } + + @Test + public void testToBuilderIncomplete() { + ImageDiskConfiguration diskConfiguration = ImageDiskConfiguration.of(IMAGE); + compareImageDiskConfiguration(diskConfiguration, diskConfiguration.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(DISK_TYPE, DISK_CONFIGURATION.diskType()); + assertEquals(SIZE, DISK_CONFIGURATION.sizeGb()); + assertEquals(IMAGE, DISK_CONFIGURATION.sourceImage()); + assertEquals(IMAGE_ID, DISK_CONFIGURATION.sourceImageId()); + assertEquals(Type.IMAGE, DISK_CONFIGURATION.type()); + } + + @Test + public void testToAndFromPb() { + assertTrue(DiskConfiguration.fromPb(DISK_CONFIGURATION.toPb()) + instanceof ImageDiskConfiguration); + compareImageDiskConfiguration(DISK_CONFIGURATION, + DiskConfiguration.fromPb(DISK_CONFIGURATION.toPb())); + } + + @Test + public void testOf() { + ImageDiskConfiguration configuration = ImageDiskConfiguration.of(IMAGE); + assertNull(configuration.diskType()); + assertNull(configuration.sizeGb()); + assertNull(configuration.sourceImageId()); + assertEquals(IMAGE, configuration.sourceImage()); + assertEquals(Type.IMAGE, configuration.type()); + } + + @Test + public void testSetProjectId() { + ImageDiskConfiguration diskConfiguration = DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of(DISK_TYPE.zone(), 
DISK_TYPE.type())) + .sourceImage(ImageId.of(IMAGE.image())) + .build(); + compareImageDiskConfiguration(DISK_CONFIGURATION, diskConfiguration.setProjectId("project")); + } + + private void compareImageDiskConfiguration(ImageDiskConfiguration expected, + ImageDiskConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.diskType(), value.diskType()); + assertEquals(expected.sizeGb(), value.sizeGb()); + assertEquals(expected.sourceImage(), value.sourceImage()); + assertEquals(expected.sourceImageId(), value.sourceImageId()); + assertEquals(expected.type(), value.type()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageIdTest.java new file mode 100644 index 000000000000..32f8ac7c024b --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageIdTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class ImageIdTest { + + private static final String PROJECT = "project"; + private static final String NAME = "image"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/global/images/image"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + ImageId imageId = ImageId.of(PROJECT, NAME); + assertEquals(PROJECT, imageId.project()); + assertEquals(NAME, imageId.image()); + assertEquals(URL, imageId.selfLink()); + imageId = ImageId.of(NAME); + assertNull(imageId.project()); + assertEquals(NAME, imageId.image()); + } + + @Test + public void testToAndFromUrl() { + ImageId imageId = ImageId.of(PROJECT, NAME); + compareImageId(imageId, ImageId.fromUrl(imageId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid image URL"); + ImageId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + ImageId imageId = ImageId.of(PROJECT, NAME); + assertSame(imageId, imageId.setProjectId(PROJECT)); + compareImageId(imageId, ImageId.of(NAME).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(ImageId.matchesUrl(ImageId.of(PROJECT, NAME).selfLink())); + assertFalse(ImageId.matchesUrl("notMatchingUrl")); + } + + private void compareImageId(ImageId expected, ImageId value) { + assertEquals(expected, value); + assertEquals(expected.project(), expected.project()); + assertEquals(expected.image(), expected.image()); + assertEquals(expected.selfLink(), 
expected.selfLink()); + assertEquals(expected.hashCode(), expected.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageInfoTest.java new file mode 100644 index 000000000000..4433fbe96ffb --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageInfoTest.java @@ -0,0 +1,175 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.cloud.compute.ImageConfiguration.SourceType; +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class ImageInfoTest { + + private static final ImageId IMAGE_ID = ImageId.of("project", "image"); + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final ImageInfo.Status STATUS = ImageInfo.Status.READY; + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final Long DISK_SIZE_GB = 42L; + private static final String STORAGE_SOURCE = "source"; + private static final Long ARCHIVE_SIZE_BYTES = 24L; + private static final String SHA1_CHECKSUM = "checksum"; + private static final DiskId SOURCE_DISK = DiskId.of("project", "zone", "disk"); + private static final String SOURCE_DISK_ID = "diskId"; + private static final SourceType SOURCE_TYPE = SourceType.RAW; + private static final StorageImageConfiguration STORAGE_CONFIGURATION = + StorageImageConfiguration.builder(STORAGE_SOURCE) + .archiveSizeBytes(ARCHIVE_SIZE_BYTES) + .containerType(StorageImageConfiguration.ContainerType.TAR) + .sha1(SHA1_CHECKSUM) + .sourceType(SOURCE_TYPE) + .build(); + private static final DiskImageConfiguration DISK_CONFIGURATION = + DiskImageConfiguration.builder(SOURCE_DISK) + .archiveSizeBytes(ARCHIVE_SIZE_BYTES) + .sourceDiskId(SOURCE_DISK_ID) + .sourceType(SOURCE_TYPE) + .build(); + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.of(DeprecationStatus.Status.DELETED, IMAGE_ID); + private static final ImageInfo STORAGE_IMAGE = ImageInfo.builder(IMAGE_ID, STORAGE_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + private static final ImageInfo DISK_IMAGE = ImageInfo.builder(IMAGE_ID, DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + + @Test + public 
void testToBuilder() { + compareImageInfo(STORAGE_IMAGE, STORAGE_IMAGE.toBuilder().build()); + compareImageInfo(DISK_IMAGE, DISK_IMAGE.toBuilder().build()); + ImageInfo imageInfo = STORAGE_IMAGE.toBuilder().description("newDescription").build(); + assertEquals("newDescription", imageInfo.description()); + imageInfo = imageInfo.toBuilder().description("description").build(); + compareImageInfo(STORAGE_IMAGE, imageInfo); + } + + @Test + public void testToBuilderIncomplete() { + ImageInfo imageInfo = ImageInfo.of(IMAGE_ID, STORAGE_CONFIGURATION); + assertEquals(imageInfo, imageInfo.toBuilder().build()); + imageInfo = ImageInfo.of(IMAGE_ID, DISK_CONFIGURATION); + assertEquals(imageInfo, imageInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, STORAGE_IMAGE.generatedId()); + assertEquals(IMAGE_ID, STORAGE_IMAGE.imageId()); + assertEquals(CREATION_TIMESTAMP, STORAGE_IMAGE.creationTimestamp()); + assertEquals(DESCRIPTION, STORAGE_IMAGE.description()); + assertEquals(STORAGE_CONFIGURATION, STORAGE_IMAGE.configuration()); + assertEquals(STATUS, STORAGE_IMAGE.status()); + assertEquals(DISK_SIZE_GB, STORAGE_IMAGE.diskSizeGb()); + assertEquals(LICENSES, STORAGE_IMAGE.licenses()); + assertEquals(DEPRECATION_STATUS, STORAGE_IMAGE.deprecationStatus()); + assertEquals(GENERATED_ID, DISK_IMAGE.generatedId()); + assertEquals(IMAGE_ID, DISK_IMAGE.imageId()); + assertEquals(CREATION_TIMESTAMP, DISK_IMAGE.creationTimestamp()); + assertEquals(DESCRIPTION, DISK_IMAGE.description()); + assertEquals(DISK_CONFIGURATION, DISK_IMAGE.configuration()); + assertEquals(STATUS, DISK_IMAGE.status()); + assertEquals(DISK_SIZE_GB, DISK_IMAGE.diskSizeGb()); + assertEquals(LICENSES, DISK_IMAGE.licenses()); + assertEquals(DEPRECATION_STATUS, DISK_IMAGE.deprecationStatus()); + } + + @Test + public void testOf() { + ImageInfo imageInfo = ImageInfo.of(IMAGE_ID, STORAGE_CONFIGURATION); + assertEquals(IMAGE_ID, imageInfo.imageId()); + assertEquals(STORAGE_CONFIGURATION, imageInfo.configuration()); + assertNull(imageInfo.generatedId()); + assertNull(imageInfo.creationTimestamp()); + assertNull(imageInfo.description()); + assertNull(imageInfo.status()); + assertNull(imageInfo.diskSizeGb()); + assertNull(imageInfo.licenses()); + assertNull(imageInfo.deprecationStatus()); + imageInfo = ImageInfo.of(IMAGE_ID, DISK_CONFIGURATION); + assertEquals(IMAGE_ID, imageInfo.imageId()); + assertEquals(DISK_CONFIGURATION, imageInfo.configuration()); + assertNull(imageInfo.generatedId()); + assertNull(imageInfo.creationTimestamp()); + assertNull(imageInfo.description()); + assertNull(imageInfo.status()); + assertNull(imageInfo.diskSizeGb()); + assertNull(imageInfo.licenses()); + assertNull(imageInfo.deprecationStatus()); + } + + @Test + public void testToAndFromPb() { + compareImageInfo(STORAGE_IMAGE, ImageInfo.fromPb(STORAGE_IMAGE.toPb())); + compareImageInfo(DISK_IMAGE, ImageInfo.fromPb(DISK_IMAGE.toPb())); + ImageInfo imageInfo = ImageInfo.of(IMAGE_ID, StorageImageConfiguration.of(STORAGE_SOURCE)); + compareImageInfo(imageInfo, ImageInfo.fromPb(imageInfo.toPb())); + imageInfo = ImageInfo.of(IMAGE_ID, DiskImageConfiguration.of(SOURCE_DISK)); + compareImageInfo(imageInfo, ImageInfo.fromPb(imageInfo.toPb())); + } + + @Test + public void testSetProjectId() { + ImageInfo imageInfo = DISK_IMAGE.toBuilder() + .imageId(ImageId.of("image")) + .configuration(DISK_CONFIGURATION.toBuilder().sourceDisk(DiskId.of("zone", "disk")).build()) + .build(); + compareImageInfo(DISK_IMAGE, 
imageInfo.setProjectId("project")); + } + + public void compareImageInfo(ImageInfo expected, ImageInfo value) { + assertEquals(expected, value); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.imageId(), value.imageId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.configuration(), value.configuration()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.diskSizeGb(), value.diskSizeGb()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.deprecationStatus(), value.deprecationStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageTest.java new file mode 100644 index 000000000000..ca1cd009c5eb --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ImageTest.java @@ -0,0 +1,304 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class ImageTest { + + private static final ImageId IMAGE_ID = ImageId.of("project", "image"); + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final ImageInfo.Status STATUS = ImageInfo.Status.READY; + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final Long DISK_SIZE_GB = 42L; + private static final String STORAGE_SOURCE = "source"; + private static final Long ARCHIVE_SIZE_BYTES = 24L; + private static final String SHA1_CHECKSUM = "checksum"; + private static final DiskId SOURCE_DISK = DiskId.of("project", "zone", "disk"); + private static final String SOURCE_DISK_ID = "diskId"; + private static final ImageConfiguration.SourceType SOURCE_TYPE = ImageConfiguration.SourceType.RAW; + private static final StorageImageConfiguration STORAGE_CONFIGURATION = + StorageImageConfiguration.builder(STORAGE_SOURCE) + .archiveSizeBytes(ARCHIVE_SIZE_BYTES) + .containerType(StorageImageConfiguration.ContainerType.TAR) + .sha1(SHA1_CHECKSUM) + .sourceType(SOURCE_TYPE) + .build(); + 
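+  // A second fixture configuration: an image created from an existing disk rather than from a
+  // storage archive, so both ImageConfiguration flavors are exercised by these tests.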
private static final DiskImageConfiguration DISK_CONFIGURATION = + DiskImageConfiguration.builder(SOURCE_DISK) + .archiveSizeBytes(ARCHIVE_SIZE_BYTES) + .sourceDiskId(SOURCE_DISK_ID) + .sourceType(SOURCE_TYPE) + .build(); + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.of(DeprecationStatus.Status.DELETED, IMAGE_ID); + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Image image; + private Image diskImage; + private Image storageImage; + + private void initializeExpectedImage(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + diskImage = new Image.Builder(serviceMockReturnsOptions, IMAGE_ID, DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + storageImage = new Image.Builder(serviceMockReturnsOptions, IMAGE_ID, STORAGE_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeImage() { + image = new Image.Builder(compute, IMAGE_ID, DISK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedImage(12); + compareImage(diskImage, diskImage.toBuilder().build()); + compareImage(storageImage, storageImage.toBuilder().build()); + Image newImage = diskImage.toBuilder().description("newDescription").build(); + assertEquals("newDescription", newImage.description()); + newImage = newImage.toBuilder().description("description").build(); + compareImage(diskImage, newImage); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedImage(6); + ImageInfo imageInfo = ImageInfo.of(IMAGE_ID, DISK_CONFIGURATION); + Image image = + new Image(serviceMockReturnsOptions, new ImageInfo.BuilderImpl(imageInfo)); + compareImage(image, image.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedImage(3); + assertEquals(GENERATED_ID, diskImage.generatedId()); + assertEquals(IMAGE_ID, diskImage.imageId()); + assertEquals(CREATION_TIMESTAMP, diskImage.creationTimestamp()); + assertEquals(DESCRIPTION, diskImage.description()); + assertEquals(DISK_CONFIGURATION, diskImage.configuration()); + assertEquals(STATUS, diskImage.status()); + assertEquals(DISK_SIZE_GB, diskImage.diskSizeGb()); + assertEquals(LICENSES, diskImage.licenses()); + assertEquals(DEPRECATION_STATUS, diskImage.deprecationStatus()); + assertSame(serviceMockReturnsOptions, diskImage.compute()); + assertEquals(GENERATED_ID, storageImage.generatedId()); + assertEquals(IMAGE_ID, storageImage.imageId()); + assertEquals(CREATION_TIMESTAMP, storageImage.creationTimestamp()); + assertEquals(DESCRIPTION, storageImage.description()); + assertEquals(STORAGE_CONFIGURATION, storageImage.configuration()); + assertEquals(STATUS, storageImage.status()); + 
assertEquals(DISK_SIZE_GB, storageImage.diskSizeGb()); + assertEquals(LICENSES, storageImage.licenses()); + assertEquals(DEPRECATION_STATUS, storageImage.deprecationStatus()); + assertSame(serviceMockReturnsOptions, storageImage.compute()); + ImageId imageId = ImageId.of("otherImage"); + Image image = new Image.Builder(serviceMockReturnsOptions, IMAGE_ID, STORAGE_CONFIGURATION) + .imageId(imageId) + .configuration(DISK_CONFIGURATION) + .build(); + assertNull(image.generatedId()); + assertEquals(imageId, image.imageId()); + assertNull(image.creationTimestamp()); + assertNull(image.description()); + assertEquals(DISK_CONFIGURATION, image.configuration()); + assertNull(image.status()); + assertNull(image.diskSizeGb()); + assertNull(image.licenses()); + assertNull(image.deprecationStatus()); + assertSame(serviceMockReturnsOptions, image.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedImage(12); + compareImage(diskImage, + Image.fromPb(serviceMockReturnsOptions, diskImage.toPb())); + compareImage(storageImage, + Image.fromPb(serviceMockReturnsOptions, storageImage.toPb())); + Image image = + new Image.Builder(serviceMockReturnsOptions, IMAGE_ID, DISK_CONFIGURATION).build(); + compareImage(image, Image.fromPb(serviceMockReturnsOptions, image.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedImage(3); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + expect(compute.deleteImage(IMAGE_ID)).andReturn(operation); + replay(compute); + initializeImage(); + assertSame(operation, image.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedImage(2); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteImage(IMAGE_ID)).andReturn(null); + replay(compute); + initializeImage(); + assertNull(image.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedImage(2); + Compute.ImageOption[] expectedOptions = {Compute.ImageOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getImage(IMAGE_ID, expectedOptions)).andReturn(diskImage); + replay(compute); + initializeImage(); + assertTrue(image.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedImage(2); + Compute.ImageOption[] expectedOptions = {Compute.ImageOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getImage(IMAGE_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeImage(); + assertFalse(image.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedImage(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getImage(IMAGE_ID)).andReturn(storageImage); + replay(compute); + initializeImage(); + Image updateImage = image.reload(); + compareImage(storageImage, updateImage); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedImage(2); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getImage(IMAGE_ID)).andReturn(null); + replay(compute); + initializeImage(); + assertNull(image.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedImage(5); + expect(compute.options()).andReturn(mockOptions); + 
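+    // reload(options) is expected to pass the given ImageOption through to getImage() unchanged.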
expect(compute.getImage(IMAGE_ID, Compute.ImageOption.fields())).andReturn(storageImage); + replay(compute); + initializeImage(); + Image updateImage = image.reload(Compute.ImageOption.fields()); + compareImage(storageImage, updateImage); + verify(compute); + } + + @Test + public void testDeprecateImage() { + initializeExpectedImage(3); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + DeprecationStatus status = DeprecationStatus.of(DeprecationStatus.Status.DEPRECATED, IMAGE_ID); + expect(compute.deprecate(IMAGE_ID, status)).andReturn(operation); + replay(compute); + initializeImage(); + assertSame(operation, image.deprecate(status)); + } + + @Test + public void testDeprecateNull() { + initializeExpectedImage(2); + expect(compute.options()).andReturn(mockOptions); + DeprecationStatus status = DeprecationStatus.of(DeprecationStatus.Status.DEPRECATED, IMAGE_ID); + expect(compute.deprecate(IMAGE_ID, status)).andReturn(null); + replay(compute); + initializeImage(); + assertNull(image.deprecate(status)); + } + + public void compareImage(Image expected, Image value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.imageId(), value.imageId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.configuration(), value.configuration()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.diskSizeGb(), value.diskSizeGb()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.deprecationStatus(), value.deprecationStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceIdTest.java new file mode 100644 index 000000000000..5c02e6d7c7ee --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceIdTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.compute;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class InstanceIdTest {
+
+  private static final String PROJECT = "project";
+  private static final String ZONE = "zone";
+  private static final String NAME = "instance";
+  private static final String URL =
+      "https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance";
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void testOf() {
+    InstanceId instanceId = InstanceId.of(PROJECT, ZONE, NAME);
+    assertEquals(PROJECT, instanceId.project());
+    assertEquals(ZONE, instanceId.zone());
+    assertEquals(NAME, instanceId.instance());
+    assertEquals(URL, instanceId.selfLink());
+    instanceId = InstanceId.of(ZoneId.of(PROJECT, ZONE), NAME);
+    assertEquals(PROJECT, instanceId.project());
+    assertEquals(ZONE, instanceId.zone());
+    assertEquals(NAME, instanceId.instance());
+    assertEquals(URL, instanceId.selfLink());
+    instanceId = InstanceId.of(ZONE, NAME);
+    assertNull(instanceId.project());
+    assertEquals(ZONE, instanceId.zone());
+    assertEquals(NAME, instanceId.instance());
+  }
+
+  @Test
+  public void testToAndFromUrl() {
+    InstanceId instanceId = InstanceId.of(PROJECT, ZONE, NAME);
+    compareInstanceId(instanceId, InstanceId.fromUrl(instanceId.selfLink()));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid instance URL");
+    InstanceId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testSetProjectId() {
+    InstanceId instanceId = InstanceId.of(PROJECT, ZONE, NAME);
+    assertSame(instanceId, instanceId.setProjectId(PROJECT));
+    compareInstanceId(instanceId, InstanceId.of(ZONE, NAME).setProjectId(PROJECT));
+  }
+
+  @Test
+  public void testMatchesUrl() {
+    assertTrue(InstanceId.matchesUrl(InstanceId.of(PROJECT, ZONE, NAME).selfLink()));
+    assertFalse(InstanceId.matchesUrl("notMatchingUrl"));
+  }
+
+  private void compareInstanceId(InstanceId expected, InstanceId value) {
+    // Compare field by field against the actual value, not the expected object against itself.
+    assertEquals(expected, value);
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.zone(), value.zone());
+    assertEquals(expected.instance(), value.instance());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceInfoTest.java
new file mode 100644
index 000000000000..13e7a87af06e
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceInfoTest.java
@@ -0,0 +1,183 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class InstanceInfoTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final InstanceId INSTANCE_ID = InstanceId.of("project", "zone", "instance"); + private static final InstanceInfo.Status STATUS = InstanceInfo.Status.RUNNING; + private static final String STATUS_MESSAGE = "statusMessage"; + private static final Tags TAGS = Tags.of("tag1", "tag2"); + private static final MachineTypeId MACHINE_TYPE = MachineTypeId.of("project", "zone", "type"); + private static final Boolean CAN_IP_FORWARD = true; + private static final NetworkInterface NETWORK_INTERFACE = + NetworkInterface.of(NetworkId.of("project", "network")); + private static final List NETWORK_INTERFACES = + ImmutableList.of(NETWORK_INTERFACE); + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final AttachedDisk ATTACHED_DISK = + AttachedDisk.of(AttachedDisk.PersistentDiskConfiguration.of(DISK_ID)); + private static final List ATTACHED_DISKS = ImmutableList.of(ATTACHED_DISK); + private static final Metadata METADATA = Metadata.builder() + .add("key1", "value1") + .add("key2", "value2") + .build(); + private static final ServiceAccount SERVICE_ACCOUNT = + ServiceAccount.of("email", ImmutableList.of("scope1")); + private static final List SERVICE_ACCOUNTS = ImmutableList.of(SERVICE_ACCOUNT); + private static final SchedulingOptions SCHEDULING_OPTIONS = SchedulingOptions.preemptible(); + private static final String CPU_PLATFORM = "cpuPlatform"; + private static final InstanceInfo INSTANCE_INFO = InstanceInfo.builder(INSTANCE_ID, MACHINE_TYPE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .tags(TAGS) + .canIpForward(CAN_IP_FORWARD) + .networkInterfaces(NETWORK_INTERFACES) + .attachedDisks(ATTACHED_DISKS) + .metadata(METADATA) + .serviceAccounts(SERVICE_ACCOUNTS) + .schedulingOptions(SCHEDULING_OPTIONS) + .cpuPlatform(CPU_PLATFORM) + .build(); + + @Test + public void testToBuilder() { + compareInstanceInfo(INSTANCE_INFO, INSTANCE_INFO.toBuilder().build()); + InstanceInfo instance = INSTANCE_INFO.toBuilder().description("newDescription").build(); + assertEquals("newDescription", instance.description()); + instance = instance.toBuilder().description(DESCRIPTION).build(); + compareInstanceInfo(INSTANCE_INFO, instance); + } + + @Test + public void testToBuilderIncomplete() { + InstanceInfo instanceInfo = InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE, ATTACHED_DISK, + NETWORK_INTERFACE); + assertEquals(instanceInfo, instanceInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, INSTANCE_INFO.generatedId()); + assertEquals(INSTANCE_ID, INSTANCE_INFO.instanceId()); + assertEquals(CREATION_TIMESTAMP, INSTANCE_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, INSTANCE_INFO.description()); + assertEquals(STATUS, INSTANCE_INFO.status()); + assertEquals(STATUS_MESSAGE, INSTANCE_INFO.statusMessage()); + assertEquals(TAGS, INSTANCE_INFO.tags()); + 
assertEquals(MACHINE_TYPE, INSTANCE_INFO.machineType()); + assertEquals(CAN_IP_FORWARD, INSTANCE_INFO.canIpForward()); + assertEquals(NETWORK_INTERFACES, INSTANCE_INFO.networkInterfaces()); + assertEquals(ATTACHED_DISKS, INSTANCE_INFO.attachedDisks()); + assertEquals(METADATA, INSTANCE_INFO.metadata()); + assertEquals(SERVICE_ACCOUNTS, INSTANCE_INFO.serviceAccounts()); + assertEquals(SCHEDULING_OPTIONS, INSTANCE_INFO.schedulingOptions()); + assertEquals(CPU_PLATFORM, INSTANCE_INFO.cpuPlatform()); + InstanceInfo instanceInfo = InstanceInfo.builder(INSTANCE_ID, MACHINE_TYPE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .tags(TAGS) + .canIpForward(CAN_IP_FORWARD) + .networkInterfaces(NETWORK_INTERFACE) + .attachedDisks(ATTACHED_DISK) + .metadata(METADATA) + .serviceAccounts(SERVICE_ACCOUNTS) + .schedulingOptions(SCHEDULING_OPTIONS) + .cpuPlatform(CPU_PLATFORM) + .build(); + compareInstanceInfo(INSTANCE_INFO, instanceInfo); + } + + @Test + public void testOf() { + InstanceInfo instance = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE, ATTACHED_DISK, NETWORK_INTERFACE); + assertNull(instance.generatedId()); + assertEquals(INSTANCE_ID, instance.instanceId()); + assertNull(instance.creationTimestamp()); + assertNull(instance.description()); + assertNull(instance.status()); + assertNull(instance.statusMessage()); + assertNull(instance.tags()); + assertEquals(MACHINE_TYPE, instance.machineType()); + assertNull(instance.canIpForward()); + assertEquals(NETWORK_INTERFACES, instance.networkInterfaces()); + assertEquals(ATTACHED_DISKS, instance.attachedDisks()); + assertNull(instance.metadata()); + assertNull(instance.serviceAccounts()); + assertNull(instance.schedulingOptions()); + assertNull(instance.cpuPlatform()); + } + + @Test + public void testToAndFromPb() { + compareInstanceInfo(INSTANCE_INFO, InstanceInfo.fromPb(INSTANCE_INFO.toPb())); + InstanceInfo instance = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE, ATTACHED_DISK, NETWORK_INTERFACE); + compareInstanceInfo(instance, InstanceInfo.fromPb(instance.toPb())); + } + + @Test + public void testSetProjectId() { + InstanceInfo instance = InstanceInfo.of( + InstanceId.of("zone", "instance"), + MachineTypeId.of("zone", "type"), + AttachedDisk.of(AttachedDisk.PersistentDiskConfiguration.of(DiskId.of("zone", "disk"))), + NetworkInterface.of(NetworkId.of("project", "network"))); + InstanceInfo instanceWithProject = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE, ATTACHED_DISK, NETWORK_INTERFACE); + compareInstanceInfo(instanceWithProject, instance.setProjectId("project")); + } + + public void compareInstanceInfo(InstanceInfo expected, InstanceInfo value) { + assertEquals(expected, value); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.instanceId(), value.instanceId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.statusMessage(), value.statusMessage()); + assertEquals(expected.tags(), value.tags()); + assertEquals(expected.machineType(), value.machineType()); + assertEquals(expected.canIpForward(), value.canIpForward()); + assertEquals(expected.networkInterfaces(), value.networkInterfaces()); + assertEquals(expected.attachedDisks(), value.attachedDisks()); + assertEquals(expected.metadata(), value.metadata()); + assertEquals(expected.serviceAccounts(), 
value.serviceAccounts()); + assertEquals(expected.schedulingOptions(), value.schedulingOptions()); + assertEquals(expected.cpuPlatform(), value.cpuPlatform()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceTest.java new file mode 100644 index 000000000000..522e1a7daf28 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/InstanceTest.java @@ -0,0 +1,890 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import org.junit.Test; + +import java.util.List; +import java.util.Map; + +public class InstanceTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final InstanceId INSTANCE_ID = InstanceId.of("project", "zone", "instance"); + private static final InstanceInfo.Status STATUS = InstanceInfo.Status.RUNNING; + private static final String STATUS_MESSAGE = "statusMessage"; + private static final Tags TAGS = Tags.builder() + .values("tag1", "tag2") + .fingerprint("fingerprint") + .build(); + private static final MachineTypeId MACHINE_TYPE = MachineTypeId.of("project", "zone", "type"); + private static final Boolean CAN_IP_FORWARD = true; + private static final NetworkInterface NETWORK_INTERFACE = + NetworkInterface.of(NetworkId.of("project", "network")); + private static final List NETWORK_INTERFACES = + ImmutableList.of(NETWORK_INTERFACE); + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final AttachedDisk ATTACHED_DISK = + AttachedDisk.of(AttachedDisk.PersistentDiskConfiguration.of(DISK_ID)); + private static final List ATTACHED_DISKS = ImmutableList.of(ATTACHED_DISK); + private static final Metadata METADATA = Metadata.builder() + .add("key1", "value1") + .add("key2", "value2") + .fingerprint("fingerprint") + .build(); + private static final ServiceAccount SERVICE_ACCOUNT = + ServiceAccount.of("email", ImmutableList.of("scope1")); + private static final List SERVICE_ACCOUNTS = + ImmutableList.of(SERVICE_ACCOUNT); + private static final SchedulingOptions SCHEDULING_OPTIONS = SchedulingOptions.preemptible(); + private static final String 
CPU_PLATFORM = "cpuPlatform"; + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Instance instance; + private Instance expectedInstance; + + private void initializeExpectedInstance(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + expectedInstance = new Instance.Builder(serviceMockReturnsOptions, INSTANCE_ID, MACHINE_TYPE, + ATTACHED_DISK, NETWORK_INTERFACE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .tags(TAGS) + .canIpForward(CAN_IP_FORWARD) + .metadata(METADATA) + .serviceAccounts(SERVICE_ACCOUNTS) + .schedulingOptions(SCHEDULING_OPTIONS) + .cpuPlatform(CPU_PLATFORM) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeInstance() { + instance = new Instance.Builder(compute, INSTANCE_ID, MACHINE_TYPE, + ATTACHED_DISK, NETWORK_INTERFACE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .tags(TAGS) + .canIpForward(CAN_IP_FORWARD) + .metadata(METADATA) + .serviceAccounts(SERVICE_ACCOUNTS) + .schedulingOptions(SCHEDULING_OPTIONS) + .cpuPlatform(CPU_PLATFORM) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedInstance(8); + compareInstance(expectedInstance, expectedInstance.toBuilder().build()); + Instance newInstance = expectedInstance.toBuilder().description("newDescription").build(); + assertEquals("newDescription", newInstance.description()); + newInstance = newInstance.toBuilder().description("description").build(); + compareInstance(expectedInstance, newInstance); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedInstance(5); + InstanceInfo instanceInfo = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE, ATTACHED_DISK, NETWORK_INTERFACE); + Instance instance = + new Instance(serviceMockReturnsOptions, new InstanceInfo.BuilderImpl(instanceInfo)); + compareInstance(instance, instance.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedInstance(2); + assertEquals(GENERATED_ID, expectedInstance.generatedId()); + assertEquals(INSTANCE_ID, expectedInstance.instanceId()); + assertEquals(CREATION_TIMESTAMP, expectedInstance.creationTimestamp()); + assertEquals(DESCRIPTION, expectedInstance.description()); + assertEquals(STATUS, expectedInstance.status()); + assertEquals(STATUS_MESSAGE, expectedInstance.statusMessage()); + assertEquals(TAGS, expectedInstance.tags()); + assertEquals(MACHINE_TYPE, expectedInstance.machineType()); + assertEquals(CAN_IP_FORWARD, expectedInstance.canIpForward()); + assertEquals(NETWORK_INTERFACES, expectedInstance.networkInterfaces()); + assertEquals(ATTACHED_DISKS, expectedInstance.attachedDisks()); + assertEquals(METADATA, expectedInstance.metadata()); + assertEquals(SERVICE_ACCOUNTS, expectedInstance.serviceAccounts()); + assertEquals(SCHEDULING_OPTIONS, expectedInstance.schedulingOptions()); + assertEquals(CPU_PLATFORM, expectedInstance.cpuPlatform()); + assertSame(serviceMockReturnsOptions, expectedInstance.compute()); + InstanceInfo instanceInfo = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE, ATTACHED_DISK, NETWORK_INTERFACE); + Instance instance = + new Instance(serviceMockReturnsOptions, new 
InstanceInfo.BuilderImpl(instanceInfo)); + assertNull(instance.generatedId()); + assertEquals(INSTANCE_ID, instance.instanceId()); + assertNull(instance.creationTimestamp()); + assertNull(instance.description()); + assertNull(instance.status()); + assertNull(instance.statusMessage()); + assertNull(instance.tags()); + assertEquals(MACHINE_TYPE, instance.machineType()); + assertNull(instance.canIpForward()); + assertEquals(NETWORK_INTERFACES, instance.networkInterfaces()); + assertEquals(ATTACHED_DISKS, instance.attachedDisks()); + assertNull(instance.metadata()); + assertNull(instance.serviceAccounts()); + assertNull(instance.schedulingOptions()); + assertNull(instance.cpuPlatform()); + assertSame(serviceMockReturnsOptions, instance.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedInstance(8); + compareInstance(expectedInstance, + Instance.fromPb(serviceMockReturnsOptions, expectedInstance.toPb())); + Instance instance = new Instance.Builder(serviceMockReturnsOptions, INSTANCE_ID, MACHINE_TYPE, + ATTACHED_DISK, NETWORK_INTERFACE).build(); + compareInstance(instance, Instance.fromPb(serviceMockReturnsOptions, instance.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.deleteInstance(INSTANCE_ID)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteInstance(INSTANCE_ID)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedInstance(1); + Compute.InstanceOption[] expectedOptions = {Compute.InstanceOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getInstance(INSTANCE_ID, expectedOptions)).andReturn(expectedInstance); + replay(compute); + initializeInstance(); + assertTrue(instance.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedInstance(1); + Compute.InstanceOption[] expectedOptions = {Compute.InstanceOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getInstance(INSTANCE_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeInstance(); + assertFalse(instance.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedInstance(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getInstance(INSTANCE_ID)).andReturn(expectedInstance); + replay(compute); + initializeInstance(); + Instance updatedInstance = instance.reload(); + compareInstance(expectedInstance, updatedInstance); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getInstance(INSTANCE_ID)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedInstance(3); + expect(compute.options()).andReturn(mockOptions); + 
expect(compute.getInstance(INSTANCE_ID, Compute.InstanceOption.fields())).andReturn(expectedInstance); + replay(compute); + initializeInstance(); + Instance updateInstance = instance.reload(Compute.InstanceOption.fields()); + compareInstance(expectedInstance, updateInstance); + verify(compute); + } + + @Test + public void testAddAccessConfig() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.of("192.168.1.1"); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.addAccessConfig(INSTANCE_ID, "nic0", accessConfig)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.addAccessConfig("nic0", accessConfig)); + } + + @Test + public void testAddAccessConfig_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.of("192.168.1.1"); + expect(compute.addAccessConfig(INSTANCE_ID, "nic0", accessConfig)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.addAccessConfig("nic0", accessConfig)); + } + + @Test + public void testAddAccessConfigWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.of("192.168.1.1"); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.addAccessConfig(INSTANCE_ID, "nic0", accessConfig, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.addAccessConfig("nic0", accessConfig, Compute.OperationOption.fields())); + } + + @Test + public void testAttachDisk() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.attachDisk(INSTANCE_ID, configuration)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.attachDisk(configuration)); + } + + @Test + public void testAttachDisk_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + expect(compute.attachDisk(INSTANCE_ID, configuration)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.attachDisk(configuration)); + } + + @Test + public void testAttachDiskWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.attachDisk(INSTANCE_ID, configuration, Compute.OperationOption.fields())) + .andReturn(operation); 
+ replay(compute); + initializeInstance(); + assertSame(operation, instance.attachDisk(configuration, Compute.OperationOption.fields())); + } + + @Test + public void testAttachDiskName() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.attachDisk(INSTANCE_ID, "dev0", configuration)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.attachDisk("dev0", configuration)); + } + + @Test + public void testAttachDiskName_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + expect(compute.attachDisk(INSTANCE_ID, "dev0", configuration)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.attachDisk("dev0", configuration)); + } + + @Test + public void testAttachDiskNameWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.attachDisk(INSTANCE_ID, "dev0", configuration, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.attachDisk("dev0", configuration, Compute.OperationOption.fields())); + } + + @Test + public void testAttachDiskNameIndex() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.attachDisk(INSTANCE_ID, "dev0", configuration, 1)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.attachDisk("dev0", configuration, 1)); + } + + @Test + public void testAttachDiskNameIndex_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + expect(compute.attachDisk(INSTANCE_ID, "dev0", configuration, 1)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.attachDisk("dev0", configuration, 1)); + } + + @Test + public void testAttachDiskNameIndexWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + AttachedDisk.PersistentDiskConfiguration configuration = AttachedDisk.PersistentDiskConfiguration.of(DISK_ID); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.attachDisk(INSTANCE_ID, "dev0", configuration, 1, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + 
assertSame(operation, + instance.attachDisk("dev0", configuration, 1, Compute.OperationOption.fields())); + } + + @Test + public void testDeleteAccessConfig() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.deleteAccessConfig(INSTANCE_ID, "nic0", "NAT")).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.deleteAccessConfig("nic0", "NAT")); + } + + @Test + public void testDeleteAccessConfig_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteAccessConfig(INSTANCE_ID, "nic0", "NAT")).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.deleteAccessConfig("nic0", "NAT")); + } + + @Test + public void testDeleteAccessConfigWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.deleteAccessConfig(INSTANCE_ID, "nic0", "NAT", Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.deleteAccessConfig("nic0", "NAT", Compute.OperationOption.fields())); + } + + @Test + public void testDetachDisk() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.detachDisk(INSTANCE_ID, "dev0")).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.detachDisk("dev0")); + } + + @Test + public void testDetachDisk_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.detachDisk(INSTANCE_ID, "dev0")).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.detachDisk("dev0")); + } + + @Test + public void testDetachDiskWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.detachDisk(INSTANCE_ID, "dev0", Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.detachDisk("dev0", Compute.OperationOption.fields())); + } + + @Test + public void testGetSerialPortOutputWithNumber() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSerialPortOutput(INSTANCE_ID, 2)).andReturn("output"); + replay(compute); + initializeInstance(); + assertSame("output", instance.getSerialPortOutput(2)); + } + + @Test + public void testGetSerialPortOutput() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSerialPortOutput(INSTANCE_ID)).andReturn("output"); + replay(compute); + initializeInstance(); + assertSame("output", instance.getSerialPortOutput()); + } + + @Test + public void testResetOperation() { + 
initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.reset(INSTANCE_ID)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.reset()); + } + + @Test + public void testResetNull() { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.reset(INSTANCE_ID)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.reset()); + } + + @Test + public void testSetDiskAutodelete() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.setDiskAutoDelete(INSTANCE_ID, "dev0", true)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setDiskAutoDelete("dev0", true)); + } + + @Test + public void testSetDiskAutodelete_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.setDiskAutoDelete(INSTANCE_ID, "dev0", false)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setDiskAutoDelete("dev0", false)); + } + + @Test + public void testSetDiskAutodeleteWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.setDiskAutoDelete(INSTANCE_ID, "dev0", true, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setDiskAutoDelete("dev0", true, Compute.OperationOption.fields())); + } + + @Test + public void testSetMachineType() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.setMachineType(INSTANCE_ID, MACHINE_TYPE)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setMachineType(MACHINE_TYPE)); + } + + @Test + public void testSetMachineType_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.setMachineType(INSTANCE_ID, MACHINE_TYPE)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setMachineType(MACHINE_TYPE)); + } + + @Test + public void testSetMachineTypeWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.setMachineType(INSTANCE_ID, MACHINE_TYPE, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setMachineType(MACHINE_TYPE, Compute.OperationOption.fields())); + } + + @Test + public void testSetMetadata() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + 
Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + Metadata metadata = Metadata.builder().add("k", "v").fingerprint("fingerprint").build(); + expect(compute.setMetadata(INSTANCE_ID, metadata)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setMetadata(metadata)); + } + + @Test + public void testSetMetadata_Null() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Metadata metadata = Metadata.builder().add("k", "v").fingerprint("fingerprint").build(); + expect(compute.setMetadata(INSTANCE_ID, metadata)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setMetadata(metadata)); + } + + @Test + public void testSetMetadataWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + Metadata metadata = Metadata.builder().add("k", "v").fingerprint("fingerprint").build(); + expect(compute.setMetadata(INSTANCE_ID, metadata, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setMetadata(metadata, Compute.OperationOption.fields())); + } + + @Test + public void testSetMetadataFromMap() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + Map metadataMap = ImmutableMap.of("k", "v"); + Metadata metadata = Metadata.builder().values(metadataMap).fingerprint("fingerprint").build(); + expect(compute.setMetadata(INSTANCE_ID, metadata)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setMetadata(metadataMap)); + } + + @Test + public void testSetMetadataFromMap_Null() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Map metadataMap = ImmutableMap.of("k", "v"); + Metadata metadata = Metadata.builder().values(metadataMap).fingerprint("fingerprint").build(); + expect(compute.setMetadata(INSTANCE_ID, metadata)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setMetadata(metadataMap)); + } + + @Test + public void testSetMetadataFromMapWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + Map metadataMap = ImmutableMap.of("k", "v"); + Metadata metadata = Metadata.builder().values(metadataMap).fingerprint("fingerprint").build(); + expect(compute.setMetadata(INSTANCE_ID, metadata, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setMetadata(metadataMap, Compute.OperationOption.fields())); + } + + @Test + public void testSetSchedulingOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + SchedulingOptions 
schedulingOptions = SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + expect(compute.setSchedulingOptions(INSTANCE_ID, schedulingOptions)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setSchedulingOptions(schedulingOptions)); + } + + @Test + public void testSetSchedulingOptions_Null() throws Exception { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + SchedulingOptions schedulingOptions = SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + expect(compute.setSchedulingOptions(INSTANCE_ID, schedulingOptions)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setSchedulingOptions(schedulingOptions)); + } + + @Test + public void testSetSchedulingOptionsWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + SchedulingOptions schedulingOptions = SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + expect(compute.setSchedulingOptions(INSTANCE_ID, schedulingOptions, Compute.OperationOption.fields())) + .andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, + instance.setSchedulingOptions(schedulingOptions, Compute.OperationOption.fields())); + } + + @Test + public void testSetTags() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + Tags tags = Tags.builder().values("v1", "v2").fingerprint("fingerprint").build(); + expect(compute.setTags(INSTANCE_ID, tags)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setTags(tags)); + } + + @Test + public void testSetTags_Null() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Tags tags = Tags.builder().values("v1", "v2").fingerprint("fingerprint").build(); + expect(compute.setTags(INSTANCE_ID, tags)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setTags(tags)); + } + + @Test + public void testSetTagsWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + Tags tags = Tags.builder().values("v1", "v2").fingerprint("fingerprint").build(); + expect(compute.setTags(INSTANCE_ID, tags, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setTags(tags, Compute.OperationOption.fields())); + } + + @Test + public void testSetTagsFromList() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + List tagList = ImmutableList.of("v1", "v2"); + Tags tags = Tags.builder().values(tagList).fingerprint("fingerprint").build(); + expect(compute.setTags(INSTANCE_ID, tags)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, 
instance.setTags(tagList)); + } + + @Test + public void testSetTagsFromList_Null() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + List tagList = ImmutableList.of("v1", "v2"); + Tags tags = Tags.builder().values(tagList).fingerprint("fingerprint").build(); + expect(compute.setTags(INSTANCE_ID, tags)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.setTags(tagList)); + } + + @Test + public void testSetTagsFromListWithOptions() throws Exception { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + List tagList = ImmutableList.of("v1", "v2"); + Tags tags = Tags.builder().values(tagList).fingerprint("fingerprint").build(); + expect(compute.setTags(INSTANCE_ID, tags, Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.setTags(tagList, Compute.OperationOption.fields())); + } + + @Test + public void testStartOperation() { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.start(INSTANCE_ID)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.start()); + } + + @Test + public void testStartNull() { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.start(INSTANCE_ID)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.start()); + } + + @Test + public void testStopOperation() { + initializeExpectedInstance(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZoneOperationId.of("project", "op")) + .build(); + expect(compute.stop(INSTANCE_ID)).andReturn(operation); + replay(compute); + initializeInstance(); + assertSame(operation, instance.stop()); + } + + @Test + public void testStopNull() { + initializeExpectedInstance(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.stop(INSTANCE_ID)).andReturn(null); + replay(compute); + initializeInstance(); + assertNull(instance.stop()); + } + + public void compareInstance(Instance expected, Instance value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.instanceId(), value.instanceId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.statusMessage(), value.statusMessage()); + assertEquals(expected.tags(), value.tags()); + assertEquals(expected.machineType(), value.machineType()); + assertEquals(expected.canIpForward(), value.canIpForward()); + assertEquals(expected.networkInterfaces(), value.networkInterfaces()); + assertEquals(expected.attachedDisks(), value.attachedDisks()); + assertEquals(expected.metadata(), value.metadata()); + assertEquals(expected.serviceAccounts(), value.serviceAccounts()); + assertEquals(expected.schedulingOptions(), value.schedulingOptions()); + 
assertEquals(expected.cpuPlatform(), value.cpuPlatform()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/LicenseIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/LicenseIdTest.java new file mode 100644 index 000000000000..cdb5e7adf992 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/LicenseIdTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class LicenseIdTest { + + private static final String PROJECT = "project"; + private static final String LICENSE = "license"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/global/licenses/license"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + LicenseId licenseId = LicenseId.of(PROJECT, LICENSE); + assertEquals(PROJECT, licenseId.project()); + assertEquals(LICENSE, licenseId.license()); + assertEquals(URL, licenseId.selfLink()); + licenseId = LicenseId.of(LICENSE); + assertNull(licenseId.project()); + assertEquals(LICENSE, licenseId.license()); + } + + @Test + public void testToAndFromUrl() { + LicenseId licenseId = LicenseId.of(PROJECT, LICENSE); + compareLicenseId(licenseId, LicenseId.fromUrl(licenseId.selfLink())); + } + + @Test + public void testSetProjectId() { + LicenseId licenseId = LicenseId.of(PROJECT, LICENSE); + assertSame(licenseId, licenseId.setProjectId(PROJECT)); + compareLicenseId(licenseId, LicenseId.of(LICENSE).setProjectId(PROJECT)); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid license URL"); + LicenseId.fromUrl("notMatchingUrl"); + } + + @Test + public void testMatchesUrl() { + assertTrue(LicenseId.matchesUrl(LicenseId.of(PROJECT, LICENSE).selfLink())); + assertFalse(LicenseId.matchesUrl("notMatchingUrl")); + } + + private void compareLicenseId(LicenseId expected, LicenseId value) { + assertEquals(expected, value); + assertEquals(expected.project(), value.project()); + assertEquals(expected.license(), value.license()); + assertEquals(expected.selfLink(), value.selfLink()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/LicenseTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/LicenseTest.java new file mode 100644 index 000000000000..6bbe50b71489 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/LicenseTest.java @@ -0,0 +1,49 @@ +/* +
* Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; + +import org.junit.Test; + +public class LicenseTest { + + private static final LicenseId LICENSE_ID = LicenseId.of("project", "license"); + private static final Boolean CHARGES_USE_FEE = true; + private static final License LICENSE = new License(LICENSE_ID, CHARGES_USE_FEE); + + @Test + public void testBuilder() { + assertEquals(LICENSE_ID, LICENSE.licenseId()); + assertEquals(CHARGES_USE_FEE, LICENSE.chargesUseFee()); + } + + @Test + public void testToAndFromPb() { + License license = License.fromPb(LICENSE.toPb()); + compareLicenses(LICENSE, license); + assertEquals(LICENSE_ID.project(), license.licenseId().project()); + assertEquals(LICENSE_ID.license(), license.licenseId().license()); + } + + private void compareLicenses(License expected, License value) { + assertEquals(expected, value); + assertEquals(expected.licenseId(), value.licenseId()); + assertEquals(expected.chargesUseFee(), value.chargesUseFee()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/MachineTypeIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/MachineTypeIdTest.java new file mode 100644 index 000000000000..f7f776aec5a5 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/MachineTypeIdTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class MachineTypeIdTest { + + private static final String PROJECT = "project"; + private static final String ZONE = "zone"; + private static final String TYPE = "type"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/zones/zone/machineTypes/type"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + MachineTypeId machineTypeId = MachineTypeId.of(PROJECT, ZONE, TYPE); + assertEquals(PROJECT, machineTypeId.project()); + assertEquals(ZONE, machineTypeId.zone()); + assertEquals(TYPE, machineTypeId.type()); + assertEquals(URL, machineTypeId.selfLink()); + machineTypeId = MachineTypeId.of(ZONE, TYPE); + assertNull(machineTypeId.project()); + assertEquals(ZONE, machineTypeId.zone()); + assertEquals(TYPE, machineTypeId.type()); + } + + @Test + public void testToAndFromUrl() { + MachineTypeId machineTypeId = MachineTypeId.of(PROJECT, ZONE, TYPE); + compareMachineTypeId(machineTypeId, MachineTypeId.fromUrl(machineTypeId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid machine type URL"); + MachineTypeId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + MachineTypeId machineTypeId = MachineTypeId.of(PROJECT, ZONE, TYPE); + assertSame(machineTypeId, machineTypeId.setProjectId(PROJECT)); + compareMachineTypeId(machineTypeId, MachineTypeId.of(ZONE, TYPE).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(MachineTypeId.matchesUrl(MachineTypeId.of(PROJECT, ZONE, TYPE).selfLink())); + assertFalse(MachineTypeId.matchesUrl("notMatchingUrl")); + } + + private void compareMachineTypeId(MachineTypeId expected, MachineTypeId value) { + assertEquals(expected, value); + assertEquals(expected.project(), value.project()); + assertEquals(expected.zone(), value.zone()); + assertEquals(expected.type(), value.type()); + assertEquals(expected.selfLink(), value.selfLink()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/MachineTypeTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/MachineTypeTest.java new file mode 100644 index 000000000000..924d22bde36c --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/MachineTypeTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class MachineTypeTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final MachineTypeId MACHINE_TYPE_ID = MachineTypeId.of("project", "zone", "type"); + private static final Integer CPUS = 1; + private static final Integer MEMORY_MB = 2; + private static final List SCRATCH_DISKS = ImmutableList.of(3); + private static final Integer MAXIMUM_PERSISTENT_DISKS = 4; + private static final Long MAXIMUM_PERSISTENT_DISKS_SIZE_GB = 5L; + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.of(DeprecationStatus.Status.DELETED, MACHINE_TYPE_ID); + private static final MachineType MACHINE_TYPE = MachineType.builder() + .generatedId(GENERATED_ID) + .machineTypeId(MACHINE_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .cpus(CPUS) + .memoryMb(MEMORY_MB) + .scratchDisksSizeGb(SCRATCH_DISKS) + .maximumPersistentDisks(MAXIMUM_PERSISTENT_DISKS) + .maximumPersistentDisksSizeGb(MAXIMUM_PERSISTENT_DISKS_SIZE_GB) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, MACHINE_TYPE.generatedId()); + assertEquals(MACHINE_TYPE_ID, MACHINE_TYPE.machineTypeId()); + assertEquals(CREATION_TIMESTAMP, MACHINE_TYPE.creationTimestamp()); + assertEquals(DESCRIPTION, MACHINE_TYPE.description()); + assertEquals(CPUS, MACHINE_TYPE.cpus()); + assertEquals(MEMORY_MB, MACHINE_TYPE.memoryMb()); + assertEquals(SCRATCH_DISKS, MACHINE_TYPE.scratchDisksSizeGb()); + assertEquals(MAXIMUM_PERSISTENT_DISKS, MACHINE_TYPE.maximumPersistentDisks()); + assertEquals(MAXIMUM_PERSISTENT_DISKS_SIZE_GB, MACHINE_TYPE.maximumPersistentDisksSizeGb()); + assertEquals(DEPRECATION_STATUS, MACHINE_TYPE.deprecationStatus()); + } + + @Test + public void testToPbAndFromPb() { + compareMachineTypes(MACHINE_TYPE, MachineType.fromPb(MACHINE_TYPE.toPb())); + MachineType machineType = MachineType.builder().machineTypeId(MACHINE_TYPE_ID).build(); + compareMachineTypes(machineType, MachineType.fromPb(machineType.toPb())); + } + + private void compareMachineTypes(MachineType expected, MachineType value) { + assertEquals(expected, value); + assertEquals(expected.machineTypeId(), value.machineTypeId()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.cpus(), value.cpus()); + assertEquals(expected.memoryMb(), value.memoryMb()); + assertEquals(expected.scratchDisksSizeGb(), value.scratchDisksSizeGb()); + assertEquals(expected.maximumPersistentDisks(), value.maximumPersistentDisks()); + assertEquals(expected.maximumPersistentDisksSizeGb(), value.maximumPersistentDisksSizeGb()); + assertEquals(expected.deprecationStatus(), value.deprecationStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/MetadataTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/MetadataTest.java new file mode 100644 index 000000000000..9de656ff24d0 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/MetadataTest.java @@ -0,0 
+1,81 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableMap; + +import org.junit.Test; + +import java.util.Map; + +public class MetadataTest { + + private static final Metadata METADATA = Metadata.builder() + .add("key1", "value1") + .add("key2", "value2") + .build(); + + @Test + public void testToBuilder() { + Metadata metadata = METADATA.toBuilder().fingerprint("newFingerprint").build(); + assertEquals("newFingerprint", metadata.fingerprint()); + compareMetadata(METADATA, metadata.toBuilder().fingerprint(null).build()); + } + + @Test + public void testBuilder() { + assertEquals(ImmutableMap.of("key1", "value1", "key2", "value2"), METADATA.values()); + assertNull(METADATA.fingerprint()); + Metadata metadata = Metadata.builder() + .values(ImmutableMap.of("key1", "value1", "key2", "value2")) + .build(); + assertEquals(ImmutableMap.of("key1", "value1", "key2", "value2"), metadata.values()); + assertNull(metadata.fingerprint()); + metadata = Metadata.builder() + .values(ImmutableMap.of("key1", "value1", "key2", "value2")) + .fingerprint("fingerprint") + .build(); + assertEquals(ImmutableMap.of("key1", "value1", "key2", "value2"), metadata.values()); + assertEquals("fingerprint", metadata.fingerprint()); + } + + @Test + public void testOf() { + Map map = ImmutableMap.of("key1", "value1", "key2", "value2"); + compareMetadata(METADATA, Metadata.of(map)); + } + + @Test + public void testToAndFromPb() { + compareMetadata(METADATA, Metadata.fromPb(METADATA.toPb())); + Metadata metadata = Metadata.builder() + .values(ImmutableMap.of("key1", "value1", "key2", "value2")) + .fingerprint("fingerprint") + .build(); + compareMetadata(metadata, Metadata.fromPb(metadata.toPb())); + } + + public void compareMetadata(Metadata expected, Metadata value) { + assertEquals(expected, value); + assertEquals(expected.fingerprint(), value.fingerprint()); + assertEquals(expected.values(), value.values()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkIdTest.java new file mode 100644 index 000000000000..fb785852cf71 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkIdTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class NetworkIdTest { + + private static final String PROJECT = "project"; + private static final String NETWORK = "network"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/global/networks/network"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + NetworkId networkId = NetworkId.of(PROJECT, NETWORK); + assertEquals(PROJECT, networkId.project()); + assertEquals(NETWORK, networkId.network()); + assertEquals(URL, networkId.selfLink()); + networkId = NetworkId.of(NETWORK); + assertNull(networkId.project()); + assertEquals(NETWORK, networkId.network()); + } + + @Test + public void testToAndFromUrl() { + NetworkId networkId = NetworkId.of(PROJECT, NETWORK); + compareNetworkId(networkId, NetworkId.fromUrl(networkId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid network URL"); + NetworkId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + NetworkId networkId = NetworkId.of(PROJECT, NETWORK); + assertSame(networkId, networkId.setProjectId(PROJECT)); + compareNetworkId(networkId, NetworkId.of(NETWORK).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(NetworkId.matchesUrl(NetworkId.of(PROJECT, NETWORK).selfLink())); + assertFalse(NetworkId.matchesUrl("notMatchingUrl")); + } + + private void compareNetworkId(NetworkId expected, NetworkId value) { + assertEquals(expected, value); + assertEquals(expected.project(), value.project()); + assertEquals(expected.network(), value.network()); + assertEquals(expected.selfLink(), value.selfLink()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkInfoTest.java new file mode 100644 index 000000000000..e2d9c4451ee4 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkInfoTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class NetworkInfoTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final SubnetworkId SUBNETWORK1 = SubnetworkId.of("project", "region1", "network1"); + private static final SubnetworkId SUBNETWORK2 = SubnetworkId.of("project", "region2", "network2"); + private static final List SUBNETWORKS = ImmutableList.of(SUBNETWORK1, SUBNETWORK2); + private static final String GATEWAY_ADDRESS = "192.168.1.1"; + private static final NetworkId NETWORK_ID = NetworkId.of("project", "network"); + private static final String IP_RANGE = "192.168.0.0/16"; + private static final Boolean AUTO_CREATE_SUBNETWORKS = true; + private static final StandardNetworkConfiguration NETWORK_CONFIGURATION = + new StandardNetworkConfiguration(IP_RANGE, GATEWAY_ADDRESS); + private static final SubnetNetworkConfiguration SUBNET_NETWORK_CONFIGURATION = + new SubnetNetworkConfiguration(AUTO_CREATE_SUBNETWORKS, SUBNETWORKS); + private static final NetworkInfo NETWORK_INFO = + NetworkInfo.builder(NETWORK_ID, NETWORK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .build(); + private static final NetworkInfo SUBNET_NETWORK_INFO = + NetworkInfo.builder(NETWORK_ID, SUBNET_NETWORK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .build(); + + @Test + public void testToBuilder() { + compareNetworkInfo(NETWORK_INFO, NETWORK_INFO.toBuilder().build()); + NetworkInfo networkInfo = NETWORK_INFO.toBuilder().description("newDescription").build(); + assertEquals("newDescription", networkInfo.description()); + networkInfo = networkInfo.toBuilder().description("description").build(); + compareNetworkInfo(NETWORK_INFO, networkInfo); + compareNetworkInfo(SUBNET_NETWORK_INFO, SUBNET_NETWORK_INFO.toBuilder().build()); + networkInfo = SUBNET_NETWORK_INFO.toBuilder().description("newDescription").build(); + assertEquals("newDescription", networkInfo.description()); + networkInfo = networkInfo.toBuilder().description("description").build(); + compareNetworkInfo(SUBNET_NETWORK_INFO, networkInfo); + } + + @Test + public void testToBuilderIncomplete() { + NetworkInfo networkInfo = NetworkInfo.of(NETWORK_ID, NETWORK_CONFIGURATION); + assertEquals(networkInfo, networkInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, NETWORK_INFO.generatedId()); + assertEquals(NETWORK_ID, NETWORK_INFO.networkId()); + assertEquals(CREATION_TIMESTAMP, NETWORK_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, NETWORK_INFO.description()); + assertEquals(NETWORK_CONFIGURATION, NETWORK_INFO.configuration()); + assertEquals(GENERATED_ID, SUBNET_NETWORK_INFO.generatedId()); + assertEquals(NETWORK_ID, SUBNET_NETWORK_INFO.networkId()); + assertEquals(CREATION_TIMESTAMP, SUBNET_NETWORK_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, SUBNET_NETWORK_INFO.description()); + assertEquals(SUBNET_NETWORK_CONFIGURATION, SUBNET_NETWORK_INFO.configuration()); + } + + @Test + public void testOf() { + NetworkInfo networkInfo = NetworkInfo.of(NETWORK_ID, NETWORK_CONFIGURATION); + assertNull(networkInfo.generatedId()); 
+ assertEquals(NETWORK_ID, networkInfo.networkId()); + assertEquals(NETWORK_CONFIGURATION, networkInfo.configuration()); + assertNull(networkInfo.creationTimestamp()); + assertNull(networkInfo.description()); + } + + @Test + public void testToAndFromPb() { + compareNetworkInfo(NETWORK_INFO, NetworkInfo.fromPb(NETWORK_INFO.toPb())); + compareNetworkInfo(SUBNET_NETWORK_INFO, NetworkInfo.fromPb(SUBNET_NETWORK_INFO.toPb())); + NetworkInfo networkInfo = NetworkInfo.of(NETWORK_ID, NETWORK_CONFIGURATION); + compareNetworkInfo(networkInfo, NetworkInfo.fromPb(networkInfo.toPb())); + } + + @Test + public void testSetProjectId() { + NetworkInfo networkInfo = NETWORK_INFO.toBuilder() + .networkId(NetworkId.of("network")) + .build(); + compareNetworkInfo(NETWORK_INFO, networkInfo.setProjectId("project")); + } + + public void compareNetworkInfo(NetworkInfo expected, NetworkInfo value) { + assertEquals(expected, value); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.networkId(), value.networkId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.configuration(), value.configuration()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkInterfaceTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkInterfaceTest.java new file mode 100644 index 000000000000..c36d52d8bd0f --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkInterfaceTest.java @@ -0,0 +1,178 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableList; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; + +public class NetworkInterfaceTest { + + private static final String NAME = "networkInterface"; + private static final NetworkId NETWORK = NetworkId.of("project", "network"); + private static final String NETWORK_IP = "192.168.1.1"; + private static final SubnetworkId SUBNETWORK = SubnetworkId.of("project", "region", "subnetwork"); + private static final NetworkInterface.AccessConfig ACCESS_CONFIG = NetworkInterface.AccessConfig.builder() + .name("accessConfig") + .natIp("192.168.1.1") + .type(NetworkInterface.AccessConfig.Type.ONE_TO_ONE_NAT) + .build(); + private static final List ACCESS_CONFIGURATIONS = + ImmutableList.of(ACCESS_CONFIG); + private static final NetworkInterface NETWORK_INTERFACE = NetworkInterface.builder(NETWORK) + .name(NAME) + .networkIp(NETWORK_IP) + .subnetwork(SUBNETWORK) + .accessConfigurations(ACCESS_CONFIGURATIONS) + .build(); + + @Test + public void testAccessConfigToBuilder() { + NetworkInterface.AccessConfig accessConfig = ACCESS_CONFIG.toBuilder().name("newName").build(); + assertEquals("newName", accessConfig.name()); + compareAccessConfig(ACCESS_CONFIG, accessConfig.toBuilder().name("accessConfig").build()); + } + + @Test + public void testAccessConfigToBuilderIncomplete() { + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.of(); + compareAccessConfig(accessConfig, accessConfig.toBuilder().build()); + } + + @Test + public void testToBuilder() { + compareNetworkInterface(NETWORK_INTERFACE, NETWORK_INTERFACE.toBuilder().build()); + NetworkInterface networkInterface = NETWORK_INTERFACE.toBuilder().name("newInterface").build(); + assertEquals("newInterface", networkInterface.name()); + networkInterface = networkInterface.toBuilder().name(NAME).build(); + compareNetworkInterface(NETWORK_INTERFACE, networkInterface); + } + + @Test + public void testToBuilderIncomplete() { + NetworkInterface networkInterface = NetworkInterface.of(NETWORK); + assertEquals(networkInterface, networkInterface.toBuilder().build()); + networkInterface = NetworkInterface.of(NETWORK.network()); + assertEquals(networkInterface, networkInterface.toBuilder().build()); + } + + @Test + public void testAccessConfigBuilder() { + assertEquals("accessConfig", ACCESS_CONFIG.name()); + assertEquals("192.168.1.1", ACCESS_CONFIG.natIp()); + Assert.assertEquals(NetworkInterface.AccessConfig.Type.ONE_TO_ONE_NAT, ACCESS_CONFIG.type()); + } + + @Test + public void testBuilder() { + assertEquals(NAME, NETWORK_INTERFACE.name()); + assertEquals(NETWORK, NETWORK_INTERFACE.network()); + assertEquals(NETWORK_IP, NETWORK_INTERFACE.networkIp()); + assertEquals(SUBNETWORK, NETWORK_INTERFACE.subnetwork()); + assertEquals(ACCESS_CONFIGURATIONS, NETWORK_INTERFACE.accessConfigurations()); + NetworkInterface networkInterface = NetworkInterface.builder("network") + .name(NAME) + .networkIp(NETWORK_IP) + .subnetwork(SUBNETWORK) + .accessConfigurations(ACCESS_CONFIG) + .build(); + assertEquals(NAME, networkInterface.name()); + assertEquals(NetworkId.of("network"), networkInterface.network()); + assertEquals(NETWORK_IP, networkInterface.networkIp()); + assertEquals(SUBNETWORK, networkInterface.subnetwork()); + assertEquals(ACCESS_CONFIGURATIONS, networkInterface.accessConfigurations()); + } + + @Test + public void testAccessConfigOf() 
{ + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.of("192.168.1.1"); + assertNull(accessConfig.name()); + assertEquals("192.168.1.1", accessConfig.natIp()); + assertNull(accessConfig.type()); + accessConfig = NetworkInterface.AccessConfig.of(); + assertNull(accessConfig.name()); + assertNull(accessConfig.natIp()); + assertNull(accessConfig.type()); + } + + @Test + public void testOf() { + NetworkInterface networkInterface = NetworkInterface.of(NETWORK); + assertNull(networkInterface.name()); + assertEquals(NETWORK, networkInterface.network()); + assertNull(networkInterface.networkIp()); + assertNull(networkInterface.subnetwork()); + networkInterface = NetworkInterface.of(NETWORK.network()); + assertNull(networkInterface.name()); + assertNull(networkInterface.network().project()); + assertEquals(NETWORK.network(), networkInterface.network().network()); + assertNull(networkInterface.networkIp()); + assertNull(networkInterface.subnetwork()); + } + + @Test + public void testAccessConfigToAndFromPb() { + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.fromPb(ACCESS_CONFIG.toPb()); + compareAccessConfig(ACCESS_CONFIG, accessConfig); + accessConfig = NetworkInterface.AccessConfig.of(); + compareAccessConfig(accessConfig, NetworkInterface.AccessConfig.fromPb(accessConfig.toPb())); + } + + @Test + public void testToAndFromPb() { + NetworkInterface networkInterface = NetworkInterface.fromPb(NETWORK_INTERFACE.toPb()); + compareNetworkInterface(NETWORK_INTERFACE, networkInterface); + networkInterface = NetworkInterface.of(NETWORK); + compareNetworkInterface(networkInterface, NetworkInterface.fromPb(networkInterface.toPb())); + } + + @Test + public void testSetProjectId() { + NetworkInterface networkInterface = NetworkInterface.of(NETWORK); + compareNetworkInterface(networkInterface, + NetworkInterface.of(NetworkId.of("network")).setProjectId("project")); + networkInterface = NETWORK_INTERFACE.toBuilder() + .network(NetworkId.of("network")) + .subnetwork(SubnetworkId.of("region", "subnetwork")) + .build(); + compareNetworkInterface(NETWORK_INTERFACE, networkInterface.setProjectId("project")); + } + + public void compareAccessConfig(NetworkInterface.AccessConfig expected, NetworkInterface.AccessConfig value) { + assertEquals(expected, value); + assertEquals(expected.name(), value.name()); + assertEquals(expected.natIp(), value.natIp()); + assertEquals(expected.type(), value.type()); + assertEquals(expected.hashCode(), value.hashCode()); + } + + public void compareNetworkInterface(NetworkInterface expected, NetworkInterface value) { + assertEquals(expected, value); + assertEquals(expected.name(), value.name()); + assertEquals(expected.network(), value.network()); + assertEquals(expected.networkIp(), value.networkIp()); + assertEquals(expected.subnetwork(), value.subnetwork()); + assertEquals(expected.accessConfigurations(), value.accessConfigurations()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkTest.java new file mode 100644 index 000000000000..5bc240fafddc --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/NetworkTest.java @@ -0,0 +1,259 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class NetworkTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final SubnetworkId SUBNETWORK1 = SubnetworkId.of("project", "region1", "network1"); + private static final SubnetworkId SUBNETWORK2 = SubnetworkId.of("project", "region2", "network2"); + private static final List SUBNETWORKS = ImmutableList.of(SUBNETWORK1, SUBNETWORK2); + private static final String GATEWAY_ADDRESS = "192.168.1.1"; + private static final NetworkId NETWORK_ID = NetworkId.of("project", "network"); + private static final String IP_RANGE = "192.168.0.0/16"; + private static final Boolean AUTO_CREATE_SUBNETWORKS = true; + private static final StandardNetworkConfiguration NETWORK_CONFIGURATION = + new StandardNetworkConfiguration(IP_RANGE, GATEWAY_ADDRESS); + private static final SubnetNetworkConfiguration SUBNET_NETWORK_CONFIGURATION = + new SubnetNetworkConfiguration(AUTO_CREATE_SUBNETWORKS, SUBNETWORKS); + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Network network; + private Network standardNetwork; + private Network subnetNetwork; + + private void initializeExpectedNetwork(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + standardNetwork = + new Network.Builder(serviceMockReturnsOptions, NETWORK_ID, NETWORK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .build(); + subnetNetwork = + new Network.Builder(serviceMockReturnsOptions, NETWORK_ID, SUBNET_NETWORK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeNetwork() { + network = new Network.Builder(compute, NETWORK_ID, NETWORK_CONFIGURATION) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedNetwork(9); + compareNetwork(standardNetwork, standardNetwork.toBuilder().build()); + Network newNetwork = standardNetwork.toBuilder().description("newDescription").build(); + assertEquals("newDescription", 
newNetwork.description()); + newNetwork = newNetwork.toBuilder().description("description").build(); + compareNetwork(standardNetwork, newNetwork); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedNetwork(6); + NetworkInfo networkInfo = NetworkInfo.of(NETWORK_ID, NETWORK_CONFIGURATION); + Network network = + new Network(serviceMockReturnsOptions, new NetworkInfo.BuilderImpl(networkInfo)); + compareNetwork(network, network.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedNetwork(2); + assertEquals(GENERATED_ID, standardNetwork.generatedId()); + assertEquals(NETWORK_ID, standardNetwork.networkId()); + assertEquals(CREATION_TIMESTAMP, standardNetwork.creationTimestamp()); + assertEquals(DESCRIPTION, standardNetwork.description()); + assertEquals(NETWORK_CONFIGURATION, standardNetwork.configuration()); + assertSame(serviceMockReturnsOptions, standardNetwork.compute()); + assertEquals(GENERATED_ID, subnetNetwork.generatedId()); + assertEquals(NETWORK_ID, subnetNetwork.networkId()); + assertEquals(CREATION_TIMESTAMP, subnetNetwork.creationTimestamp()); + assertEquals(DESCRIPTION, subnetNetwork.description()); + assertEquals(SUBNET_NETWORK_CONFIGURATION, subnetNetwork.configuration()); + assertSame(serviceMockReturnsOptions, subnetNetwork.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedNetwork(12); + compareNetwork(standardNetwork, + Network.fromPb(serviceMockReturnsOptions, standardNetwork.toPb())); + compareNetwork(subnetNetwork, + Network.fromPb(serviceMockReturnsOptions, subnetNetwork.toPb())); + Network network = + new Network.Builder(serviceMockReturnsOptions, NETWORK_ID, NETWORK_CONFIGURATION).build(); + compareNetwork(network, Network.fromPb(serviceMockReturnsOptions, network.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedNetwork(3); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + expect(compute.deleteNetwork(NETWORK_ID.network())).andReturn(operation); + replay(compute); + initializeNetwork(); + assertSame(operation, network.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedNetwork(2); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteNetwork(NETWORK_ID.network())).andReturn(null); + replay(compute); + initializeNetwork(); + assertNull(network.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedNetwork(2); + Compute.NetworkOption[] expectedOptions = {Compute.NetworkOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getNetwork(NETWORK_ID.network(), expectedOptions)) + .andReturn(standardNetwork); + replay(compute); + initializeNetwork(); + assertTrue(network.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedNetwork(2); + Compute.NetworkOption[] expectedOptions = {Compute.NetworkOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getNetwork(NETWORK_ID.network(), expectedOptions)).andReturn(null); + replay(compute); + initializeNetwork(); + assertFalse(network.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedNetwork(4); + expect(compute.options()).andReturn(mockOptions); + 
expect(compute.getNetwork(NETWORK_ID.network())).andReturn(standardNetwork); + replay(compute); + initializeNetwork(); + Network updatedNetwork = network.reload(); + compareNetwork(standardNetwork, updatedNetwork); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedNetwork(2); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getNetwork(NETWORK_ID.network())).andReturn(null); + replay(compute); + initializeNetwork(); + assertNull(network.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedNetwork(4); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getNetwork(NETWORK_ID.network(), Compute.NetworkOption.fields())) + .andReturn(standardNetwork); + replay(compute); + initializeNetwork(); + Network updatedNetwork = network.reload(Compute.NetworkOption.fields()); + compareNetwork(standardNetwork, updatedNetwork); + verify(compute); + } + + @Test + public void testCreateSubnetwork() throws Exception { + initializeExpectedNetwork(3); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(RegionOperationId.of(SUBNETWORK1.regionId(), "op")) + .build(); + expect(compute.options()).andReturn(mockOptions); + expect(compute.create(SubnetworkInfo.of(SUBNETWORK1, NETWORK_ID, IP_RANGE))) + .andReturn(operation); + replay(compute); + initializeNetwork(); + assertSame(operation, network.createSubnetwork(SUBNETWORK1, IP_RANGE)); + verify(compute); + } + + @Test + public void testCreateSubnetworkWithOptions() throws Exception { + initializeExpectedNetwork(3); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(RegionOperationId.of(SUBNETWORK1.regionId(), "op")) + .build(); + expect(compute.options()).andReturn(mockOptions); + expect(compute.create(SubnetworkInfo.of(SUBNETWORK1, NETWORK_ID, IP_RANGE), + Compute.OperationOption.fields())).andReturn(operation); + replay(compute); + initializeNetwork(); + assertSame(operation, + network.createSubnetwork(SUBNETWORK1, IP_RANGE, Compute.OperationOption.fields())); + verify(compute); + } + + public void compareNetwork(Network expected, Network value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.networkId(), value.networkId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.configuration(), value.configuration()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/OperationIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/OperationIdTest.java new file mode 100644 index 000000000000..9944a6fc7585 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/OperationIdTest.java @@ -0,0 +1,169 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class OperationIdTest { + + private static final String PROJECT = "project"; + private static final String ZONE = "zone"; + private static final String REGION = "region"; + private static final String NAME = "op"; + private static final String GLOBAL_URL = + "https://www.googleapis.com/compute/v1/projects/project/global/operations/op"; + private static final String ZONE_URL = + "https://www.googleapis.com/compute/v1/projects/project/zones/zone/operations/op"; + private static final String REGION_URL = + "https://www.googleapis.com/compute/v1/projects/project/regions/region/operations/op"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + GlobalOperationId operationId = GlobalOperationId.of(PROJECT, NAME); + assertEquals(OperationId.Type.GLOBAL, operationId.type()); + assertEquals(PROJECT, operationId.project()); + assertEquals(NAME, operationId.operation()); + assertEquals(GLOBAL_URL, operationId.selfLink()); + operationId = GlobalOperationId.of(NAME); + assertEquals(OperationId.Type.GLOBAL, operationId.type()); + assertNull(operationId.project()); + assertEquals(NAME, operationId.operation()); + ZoneOperationId zoneOperationId = ZoneOperationId.of(PROJECT, ZONE, NAME); + assertEquals(OperationId.Type.ZONE, zoneOperationId.type()); + assertEquals(PROJECT, zoneOperationId.project()); + assertEquals(ZONE, zoneOperationId.zone()); + assertEquals(NAME, zoneOperationId.operation()); + assertEquals(ZONE_URL, zoneOperationId.selfLink()); + zoneOperationId = ZoneOperationId.of(ZONE, NAME); + assertEquals(OperationId.Type.ZONE, zoneOperationId.type()); + assertNull(zoneOperationId.project()); + assertEquals(ZONE, zoneOperationId.zone()); + assertEquals(NAME, zoneOperationId.operation()); + zoneOperationId = ZoneOperationId.of(ZoneId.of(PROJECT, ZONE), NAME); + assertEquals(OperationId.Type.ZONE, zoneOperationId.type()); + assertEquals(PROJECT, zoneOperationId.project()); + assertEquals(ZONE, zoneOperationId.zone()); + assertEquals(NAME, zoneOperationId.operation()); + RegionOperationId regionOperationId = RegionOperationId.of(PROJECT, REGION, NAME); + assertEquals(OperationId.Type.REGION, regionOperationId.type()); + assertEquals(PROJECT, regionOperationId.project()); + assertEquals(REGION, regionOperationId.region()); + assertEquals(NAME, regionOperationId.operation()); + assertEquals(REGION_URL, regionOperationId.selfLink()); + regionOperationId = RegionOperationId.of(REGION, NAME); + assertEquals(OperationId.Type.REGION, regionOperationId.type()); + assertNull(regionOperationId.project()); + assertEquals(REGION, regionOperationId.region()); + assertEquals(NAME, regionOperationId.operation()); + regionOperationId = RegionOperationId.of(RegionId.of(PROJECT, 
REGION), NAME);
+    assertEquals(OperationId.Type.REGION, regionOperationId.type());
+    assertEquals(PROJECT, regionOperationId.project());
+    assertEquals(REGION, regionOperationId.region());
+    assertEquals(NAME, regionOperationId.operation());
+  }
+
+  @Test
+  public void testToAndFromUrlGlobal() {
+    GlobalOperationId operationId = GlobalOperationId.of(PROJECT, NAME);
+    compareOperationId(operationId, GlobalOperationId.fromUrl(operationId.selfLink()));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid global operation URL");
+    GlobalOperationId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testToAndFromUrlRegion() {
+    RegionOperationId regionOperationId = RegionOperationId.of(PROJECT, REGION, NAME);
+    compareRegionOperationId(regionOperationId,
+        RegionOperationId.fromUrl(regionOperationId.selfLink()));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid region operation URL");
+    RegionOperationId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testToAndFromUrlZone() {
+    ZoneOperationId zoneOperationId = ZoneOperationId.of(PROJECT, ZONE, NAME);
+    compareZoneOperationId(zoneOperationId, ZoneOperationId.fromUrl(zoneOperationId.selfLink()));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid zone operation URL");
+    ZoneOperationId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testSetProjectId() {
+    GlobalOperationId operationId = GlobalOperationId.of(PROJECT, NAME);
+    assertSame(operationId, operationId.setProjectId(PROJECT));
+    compareOperationId(operationId, GlobalOperationId.of(NAME).setProjectId(PROJECT));
+    ZoneOperationId zoneOperationId = ZoneOperationId.of(PROJECT, ZONE, NAME);
+    assertSame(zoneOperationId, zoneOperationId.setProjectId(PROJECT));
+    compareZoneOperationId(zoneOperationId, ZoneOperationId.of(ZONE, NAME).setProjectId(PROJECT));
+    RegionOperationId regionOperationId = RegionOperationId.of(PROJECT, REGION, NAME);
+    assertSame(regionOperationId, regionOperationId.setProjectId(PROJECT));
+    compareRegionOperationId(regionOperationId,
+        RegionOperationId.of(REGION, NAME).setProjectId(PROJECT));
+  }
+
+  @Test
+  public void testMatchesUrl() {
+    assertTrue(GlobalOperationId.matchesUrl(GlobalOperationId.of(PROJECT, NAME).selfLink()));
+    assertFalse(GlobalOperationId.matchesUrl("notMatchingUrl"));
+    assertTrue(
+        RegionOperationId.matchesUrl(RegionOperationId.of(PROJECT, REGION, NAME).selfLink()));
+    assertFalse(RegionOperationId.matchesUrl("notMatchingUrl"));
+    assertTrue(ZoneOperationId.matchesUrl(ZoneOperationId.of(PROJECT, ZONE, NAME).selfLink()));
+    assertFalse(ZoneOperationId.matchesUrl("notMatchingUrl"));
+  }
+
+  private void compareOperationId(GlobalOperationId expected, GlobalOperationId value) {
+    assertEquals(expected, value);
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.operation(), value.operation());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+
+  private void compareZoneOperationId(ZoneOperationId expected, ZoneOperationId value) {
+    assertEquals(expected, value);
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.zone(), value.zone());
+    assertEquals(expected.operation(), value.operation());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+
+  private void compareRegionOperationId(RegionOperationId expected, RegionOperationId value) {
+    assertEquals(expected, value);
+    assertEquals(expected.type(), value.type());
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.region(), value.region());
+    assertEquals(expected.operation(), value.operation());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/OperationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/OperationTest.java
new file mode 100644
index 000000000000..d45fe48c1134
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/OperationTest.java
@@ -0,0 +1,413 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.compute;
+
+import static org.easymock.EasyMock.createMock;
+import static org.easymock.EasyMock.createStrictMock;
+import static org.easymock.EasyMock.expect;
+import static org.easymock.EasyMock.replay;
+import static org.easymock.EasyMock.verify;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+
+import org.junit.After;
+import org.junit.Test;
+
+import java.util.List;
+
+public class OperationTest {
+
+  private static final Operation.OperationError OPERATION_ERROR1 =
+      new Operation.OperationError("code1", "location1", "message1");
+  private static final Operation.OperationError OPERATION_ERROR2 =
+      new Operation.OperationError("code2", "location2", "message2");
+  private static final Operation.OperationWarning OPERATION_WARNING1 =
+      new Operation.OperationWarning("code1", "message1", ImmutableMap.of("k1", "v1"));
+  private static final Operation.OperationWarning OPERATION_WARNING2 =
+      new Operation.OperationWarning("code2", "location2", ImmutableMap.of("k2", "v2"));
+  private static final String GENERATED_ID = "1";
+  private static final String CLIENT_OPERATION_ID = "clientOperationId";
+  private static final String OPERATION_TYPE = "delete";
+  private static final String TARGET_LINK = "targetLink";
+  private static final String TARGET_ID = "42";
+  private static final Operation.Status STATUS = Operation.Status.DONE;
+  private static final String STATUS_MESSAGE = "statusMessage";
+  private static final String USER = "user";
+  private static final Integer PROGRESS = 100;
+  private static final Long INSERT_TIME = 1453293540000L;
+  private static final Long START_TIME = 1453293420000L;
+  private static final Long END_TIME = 1453293480000L;
+  private static final List<Operation.OperationError> ERRORS =
+      ImmutableList.of(OPERATION_ERROR1, OPERATION_ERROR2);
+  private static
final List WARNINGS = + ImmutableList.of(OPERATION_WARNING1, OPERATION_WARNING2); + private static final Integer HTTP_ERROR_STATUS_CODE = 404; + private static final String HTTP_ERROR_MESSAGE = "NOT FOUND"; + private static final String DESCRIPTION = "description"; + private static final GlobalOperationId GLOBAL_OPERATION_ID = + GlobalOperationId.of("project", "op"); + private static final ZoneOperationId ZONE_OPERATION_ID = + ZoneOperationId.of("project", "zone", "op"); + private static final RegionOperationId REGION_OPERATION_ID = + RegionOperationId.of("project", "region", "op"); + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Operation globalOperation; + private Operation regionOperation; + private Operation zoneOperation; + private Operation operation; + + private void initializeExpectedOperation(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + globalOperation = new Operation.Builder(serviceMockReturnsOptions) + .generatedId(GENERATED_ID) + .operationId(GLOBAL_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + zoneOperation = new Operation.Builder(serviceMockReturnsOptions) + .generatedId(GENERATED_ID) + .operationId(ZONE_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + regionOperation = new Operation.Builder(serviceMockReturnsOptions) + .generatedId(GENERATED_ID) + .operationId(REGION_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeOperation() { + operation = new Operation.Builder(compute) + .generatedId(GENERATED_ID) + .operationId(GLOBAL_OPERATION_ID) + .clientOperationId(CLIENT_OPERATION_ID) + .operationType(OPERATION_TYPE) + .targetLink(TARGET_LINK) + .targetId(TARGET_ID) + .status(STATUS) + .statusMessage(STATUS_MESSAGE) + .user(USER) + .progress(PROGRESS) + .insertTime(INSERT_TIME) + .startTime(START_TIME) + .endTime(END_TIME) + .errors(ERRORS) + .warnings(WARNINGS) + .httpErrorStatusCode(HTTP_ERROR_STATUS_CODE) + .httpErrorMessage(HTTP_ERROR_MESSAGE) + .description(DESCRIPTION) + .build(); + } + + @After + public void 
tearDown() throws Exception {
+    verify(serviceMockReturnsOptions);
+  }
+
+  private void assertEqualsCommonFields(Operation operation) {
+    assertEquals(GENERATED_ID, operation.generatedId());
+    assertEquals(CLIENT_OPERATION_ID, operation.clientOperationId());
+    assertEquals(OPERATION_TYPE, operation.operationType());
+    assertEquals(TARGET_LINK, operation.targetLink());
+    assertEquals(TARGET_ID, operation.targetId());
+    assertEquals(STATUS, operation.status());
+    assertEquals(STATUS_MESSAGE, operation.statusMessage());
+    assertEquals(USER, operation.user());
+    assertEquals(PROGRESS, operation.progress());
+    assertEquals(INSERT_TIME, operation.insertTime());
+    assertEquals(START_TIME, operation.startTime());
+    assertEquals(END_TIME, operation.endTime());
+    assertEquals(ERRORS, operation.errors());
+    assertEquals(WARNINGS, operation.warnings());
+    assertEquals(HTTP_ERROR_STATUS_CODE, operation.httpErrorStatusCode());
+    assertEquals(HTTP_ERROR_MESSAGE, operation.httpErrorMessage());
+    assertEquals(DESCRIPTION, operation.description());
+    assertSame(serviceMockReturnsOptions, operation.compute());
+  }
+
+  private void assertNullCommonFields(Operation operation) {
+    assertNull(operation.generatedId());
+    assertNull(operation.clientOperationId());
+    assertNull(operation.operationType());
+    assertNull(operation.targetLink());
+    assertNull(operation.targetId());
+    assertNull(operation.status());
+    assertNull(operation.statusMessage());
+    assertNull(operation.user());
+    assertNull(operation.progress());
+    assertNull(operation.insertTime());
+    assertNull(operation.startTime());
+    assertNull(operation.endTime());
+    assertNull(operation.errors());
+    assertNull(operation.warnings());
+    assertNull(operation.httpErrorStatusCode());
+    assertNull(operation.httpErrorMessage());
+    assertNull(operation.description());
+    assertSame(serviceMockReturnsOptions, operation.compute());
+  }
+
+  @Test
+  public void testBuilder() {
+    initializeExpectedOperation(6);
+    assertEqualsCommonFields(globalOperation);
+    assertEquals(GLOBAL_OPERATION_ID, globalOperation.operationId());
+    assertEqualsCommonFields(regionOperation);
+    assertEquals(REGION_OPERATION_ID, regionOperation.operationId());
+    assertEqualsCommonFields(zoneOperation);
+    assertEquals(ZONE_OPERATION_ID, zoneOperation.operationId());
+    Operation operation = new Operation.Builder(serviceMockReturnsOptions)
+        .operationId(GLOBAL_OPERATION_ID)
+        .build();
+    assertNullCommonFields(operation);
+    assertEquals(GLOBAL_OPERATION_ID, operation.operationId());
+    operation = new Operation.Builder(serviceMockReturnsOptions)
+        .operationId(ZONE_OPERATION_ID)
+        .build();
+    assertNullCommonFields(operation);
+    assertEquals(ZONE_OPERATION_ID, operation.operationId());
+    operation = new Operation.Builder(serviceMockReturnsOptions)
+        .operationId(REGION_OPERATION_ID)
+        .build();
+    assertNullCommonFields(operation);
+    assertEquals(REGION_OPERATION_ID, operation.operationId());
+  }
+
+  @Test
+  public void testToAndFromPb() {
+    initializeExpectedOperation(24);
+    compareOperation(globalOperation,
+        Operation.fromPb(serviceMockReturnsOptions, globalOperation.toPb()));
+    assertNotNull(regionOperation.toPb().getRegion());
+    compareOperation(regionOperation,
+        Operation.fromPb(serviceMockReturnsOptions, regionOperation.toPb()));
+    assertNotNull(zoneOperation.toPb().getZone());
+    compareOperation(zoneOperation,
+        Operation.fromPb(serviceMockReturnsOptions, zoneOperation.toPb()));
+    Operation operation = new Operation.Builder(serviceMockReturnsOptions)
.operationId(GLOBAL_OPERATION_ID) + .build(); + compareOperation(operation, Operation.fromPb(serviceMockReturnsOptions, operation.toPb())); + operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(ZONE_OPERATION_ID) + .build(); + compareOperation(operation, Operation.fromPb(serviceMockReturnsOptions, operation.toPb())); + operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(REGION_OPERATION_ID) + .build(); + compareOperation(operation, Operation.fromPb(serviceMockReturnsOptions, operation.toPb())); + } + + @Test + public void testDeleteTrue() { + initializeExpectedOperation(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteOperation(GLOBAL_OPERATION_ID)).andReturn(true); + replay(compute); + initializeOperation(); + assertTrue(operation.delete()); + verify(compute); + } + + @Test + public void testDeleteFalse() { + initializeExpectedOperation(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteOperation(GLOBAL_OPERATION_ID)).andReturn(false); + replay(compute); + initializeOperation(); + assertFalse(operation.delete()); + verify(compute); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedOperation(3); + Compute.OperationOption[] expectedOptions = {Compute.OperationOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID, expectedOptions)).andReturn(globalOperation); + replay(compute); + initializeOperation(); + assertTrue(operation.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedOperation(3); + Compute.OperationOption[] expectedOptions = {Compute.OperationOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeOperation(); + assertFalse(operation.exists()); + verify(compute); + } + + @Test + public void testIsDone_True() throws Exception { + initializeExpectedOperation(3); + Compute.OperationOption[] expectedOptions = + {Compute.OperationOption.fields(Compute.OperationField.STATUS)}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID, expectedOptions)).andReturn(globalOperation); + replay(compute); + initializeOperation(); + assertTrue(operation.isDone()); + verify(compute); + } + + @Test + public void testIsDone_False() throws Exception { + initializeExpectedOperation(4); + Compute.OperationOption[] expectedOptions = + {Compute.OperationOption.fields(Compute.OperationField.STATUS)}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID, expectedOptions)).andReturn( + Operation.fromPb(serviceMockReturnsOptions, globalOperation.toPb().setStatus("PENDING"))); + replay(compute); + initializeOperation(); + assertFalse(operation.isDone()); + verify(compute); + } + @Test + public void testIsDone_NotExists() throws Exception { + initializeExpectedOperation(3); + Compute.OperationOption[] expectedOptions = + {Compute.OperationOption.fields(Compute.OperationField.STATUS)}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeOperation(); + assertTrue(operation.isDone()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedOperation(5); + 
expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID)).andReturn(globalOperation); + replay(compute); + initializeOperation(); + Operation updatedOperation = operation.reload(); + compareOperation(globalOperation, updatedOperation); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedOperation(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID)).andReturn(null); + replay(compute); + initializeOperation(); + assertNull(operation.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedOperation(5); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getOperation(GLOBAL_OPERATION_ID, Compute.OperationOption.fields())) + .andReturn(globalOperation); + replay(compute); + initializeOperation(); + Operation updatedOperation = operation.reload(Compute.OperationOption.fields()); + compareOperation(globalOperation, updatedOperation); + verify(compute); + } + + private void compareOperation(Operation expected, Operation value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.operationId(), value.operationId()); + assertEquals(expected.clientOperationId(), value.clientOperationId()); + assertEquals(expected.operationType(), value.operationType()); + assertEquals(expected.targetLink(), value.targetLink()); + assertEquals(expected.targetId(), value.targetId()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.statusMessage(), value.statusMessage()); + assertEquals(expected.user(), value.user()); + assertEquals(expected.progress(), value.progress()); + assertEquals(expected.insertTime(), value.insertTime()); + assertEquals(expected.startTime(), value.startTime()); + assertEquals(expected.endTime(), value.endTime()); + assertEquals(expected.errors(), value.errors()); + assertEquals(expected.warnings(), value.warnings()); + assertEquals(expected.httpErrorStatusCode(), value.httpErrorStatusCode()); + assertEquals(expected.httpErrorMessage(), value.httpErrorMessage()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/RegionIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/RegionIdTest.java new file mode 100644 index 000000000000..c6646db93acf --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/RegionIdTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.compute;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class RegionIdTest {
+
+  private static final String PROJECT = "project";
+  private static final String REGION = "region";
+  private static final String URL =
+      "https://www.googleapis.com/compute/v1/projects/project/regions/region";
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void testOf() {
+    RegionId regionId = RegionId.of(PROJECT, REGION);
+    assertEquals(PROJECT, regionId.project());
+    assertEquals(REGION, regionId.region());
+    assertEquals(URL, regionId.selfLink());
+    regionId = RegionId.of(REGION);
+    assertNull(regionId.project());
+    assertEquals(REGION, regionId.region());
+  }
+
+  @Test
+  public void testToAndFromUrl() {
+    RegionId regionId = RegionId.of(PROJECT, REGION);
+    compareRegionId(regionId, RegionId.fromUrl(regionId.selfLink()));
+  }
+
+  @Test
+  public void testSetProjectId() {
+    RegionId regionId = RegionId.of(PROJECT, REGION);
+    assertSame(regionId, regionId.setProjectId(PROJECT));
+    compareRegionId(regionId, RegionId.of(REGION).setProjectId(PROJECT));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid region URL");
+    RegionId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testMatchesUrl() {
+    assertTrue(RegionId.matchesUrl(RegionId.of(PROJECT, REGION).selfLink()));
+    assertFalse(RegionId.matchesUrl("notMatchingUrl"));
+  }
+
+  private void compareRegionId(RegionId expected, RegionId value) {
+    assertEquals(expected, value);
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.region(), value.region());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/RegionTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/RegionTest.java
new file mode 100644
index 000000000000..80e2fdc0b027
--- /dev/null
+++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/RegionTest.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class RegionTest { + + private static final RegionId REGION_ID = RegionId.of("project", "region"); + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final Region.Status STATUS = Region.Status.DOWN; + private static final ZoneId ZONE_ID1 = ZoneId.of("project", "zone1"); + private static final ZoneId ZONE_ID2 = ZoneId.of("project", "zone2"); + private static final List ZONES = ImmutableList.of(ZONE_ID1, ZONE_ID2); + private static final Region.Quota QUOTA1 = + new Region.Quota("METRIC1", 2, 1); + private static final Region.Quota QUOTA2 = + new Region.Quota("METRIC2", 4, 3); + private static final List QUOTAS = ImmutableList.of(QUOTA1, QUOTA2); + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.of(DeprecationStatus.Status.DELETED, REGION_ID); + private static final Region REGION = Region.builder() + .regionId(REGION_ID) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .zones(ZONES) + .quotas(QUOTAS) + .deprecationStatus(DEPRECATION_STATUS) + .build(); + + @Test + public void testBuilder() { + assertEquals(REGION_ID, REGION.regionId()); + assertEquals(GENERATED_ID, REGION.generatedId()); + assertEquals(CREATION_TIMESTAMP, REGION.creationTimestamp()); + assertEquals(DESCRIPTION, REGION.description()); + assertEquals(STATUS, REGION.status()); + assertEquals(ZONES, REGION.zones()); + assertEquals(QUOTAS, REGION.quotas()); + assertEquals(DEPRECATION_STATUS, REGION.deprecationStatus()); + } + + @Test + public void testToAndFromPb() { + Region region = Region.fromPb(REGION.toPb()); + compareRegions(REGION, region); + assertEquals(REGION_ID.project(), region.regionId().project()); + assertEquals(REGION_ID.region(), region.regionId().region()); + region = Region.builder().regionId(REGION_ID).build(); + compareRegions(region, Region.fromPb(region.toPb())); + } + + private void compareRegions(Region expected, Region value) { + assertEquals(expected, value); + assertEquals(expected.regionId(), value.regionId()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.zones(), value.zones()); + assertEquals(expected.quotas(), value.quotas()); + assertEquals(expected.deprecationStatus(), value.deprecationStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SchedulingOptionsTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SchedulingOptionsTest.java new file mode 100644 index 000000000000..de2c66a5ff1e --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SchedulingOptionsTest.java @@ -0,0 +1,57 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class SchedulingOptionsTest { + + private static final SchedulingOptions SCHEDULING_OPTIONS = SchedulingOptions.preemptible(); + + @Test + public void testFactoryMethods() { + assertTrue(SCHEDULING_OPTIONS.isPreemptible()); + assertFalse(SCHEDULING_OPTIONS.automaticRestart()); + assertEquals(SchedulingOptions.Maintenance.TERMINATE, SCHEDULING_OPTIONS.maintenance()); + SchedulingOptions schedulingOptions = + SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + assertFalse(schedulingOptions.isPreemptible()); + assertTrue(schedulingOptions.automaticRestart()); + assertEquals(SchedulingOptions.Maintenance.MIGRATE, schedulingOptions.maintenance()); + } + + @Test + public void testToAndFromPb() { + compareSchedulingOptions(SCHEDULING_OPTIONS, + SchedulingOptions.fromPb(SCHEDULING_OPTIONS.toPb())); + SchedulingOptions schedulingOptions = + SchedulingOptions.standard(true, SchedulingOptions.Maintenance.MIGRATE); + compareSchedulingOptions(schedulingOptions, SchedulingOptions.fromPb(schedulingOptions.toPb())); + } + + public void compareSchedulingOptions(SchedulingOptions expected, SchedulingOptions value) { + assertEquals(expected, value); + assertEquals(expected.isPreemptible(), value.isPreemptible()); + assertEquals(expected.maintenance(), value.maintenance()); + assertEquals(expected.automaticRestart(), value.automaticRestart()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SerializationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SerializationTest.java new file mode 100644 index 000000000000..0ef68731183d --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SerializationTest.java @@ -0,0 +1,305 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import com.google.cloud.AuthCredentials; +import com.google.cloud.BaseSerializationTest; +import com.google.cloud.Restorable; +import com.google.cloud.RetryParams; +import com.google.cloud.compute.AttachedDisk.CreateDiskConfiguration; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.AttachedDisk.ScratchDiskConfiguration; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; + +import java.io.Serializable; +import java.util.List; + +public class SerializationTest extends BaseSerializationTest { + + private static final Compute COMPUTE = ComputeOptions.builder().projectId("p").build().service(); + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final String VALID_DISK_SIZE = "10GB-10TB"; + private static final Long DEFAULT_DISK_SIZE_GB = 10L; + private static final DiskTypeId DISK_TYPE_ID = DiskTypeId.of("project", "zone", "diskType"); + private static final DiskType DISK_TYPE = DiskType.builder() + .diskTypeId(DISK_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .validDiskSize(VALID_DISK_SIZE) + .defaultDiskSizeGb(DEFAULT_DISK_SIZE_GB) + .build(); + private static final MachineTypeId MACHINE_TYPE_ID = MachineTypeId.of("project", "zone", "type"); + private static final Integer GUEST_CPUS = 1; + private static final Integer MEMORY_MB = 2; + private static final List SCRATCH_DISKS = ImmutableList.of(3); + private static final Integer MAXIMUM_PERSISTENT_DISKS = 4; + private static final Long MAXIMUM_PERSISTENT_DISKS_SIZE_GB = 5L; + private static final MachineType MACHINE_TYPE = MachineType.builder() + .machineTypeId(MACHINE_TYPE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .cpus(GUEST_CPUS) + .memoryMb(MEMORY_MB) + .scratchDisksSizeGb(SCRATCH_DISKS) + .maximumPersistentDisks(MAXIMUM_PERSISTENT_DISKS) + .maximumPersistentDisksSizeGb(MAXIMUM_PERSISTENT_DISKS_SIZE_GB) + .build(); + private static final RegionId REGION_ID = RegionId.of("project", "region"); + private static final Region.Status REGION_STATUS = Region.Status.DOWN; + private static final ZoneId ZONE_ID1 = ZoneId.of("project", "zone1"); + private static final ZoneId ZONE_ID2 = ZoneId.of("project", "zone2"); + private static final List ZONES = ImmutableList.of(ZONE_ID1, ZONE_ID2); + private static final Region.Quota QUOTA1 = + new Region.Quota("METRIC1", 2, 1); + private static final Region.Quota QUOTA2 = + new Region.Quota("METRIC2", 4, 3); + private static final List QUOTAS = ImmutableList.of(QUOTA1, QUOTA2); + private static final Region REGION = Region.builder() + .regionId(REGION_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(REGION_STATUS) + .zones(ZONES) + .quotas(QUOTAS) + .build(); + private static final ZoneId ZONE_ID = ZoneId.of("project", "zone"); + private static final Zone.Status ZONE_STATUS = Zone.Status.DOWN; + private static final Zone ZONE = Zone.builder() + .zoneId(ZONE_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(ZONE_STATUS) + .region(REGION_ID) + .build(); + private static final DeprecationStatus DEPRECATION_STATUS = + DeprecationStatus.of(DeprecationStatus.Status.DELETED, MACHINE_TYPE_ID); + private static final LicenseId LICENSE_ID = LicenseId.of("project", "license"); + private 
static final Boolean CHARGES_USE_FEE = true; + private static final License LICENSE = new License(LICENSE_ID, CHARGES_USE_FEE); + private static final GlobalOperationId GLOBAL_OPERATION_ID = + GlobalOperationId.of("project", "op"); + private static final ZoneOperationId ZONE_OPERATION_ID = + ZoneOperationId.of("project", "zone", "op"); + private static final RegionOperationId REGION_OPERATION_ID = + RegionOperationId.of("project", "region", "op"); + private static final Operation GLOBAL_OPERATION = + new Operation.Builder(COMPUTE).operationId(GLOBAL_OPERATION_ID).build(); + private static final Operation ZONE_OPERATION = + new Operation.Builder(COMPUTE).operationId(ZONE_OPERATION_ID).build(); + private static final Operation REGION_OPERATION = + new Operation.Builder(COMPUTE).operationId(REGION_OPERATION_ID).build(); + private static final InstanceId INSTANCE_ID = InstanceId.of("project", "zone", "instance"); + private static final GlobalForwardingRuleId GLOBAL_FORWARDING_RULE_ID = + GlobalForwardingRuleId.of("project", "rule"); + private static final RegionForwardingRuleId REGION_FORWARDING_RULE_ID = + RegionForwardingRuleId.of("project", "region", "rule"); + private static final GlobalAddressId GLOBAL_ADDRESS_ID = GlobalAddressId.of("project", "address"); + private static final RegionAddressId REGION_ADDRESS_ID = + RegionAddressId.of("project", "region", "address"); + private static final AddressInfo.InstanceUsage INSTANCE_USAGE = + new AddressInfo.InstanceUsage(INSTANCE_ID); + private static final AddressInfo.GlobalForwardingUsage GLOBAL_FORWARDING_USAGE = + new AddressInfo.GlobalForwardingUsage(ImmutableList.of(GLOBAL_FORWARDING_RULE_ID)); + private static final AddressInfo.RegionForwardingUsage REGION_FORWARDING_USAGE = + new AddressInfo.RegionForwardingUsage(ImmutableList.of(REGION_FORWARDING_RULE_ID)); + private static final AddressInfo ADDRESS_INFO = AddressInfo.builder(REGION_ADDRESS_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .usage(INSTANCE_USAGE) + .build(); + private static final Address ADDRESS = new Address.Builder(COMPUTE, REGION_ADDRESS_ID).build(); + private static final DiskId DISK_ID = DiskId.of("project", "zone", "disk"); + private static final SnapshotId SNAPSHOT_ID = SnapshotId.of("project", "snapshot"); + private static final SnapshotInfo SNAPSHOT_INFO = SnapshotInfo.of(SNAPSHOT_ID, DISK_ID); + private static final Snapshot SNAPSHOT = + new Snapshot.Builder(COMPUTE, SNAPSHOT_ID, DISK_ID).build(); + private static final ImageId IMAGE_ID = ImageId.of("project", "image"); + private static final DiskImageConfiguration DISK_IMAGE_CONFIGURATION = + DiskImageConfiguration.of(DISK_ID); + private static final StorageImageConfiguration STORAGE_IMAGE_CONFIGURATION = + StorageImageConfiguration.of("gs:/bucket/file"); + private static final ImageInfo IMAGE_INFO = ImageInfo.of(IMAGE_ID, DISK_IMAGE_CONFIGURATION); + private static final Image IMAGE = + new Image.Builder(COMPUTE, IMAGE_ID, DISK_IMAGE_CONFIGURATION).build(); + private static final StandardDiskConfiguration STANDARD_DISK_CONFIGURATION = + StandardDiskConfiguration.of(DISK_TYPE_ID); + private static final ImageDiskConfiguration IMAGE_DISK_CONFIGURATION = + ImageDiskConfiguration.of(IMAGE_ID); + private static final SnapshotDiskConfiguration SNAPSHOT_DISK_CONFIGURATION = + SnapshotDiskConfiguration.of(SNAPSHOT_ID); + private static final DiskInfo DISK_INFO = DiskInfo.of(DISK_ID, STANDARD_DISK_CONFIGURATION); + private static final Disk DISK = + new Disk.Builder(COMPUTE, DISK_ID, 
STANDARD_DISK_CONFIGURATION).build(); + private static final SubnetworkId SUBNETWORK_ID = + SubnetworkId.of("project", "region", "subnetwork"); + private static final NetworkId NETWORK_ID = NetworkId.of("project", "network"); + private static final SubnetworkInfo SUBNETWORK_INFO = + SubnetworkInfo.of(SUBNETWORK_ID, NETWORK_ID, "192.168.0.0/16"); + private static final Subnetwork SUBNETWORK = + new Subnetwork.Builder(COMPUTE, SUBNETWORK_ID, NETWORK_ID, "192.168.0.0/16").build(); + private static final StandardNetworkConfiguration STANDARD_NETWORK_CONFIGURATION = + StandardNetworkConfiguration.of("192.168.0.0/16"); + private static final SubnetNetworkConfiguration SUBNET_NETWORK_CONFIGURATION = + SubnetNetworkConfiguration.of(false); + private static final NetworkInfo NETWORK_INFO = + NetworkInfo.of(NETWORK_ID, STANDARD_NETWORK_CONFIGURATION); + private static final Network NETWORK = + new Network.Builder(COMPUTE, NETWORK_ID, STANDARD_NETWORK_CONFIGURATION).build(); + private static final AccessConfig ACCESS_CONFIG = AccessConfig.of("192.168.1.1"); + private static final NetworkInterface NETWORK_INTERFACE = NetworkInterface.builder(NETWORK_ID) + .accessConfigurations(ACCESS_CONFIG) + .build(); + private static final CreateDiskConfiguration CREATE_DISK_CONFIGURATION = + CreateDiskConfiguration.of(IMAGE_ID); + private static final PersistentDiskConfiguration PERSISTENT_DISK_CONFIGURATION = + PersistentDiskConfiguration.of(DISK_ID); + private static final ScratchDiskConfiguration SCRATCH_DISK_CONFIGURATION = + ScratchDiskConfiguration.of(DISK_TYPE_ID); + private static final AttachedDisk ATTACHED_DISK = AttachedDisk.of(CREATE_DISK_CONFIGURATION); + private static final Tags TAGS = Tags.of("tag1", "tag2"); + private static final Metadata METADATA = Metadata.of(ImmutableMap.of("key1", "val1")); + private static final ServiceAccount SERVICE_ACCOUNT = ServiceAccount.of("email"); + private static final SchedulingOptions SCHEDULING_OPTIONS = SchedulingOptions.preemptible(); + private static final InstanceInfo INSTANCE_INFO = + InstanceInfo.of(INSTANCE_ID, MACHINE_TYPE_ID, ATTACHED_DISK, NETWORK_INTERFACE); + private static final Instance INSTANCE = + new Instance.Builder(COMPUTE, INSTANCE_ID, MACHINE_TYPE_ID, ATTACHED_DISK, NETWORK_INTERFACE) + .build(); + private static final Compute.DiskTypeOption DISK_TYPE_OPTION = + Compute.DiskTypeOption.fields(); + private static final Compute.DiskTypeFilter DISK_TYPE_FILTER = + Compute.DiskTypeFilter.equals(Compute.DiskTypeField.SELF_LINK, "selfLink"); + private static final Compute.DiskTypeListOption DISK_TYPE_LIST_OPTION = + Compute.DiskTypeListOption.filter(DISK_TYPE_FILTER); + private static final Compute.DiskTypeAggregatedListOption DISK_TYPE_AGGREGATED_LIST_OPTION = + Compute.DiskTypeAggregatedListOption.filter(DISK_TYPE_FILTER); + private static final Compute.MachineTypeOption MACHINE_TYPE_OPTION = + Compute.MachineTypeOption.fields(); + private static final Compute.MachineTypeFilter MACHINE_TYPE_FILTER = + Compute.MachineTypeFilter.equals(Compute.MachineTypeField.SELF_LINK, "selfLink"); + private static final Compute.MachineTypeListOption MACHINE_TYPE_LIST_OPTION = + Compute.MachineTypeListOption.filter(MACHINE_TYPE_FILTER); + private static final Compute.MachineTypeAggregatedListOption MACHINE_TYPE_AGGREGATED_LIST_OPTION = + Compute.MachineTypeAggregatedListOption.filter(MACHINE_TYPE_FILTER); + private static final Compute.RegionOption REGION_OPTION = Compute.RegionOption.fields(); + private static final Compute.RegionFilter REGION_FILTER = + 
Compute.RegionFilter.equals(Compute.RegionField.SELF_LINK, "selfLink"); + private static final Compute.RegionListOption REGION_LIST_OPTION = + Compute.RegionListOption.filter(REGION_FILTER); + private static final Compute.ZoneOption ZONE_OPTION = Compute.ZoneOption.fields(); + private static final Compute.ZoneFilter ZONE_FILTER = + Compute.ZoneFilter.equals(Compute.ZoneField.SELF_LINK, "selfLink"); + private static final Compute.ZoneListOption ZONE_LIST_OPTION = + Compute.ZoneListOption.filter(ZONE_FILTER); + private static final Compute.LicenseOption LICENSE_OPTION = Compute.LicenseOption.fields(); + private static final Compute.OperationOption OPERATION_OPTION = Compute.OperationOption.fields(); + private static final Compute.OperationFilter OPERATION_FILTER = + Compute.OperationFilter.equals(Compute.OperationField.SELF_LINK, "selfLink"); + private static final Compute.OperationListOption OPERATION_LIST_OPTION = + Compute.OperationListOption.filter(OPERATION_FILTER); + private static final Compute.AddressOption ADDRESS_OPTION = Compute.AddressOption.fields(); + private static final Compute.AddressFilter ADDRESS_FILTER = + Compute.AddressFilter.equals(Compute.AddressField.SELF_LINK, "selfLink"); + private static final Compute.AddressListOption ADDRESS_LIST_OPTION = + Compute.AddressListOption.filter(ADDRESS_FILTER); + private static final Compute.AddressAggregatedListOption ADDRESS_AGGREGATED_LIST_OPTION = + Compute.AddressAggregatedListOption.filter(ADDRESS_FILTER); + private static final Compute.SnapshotOption SNAPSHOT_OPTION = Compute.SnapshotOption.fields(); + private static final Compute.SnapshotFilter SNAPSHOT_FILTER = + Compute.SnapshotFilter.equals(Compute.SnapshotField.SELF_LINK, "selfLink"); + private static final Compute.SnapshotListOption SNAPSHOT_LIST_OPTION = + Compute.SnapshotListOption.filter(SNAPSHOT_FILTER); + private static final Compute.ImageOption IMAGE_OPTION = Compute.ImageOption.fields(); + private static final Compute.ImageFilter IMAGE_FILTER = + Compute.ImageFilter.equals(Compute.ImageField.SELF_LINK, "selfLink"); + private static final Compute.ImageListOption IMAGE_LIST_OPTION = + Compute.ImageListOption.filter(IMAGE_FILTER); + private static final Compute.DiskOption DISK_OPTION = Compute.DiskOption.fields(); + private static final Compute.DiskFilter DISK_FILTER = + Compute.DiskFilter.equals(Compute.DiskField.SELF_LINK, "selfLink"); + private static final Compute.DiskListOption DISK_LIST_OPTION = + Compute.DiskListOption.filter(DISK_FILTER); + private static final Compute.DiskAggregatedListOption DISK_AGGREGATED_LIST_OPTION = + Compute.DiskAggregatedListOption.filter(DISK_FILTER); + private static final Compute.SubnetworkOption SUBNETWORK_OPTION = + Compute.SubnetworkOption.fields(); + private static final Compute.SubnetworkFilter SUBNETWORK_FILTER = + Compute.SubnetworkFilter.equals(Compute.SubnetworkField.SELF_LINK, "selfLink"); + private static final Compute.SubnetworkListOption SUBNETWORK_LIST_OPTION = + Compute.SubnetworkListOption.filter(SUBNETWORK_FILTER); + private static final Compute.SubnetworkAggregatedListOption SUBNETWORK_AGGREGATED_LIST_OPTION = + Compute.SubnetworkAggregatedListOption.filter(SUBNETWORK_FILTER); + private static final Compute.NetworkOption NETWORK_OPTION = + Compute.NetworkOption.fields(); + private static final Compute.NetworkFilter NETWORK_FILTER = + Compute.NetworkFilter.equals(Compute.NetworkField.SELF_LINK, "selfLink"); + private static final Compute.NetworkListOption NETWORK_LIST_OPTION = + 
Compute.NetworkListOption.filter(NETWORK_FILTER); + private static final Compute.InstanceOption INSTANCE_OPTION = + Compute.InstanceOption.fields(); + private static final Compute.InstanceFilter INSTANCE_FILTER = + Compute.InstanceFilter.equals(Compute.InstanceField.SELF_LINK, "selfLink"); + private static final Compute.InstanceListOption INSTANCE_LIST_OPTION = + Compute.InstanceListOption.filter(INSTANCE_FILTER); + private static final Compute.InstanceAggregatedListOption INSTANCE_AGGREGATED_LIST_OPTION = + Compute.InstanceAggregatedListOption.filter(INSTANCE_FILTER); + + @Override + protected Serializable[] serializableObjects() { + ComputeOptions options = ComputeOptions.builder() + .projectId("p1") + .authCredentials(AuthCredentials.createForAppEngine()) + .build(); + ComputeOptions otherOptions = options.toBuilder() + .projectId("p2") + .retryParams(RetryParams.defaultInstance()) + .authCredentials(null) + .build(); + return new Serializable[]{DISK_TYPE_ID, DISK_TYPE, MACHINE_TYPE_ID, MACHINE_TYPE, REGION_ID, + REGION, ZONE_ID, ZONE, LICENSE_ID, LICENSE, DEPRECATION_STATUS, GLOBAL_OPERATION_ID, + REGION_OPERATION_ID, ZONE_OPERATION_ID, GLOBAL_OPERATION, REGION_OPERATION, ZONE_OPERATION, + INSTANCE_ID, REGION_FORWARDING_RULE_ID, GLOBAL_FORWARDING_RULE_ID, GLOBAL_ADDRESS_ID, + REGION_ADDRESS_ID, INSTANCE_USAGE, GLOBAL_FORWARDING_USAGE, REGION_FORWARDING_USAGE, + ADDRESS_INFO, ADDRESS, DISK_ID, SNAPSHOT_ID, SNAPSHOT_INFO, SNAPSHOT, IMAGE_ID, + DISK_IMAGE_CONFIGURATION, STORAGE_IMAGE_CONFIGURATION, IMAGE_INFO, IMAGE, + STANDARD_DISK_CONFIGURATION, IMAGE_DISK_CONFIGURATION, SNAPSHOT_DISK_CONFIGURATION, + DISK_INFO, DISK, SUBNETWORK_ID, NETWORK_ID, SUBNETWORK_INFO, SUBNETWORK, + STANDARD_NETWORK_CONFIGURATION, SUBNET_NETWORK_CONFIGURATION, NETWORK_INFO, NETWORK, + ACCESS_CONFIG, NETWORK_INTERFACE, CREATE_DISK_CONFIGURATION, PERSISTENT_DISK_CONFIGURATION, + SCRATCH_DISK_CONFIGURATION, ATTACHED_DISK, TAGS, METADATA, SERVICE_ACCOUNT, + SCHEDULING_OPTIONS, INSTANCE_INFO, INSTANCE, DISK_TYPE_OPTION, DISK_TYPE_FILTER, + DISK_TYPE_LIST_OPTION, DISK_TYPE_AGGREGATED_LIST_OPTION, MACHINE_TYPE_OPTION, + MACHINE_TYPE_FILTER, MACHINE_TYPE_LIST_OPTION, MACHINE_TYPE_AGGREGATED_LIST_OPTION, + REGION_OPTION, REGION_FILTER, REGION_LIST_OPTION, ZONE_OPTION, ZONE_FILTER, + ZONE_LIST_OPTION, LICENSE_OPTION, OPERATION_OPTION, OPERATION_FILTER, OPERATION_LIST_OPTION, + ADDRESS_OPTION, ADDRESS_FILTER, ADDRESS_LIST_OPTION, ADDRESS_AGGREGATED_LIST_OPTION, + SNAPSHOT_OPTION, SNAPSHOT_FILTER, SNAPSHOT_LIST_OPTION, IMAGE_OPTION, IMAGE_FILTER, + IMAGE_LIST_OPTION, DISK_OPTION, DISK_FILTER, DISK_LIST_OPTION, DISK_AGGREGATED_LIST_OPTION, + SUBNETWORK_OPTION, SUBNETWORK_FILTER, SUBNETWORK_LIST_OPTION, + SUBNETWORK_AGGREGATED_LIST_OPTION, NETWORK_OPTION, NETWORK_FILTER, NETWORK_LIST_OPTION, + INSTANCE_OPTION, INSTANCE_FILTER, INSTANCE_LIST_OPTION, INSTANCE_AGGREGATED_LIST_OPTION, + options, otherOptions}; + } + + @Override + protected Restorable[] restorableObjects() { + return null; + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ServiceAccountTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ServiceAccountTest.java new file mode 100644 index 000000000000..dd29589e5037 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ServiceAccountTest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +public class ServiceAccountTest { + + private static final ServiceAccount SERVICE_ACCOUNT = + ServiceAccount.of("email", ImmutableList.of("scope1")); + + @Test + public void testOf() { + compareServiceAccount(SERVICE_ACCOUNT, ServiceAccount.of("email", ImmutableList.of("scope1"))); + compareServiceAccount(SERVICE_ACCOUNT, ServiceAccount.of("email", "scope1")); + } + + @Test + public void testToAndFromPb() { + compareServiceAccount(SERVICE_ACCOUNT, ServiceAccount.fromPb(SERVICE_ACCOUNT.toPb())); + } + + public void compareServiceAccount(ServiceAccount expected, ServiceAccount value) { + assertEquals(expected, value); + assertEquals(expected.email(), value.email()); + assertEquals(expected.scopes(), value.scopes()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotDiskConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotDiskConfigurationTest.java new file mode 100644 index 000000000000..5f95891b3252 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotDiskConfigurationTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.compute.DiskConfiguration.Type; + +import org.junit.Test; + +public class SnapshotDiskConfigurationTest { + + private static final Long SIZE = 42L; + private static final DiskTypeId DISK_TYPE = DiskTypeId.of("project", "zone", "type"); + private static final SnapshotId SNAPSHOT = SnapshotId.of("project", "snapshot"); + private static final String SNAPSHOT_ID = "snapshotId"; + private static final SnapshotDiskConfiguration DISK_CONFIGURATION = + SnapshotDiskConfiguration.builder(SNAPSHOT) + .sizeGb(SIZE) + .diskType(DISK_TYPE) + .sourceSnapshotId(SNAPSHOT_ID) + .build(); + + @Test + public void testToBuilder() { + compareSnapshotDiskConfiguration(DISK_CONFIGURATION, DISK_CONFIGURATION.toBuilder().build()); + SnapshotId newSnapshot = SnapshotId.of("newProject", "newSnapshot"); + SnapshotDiskConfiguration diskConfiguration = DISK_CONFIGURATION.toBuilder() + .sizeGb(24L) + .sourceSnapshot(newSnapshot) + .sourceSnapshotId("newSnapshotId") + .build(); + assertEquals(24L, diskConfiguration.sizeGb().longValue()); + assertEquals(newSnapshot, diskConfiguration.sourceSnapshot()); + assertEquals("newSnapshotId", diskConfiguration.sourceSnapshotId()); + diskConfiguration = diskConfiguration.toBuilder() + .sizeGb(SIZE) + .sourceSnapshot(SNAPSHOT) + .sourceSnapshotId(SNAPSHOT_ID) + .build(); + compareSnapshotDiskConfiguration(DISK_CONFIGURATION, diskConfiguration); + } + + @Test + public void testToBuilderIncomplete() { + SnapshotDiskConfiguration diskConfiguration = SnapshotDiskConfiguration.of(SNAPSHOT); + compareSnapshotDiskConfiguration(diskConfiguration, diskConfiguration.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(DISK_TYPE, DISK_CONFIGURATION.diskType()); + assertEquals(SIZE, DISK_CONFIGURATION.sizeGb()); + assertEquals(SNAPSHOT, DISK_CONFIGURATION.sourceSnapshot()); + assertEquals(SNAPSHOT_ID, DISK_CONFIGURATION.sourceSnapshotId()); + assertEquals(Type.SNAPSHOT, DISK_CONFIGURATION.type()); + } + + @Test + public void testToAndFromPb() { + assertTrue(DiskConfiguration.fromPb(DISK_CONFIGURATION.toPb()) + instanceof SnapshotDiskConfiguration); + compareSnapshotDiskConfiguration(DISK_CONFIGURATION, + DiskConfiguration.fromPb(DISK_CONFIGURATION.toPb())); + } + + @Test + public void testOf() { + SnapshotDiskConfiguration configuration = SnapshotDiskConfiguration.of(SNAPSHOT); + assertNull(configuration.diskType()); + assertNull(configuration.sizeGb()); + assertNull(configuration.sourceSnapshotId()); + assertEquals(SNAPSHOT, configuration.sourceSnapshot()); + assertEquals(Type.SNAPSHOT, configuration.type()); + } + + @Test + public void testSetProjectId() { + SnapshotDiskConfiguration configuration = DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of(DISK_TYPE.zone(), DISK_TYPE.type())) + .sourceSnapshot(SnapshotId.of(SNAPSHOT.snapshot())) + .build(); + compareSnapshotDiskConfiguration(DISK_CONFIGURATION, configuration.setProjectId("project")); + } + + private void compareSnapshotDiskConfiguration(SnapshotDiskConfiguration expected, + SnapshotDiskConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.diskType(), value.diskType()); + assertEquals(expected.sizeGb(), value.sizeGb()); + assertEquals(expected.sourceSnapshot(), value.sourceSnapshot()); + assertEquals(expected.sourceSnapshotId(), value.sourceSnapshotId()); + 
assertEquals(expected.type(), value.type()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotIdTest.java new file mode 100644 index 000000000000..b723832dbbac --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotIdTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class SnapshotIdTest { + + private static final String PROJECT = "project"; + private static final String NAME = "snapshot"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + SnapshotId snapshotId = SnapshotId.of(PROJECT, NAME); + assertEquals(PROJECT, snapshotId.project()); + assertEquals(NAME, snapshotId.snapshot()); + assertEquals(URL, snapshotId.selfLink()); + snapshotId = SnapshotId.of(NAME); + assertNull(snapshotId.project()); + assertEquals(NAME, snapshotId.snapshot()); + } + + @Test + public void testToAndFromUrl() { + SnapshotId snapshotId = SnapshotId.of(PROJECT, NAME); + compareSnapshotId(snapshotId, SnapshotId.fromUrl(snapshotId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid snapshot URL"); + SnapshotId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + SnapshotId snapshotId = SnapshotId.of(PROJECT, NAME); + assertSame(snapshotId, snapshotId.setProjectId(PROJECT)); + compareSnapshotId(snapshotId, SnapshotId.of(NAME).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(SnapshotId.matchesUrl(SnapshotId.of(PROJECT, NAME).selfLink())); + assertFalse(SnapshotId.matchesUrl("notMatchingUrl")); + } + + private void compareSnapshotId(SnapshotId expected, SnapshotId value) { + assertEquals(expected, value); + assertEquals(expected.project(), value.project()); + assertEquals(expected.snapshot(), value.snapshot()); + assertEquals(expected.selfLink(), value.selfLink()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotInfoTest.java new file mode 100644 index 000000000000..38d295184ae6 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotInfoTest.java @@ 
-0,0 +1,135 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.cloud.compute.SnapshotInfo.Status; +import com.google.cloud.compute.SnapshotInfo.StorageBytesStatus; +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class SnapshotInfoTest { + + private static final String GENERATED_ID = "42"; + private static final DiskId SOURCE_DISK = DiskId.of("project", "zone", "disk"); + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final SnapshotId SNAPSHOT_ID = SnapshotId.of("project", "snapshot"); + private static final Status STATUS = Status.CREATING; + private static final Long DISK_SIZE_GB = 42L; + private static final String SOURCE_DISK_ID = "diskId"; + private static final Long STORAGE_BYTES = 24L; + private static final StorageBytesStatus STORAGE_BYTES_STATUS = StorageBytesStatus.UP_TO_DATE; + private static final SnapshotInfo SNAPSHOT_INFO = SnapshotInfo.builder(SNAPSHOT_ID, SOURCE_DISK) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .sourceDiskId(SOURCE_DISK_ID) + .storageBytes(STORAGE_BYTES) + .storageBytesStatus(STORAGE_BYTES_STATUS) + .build(); + + @Test + public void testToBuilder() { + compareSnapshotInfo(SNAPSHOT_INFO, SNAPSHOT_INFO.toBuilder().build()); + SnapshotInfo snapshotInfo = SNAPSHOT_INFO.toBuilder().description("newDescription").build(); + assertEquals("newDescription", snapshotInfo.description()); + snapshotInfo = snapshotInfo.toBuilder().description("description").build(); + compareSnapshotInfo(SNAPSHOT_INFO, snapshotInfo); + } + + @Test + public void testToBuilderIncomplete() { + SnapshotInfo snapshotInfo = SnapshotInfo.of(SNAPSHOT_ID, SOURCE_DISK); + assertEquals(snapshotInfo, snapshotInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, SNAPSHOT_INFO.generatedId()); + assertEquals(SNAPSHOT_ID, SNAPSHOT_INFO.snapshotId()); + assertEquals(CREATION_TIMESTAMP, SNAPSHOT_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, SNAPSHOT_INFO.description()); + assertEquals(STATUS, SNAPSHOT_INFO.status()); + assertEquals(DISK_SIZE_GB, SNAPSHOT_INFO.diskSizeGb()); + assertEquals(LICENSES, SNAPSHOT_INFO.licenses()); + assertEquals(SOURCE_DISK, SNAPSHOT_INFO.sourceDisk()); + assertEquals(SOURCE_DISK_ID, SNAPSHOT_INFO.sourceDiskId()); + assertEquals(STORAGE_BYTES, SNAPSHOT_INFO.storageBytes()); + assertEquals(STORAGE_BYTES_STATUS, SNAPSHOT_INFO.storageBytesStatus()); + } + + @Test + public void testOf() { + SnapshotInfo 
snapshotInfo = SnapshotInfo.of(SNAPSHOT_ID, SOURCE_DISK); + assertNull(snapshotInfo.generatedId()); + assertEquals(SNAPSHOT_ID, snapshotInfo.snapshotId()); + assertNull(snapshotInfo.creationTimestamp()); + assertNull(snapshotInfo.description()); + assertNull(snapshotInfo.status()); + assertNull(snapshotInfo.diskSizeGb()); + assertNull(snapshotInfo.licenses()); + assertEquals(SOURCE_DISK, snapshotInfo.sourceDisk()); + assertNull(snapshotInfo.sourceDiskId()); + assertNull(snapshotInfo.storageBytes()); + assertNull(snapshotInfo.storageBytesStatus()); + } + + @Test + public void testToAndFromPb() { + compareSnapshotInfo(SNAPSHOT_INFO, SnapshotInfo.fromPb(SNAPSHOT_INFO.toPb())); + SnapshotInfo snapshotInfo = SnapshotInfo.of(SNAPSHOT_ID, SOURCE_DISK); + compareSnapshotInfo(snapshotInfo, SnapshotInfo.fromPb(snapshotInfo.toPb())); + snapshotInfo = new SnapshotInfo.BuilderImpl().snapshotId(SNAPSHOT_ID).build(); + compareSnapshotInfo(snapshotInfo, SnapshotInfo.fromPb(snapshotInfo.toPb())); + } + + @Test + public void testSetProjectId() { + SnapshotInfo snapshotInfo = SNAPSHOT_INFO.toBuilder() + .snapshotId(SnapshotId.of("snapshot")) + .sourceDisk(DiskId.of("zone", "disk")) + .build(); + compareSnapshotInfo(SNAPSHOT_INFO, snapshotInfo.setProjectId("project")); + } + + public void compareSnapshotInfo(SnapshotInfo expected, SnapshotInfo value) { + assertEquals(expected, value); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.snapshotId(), value.snapshotId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.diskSizeGb(), value.diskSizeGb()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.sourceDisk(), value.sourceDisk()); + assertEquals(expected.sourceDiskId(), value.sourceDiskId()); + assertEquals(expected.storageBytes(), value.storageBytes()); + assertEquals(expected.storageBytesStatus(), value.storageBytesStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotTest.java new file mode 100644 index 000000000000..d74cdf988f8c --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SnapshotTest.java @@ -0,0 +1,253 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +import java.util.List; + +public class SnapshotTest { + + private static final String GENERATED_ID = "42"; + private static final DiskId SOURCE_DISK = DiskId.of("project", "zone", "disk"); + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final List LICENSES = ImmutableList.of( + LicenseId.of("project", "license1"), LicenseId.of("project", "license2")); + private static final SnapshotId SNAPSHOT_ID = SnapshotId.of("project", "snapshot"); + private static final SnapshotInfo.Status STATUS = SnapshotInfo.Status.CREATING; + private static final Long DISK_SIZE_GB = 42L; + private static final String SOURCE_DISK_ID = "diskId"; + private static final Long STORAGE_BYTES = 24L; + private static final SnapshotInfo.StorageBytesStatus STORAGE_BYTES_STATUS = + SnapshotInfo.StorageBytesStatus.UP_TO_DATE; + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Snapshot snapshot; + private Snapshot expectedSnapshot; + + private void initializeExpectedSnapshot(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + expectedSnapshot = new Snapshot.Builder(serviceMockReturnsOptions, SNAPSHOT_ID, SOURCE_DISK) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .sourceDiskId(SOURCE_DISK_ID) + .storageBytes(STORAGE_BYTES) + .storageBytesStatus(STORAGE_BYTES_STATUS) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeSnapshot() { + snapshot = new Snapshot.Builder(compute, SNAPSHOT_ID, SOURCE_DISK) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .status(STATUS) + .diskSizeGb(DISK_SIZE_GB) + .licenses(LICENSES) + .sourceDiskId(SOURCE_DISK_ID) + .storageBytes(STORAGE_BYTES) + .storageBytesStatus(STORAGE_BYTES_STATUS) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedSnapshot(8); + compareSnapshot(expectedSnapshot, expectedSnapshot.toBuilder().build()); + Snapshot newSnapshot = expectedSnapshot.toBuilder().description("newDescription").build(); + assertEquals("newDescription", newSnapshot.description()); + newSnapshot = newSnapshot.toBuilder().description("description").build(); + compareSnapshot(expectedSnapshot, newSnapshot); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedSnapshot(5); + SnapshotInfo snapshotInfo = SnapshotInfo.of(SNAPSHOT_ID, SOURCE_DISK); + Snapshot snapshot = + new Snapshot(serviceMockReturnsOptions, new SnapshotInfo.BuilderImpl(snapshotInfo)); + compareSnapshot(snapshot, snapshot.toBuilder().build()); + } + + @Test + public void 
testBuilder() { + initializeExpectedSnapshot(2); + assertEquals(GENERATED_ID, expectedSnapshot.generatedId()); + assertEquals(SNAPSHOT_ID, expectedSnapshot.snapshotId()); + assertEquals(CREATION_TIMESTAMP, expectedSnapshot.creationTimestamp()); + assertEquals(DESCRIPTION, expectedSnapshot.description()); + assertEquals(STATUS, expectedSnapshot.status()); + assertEquals(DISK_SIZE_GB, expectedSnapshot.diskSizeGb()); + assertEquals(LICENSES, expectedSnapshot.licenses()); + assertEquals(SOURCE_DISK, expectedSnapshot.sourceDisk()); + assertEquals(SOURCE_DISK_ID, expectedSnapshot.sourceDiskId()); + assertEquals(STORAGE_BYTES, expectedSnapshot.storageBytes()); + assertEquals(STORAGE_BYTES_STATUS, expectedSnapshot.storageBytesStatus()); + assertSame(serviceMockReturnsOptions, expectedSnapshot.compute()); + SnapshotId otherSnapshotId = SnapshotId.of("otherSnapshot"); + DiskId otherSourceDisk = DiskId.of("zone", "otherDisk"); + Snapshot snapshot = new Snapshot.Builder(serviceMockReturnsOptions, SNAPSHOT_ID, SOURCE_DISK) + .snapshotId(otherSnapshotId) + .sourceDisk(otherSourceDisk) + .build(); + assertNull(snapshot.generatedId()); + assertEquals(otherSnapshotId, snapshot.snapshotId()); + assertNull(snapshot.creationTimestamp()); + assertNull(snapshot.description()); + assertNull(snapshot.status()); + assertNull(snapshot.diskSizeGb()); + assertNull(snapshot.licenses()); + assertEquals(otherSourceDisk, snapshot.sourceDisk()); + assertNull(snapshot.sourceDiskId()); + assertNull(snapshot.storageBytes()); + assertNull(snapshot.storageBytesStatus()); + assertSame(serviceMockReturnsOptions, snapshot.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedSnapshot(8); + compareSnapshot(expectedSnapshot, + Snapshot.fromPb(serviceMockReturnsOptions, expectedSnapshot.toPb())); + Snapshot snapshot = + new Snapshot.Builder(serviceMockReturnsOptions, SNAPSHOT_ID, SOURCE_DISK).build(); + compareSnapshot(snapshot, Snapshot.fromPb(serviceMockReturnsOptions, snapshot.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedSnapshot(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + expect(compute.deleteSnapshot(SNAPSHOT_ID)).andReturn(operation); + replay(compute); + initializeSnapshot(); + assertSame(operation, snapshot.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedSnapshot(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteSnapshot(SNAPSHOT_ID)).andReturn(null); + replay(compute); + initializeSnapshot(); + assertNull(snapshot.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedSnapshot(1); + Compute.SnapshotOption[] expectedOptions = {Compute.SnapshotOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSnapshot(SNAPSHOT_ID.snapshot(), expectedOptions)) + .andReturn(expectedSnapshot); + replay(compute); + initializeSnapshot(); + assertTrue(snapshot.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedSnapshot(1); + Compute.SnapshotOption[] expectedOptions = {Compute.SnapshotOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSnapshot(SNAPSHOT_ID.snapshot(), expectedOptions)).andReturn(null); + replay(compute); + initializeSnapshot(); + assertFalse(snapshot.exists()); + verify(compute); + 
} + + @Test + public void testReload() throws Exception { + initializeExpectedSnapshot(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSnapshot(SNAPSHOT_ID.snapshot())).andReturn(expectedSnapshot); + replay(compute); + initializeSnapshot(); + Snapshot updatedSnapshot = snapshot.reload(); + compareSnapshot(expectedSnapshot, updatedSnapshot); + verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedSnapshot(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSnapshot(SNAPSHOT_ID.snapshot())).andReturn(null); + replay(compute); + initializeSnapshot(); + assertNull(snapshot.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedSnapshot(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSnapshot(SNAPSHOT_ID.snapshot(), Compute.SnapshotOption.fields())) + .andReturn(expectedSnapshot); + replay(compute); + initializeSnapshot(); + Snapshot updatedSnapshot = snapshot.reload(Compute.SnapshotOption.fields()); + compareSnapshot(expectedSnapshot, updatedSnapshot); + verify(compute); + } + + public void compareSnapshot(Snapshot expected, Snapshot value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.snapshotId(), value.snapshotId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.status(), value.status()); + assertEquals(expected.diskSizeGb(), value.diskSizeGb()); + assertEquals(expected.licenses(), value.licenses()); + assertEquals(expected.sourceDisk(), value.sourceDisk()); + assertEquals(expected.sourceDiskId(), value.sourceDiskId()); + assertEquals(expected.storageBytes(), value.storageBytes()); + assertEquals(expected.storageBytesStatus(), value.storageBytesStatus()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/StandardDiskConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/StandardDiskConfigurationTest.java new file mode 100644 index 000000000000..3651eef2ad99 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/StandardDiskConfigurationTest.java @@ -0,0 +1,103 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.compute.DiskConfiguration.Type; + +import org.junit.Test; + +public class StandardDiskConfigurationTest { + + private static final Long SIZE = 42L; + private static final DiskTypeId DISK_TYPE = DiskTypeId.of("project", "zone", "type"); + private static final StandardDiskConfiguration DISK_CONFIGURATION = + StandardDiskConfiguration.builder() + .sizeGb(SIZE) + .diskType(DISK_TYPE) + .build(); + + @Test + public void testToBuilder() { + compareStandardDiskConfiguration(DISK_CONFIGURATION, DISK_CONFIGURATION.toBuilder().build()); + StandardDiskConfiguration diskConfiguration = DISK_CONFIGURATION.toBuilder() + .sizeGb(24L) + .build(); + assertEquals(24L, diskConfiguration.sizeGb().longValue()); + diskConfiguration = diskConfiguration.toBuilder() + .sizeGb(SIZE) + .build(); + compareStandardDiskConfiguration(DISK_CONFIGURATION, diskConfiguration); + } + + @Test + public void testToBuilderIncomplete() { + StandardDiskConfiguration diskConfiguration = StandardDiskConfiguration.of(DISK_TYPE); + compareStandardDiskConfiguration(diskConfiguration, diskConfiguration.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(DISK_TYPE, DISK_CONFIGURATION.diskType()); + assertEquals(SIZE, DISK_CONFIGURATION.sizeGb()); + assertEquals(Type.STANDARD, DISK_CONFIGURATION.type()); + } + + @Test + public void testToAndFromPb() { + assertTrue(DiskConfiguration.fromPb(DISK_CONFIGURATION.toPb()) + instanceof StandardDiskConfiguration); + compareStandardDiskConfiguration(DISK_CONFIGURATION, + DiskConfiguration.fromPb(DISK_CONFIGURATION.toPb())); + } + + @Test + public void testOf() { + StandardDiskConfiguration configuration = StandardDiskConfiguration.of(DISK_TYPE); + assertEquals(DISK_TYPE, configuration.diskType()); + assertNull(configuration.sizeGb()); + assertEquals(Type.STANDARD, configuration.type()); + configuration = StandardDiskConfiguration.of(DISK_TYPE, SIZE); + assertEquals(DISK_TYPE, configuration.diskType()); + assertEquals(SIZE, configuration.sizeGb()); + assertEquals(Type.STANDARD, configuration.type()); + configuration = StandardDiskConfiguration.of(SIZE); + assertNull(configuration.diskType()); + assertEquals(SIZE, configuration.sizeGb()); + assertEquals(Type.STANDARD, configuration.type()); + } + + @Test + public void testSetProjectId() { + StandardDiskConfiguration configuration = DISK_CONFIGURATION.toBuilder() + .diskType(DiskTypeId.of(DISK_TYPE.zone(), DISK_TYPE.type())) + .build(); + compareStandardDiskConfiguration(DISK_CONFIGURATION, configuration.setProjectId("project")); + } + + private void compareStandardDiskConfiguration(StandardDiskConfiguration expected, + StandardDiskConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.diskType(), value.diskType()); + assertEquals(expected.sizeGb(), value.sizeGb()); + assertEquals(expected.type(), value.type()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/StandardNetworkConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/StandardNetworkConfigurationTest.java new file mode 100644 index 000000000000..4949327415f0 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/StandardNetworkConfigurationTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2016 Google Inc. 
All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.compute.NetworkConfiguration.Type; + +import org.junit.Test; + +public class StandardNetworkConfigurationTest { + + private static final String IP_RANGE = "192.168.0.0/16"; + private static final String GATEWAY_ADDRESS = "192.168.1.1"; + private static final StandardNetworkConfiguration NETWORK_CONFIGURATION = + new StandardNetworkConfiguration(IP_RANGE, GATEWAY_ADDRESS); + + @Test + public void testConstructor() { + assertEquals(Type.STANDARD, NETWORK_CONFIGURATION.type()); + assertEquals(IP_RANGE, NETWORK_CONFIGURATION.ipRange()); + assertEquals(GATEWAY_ADDRESS, NETWORK_CONFIGURATION.gatewayAddress()); + StandardNetworkConfiguration networkConfiguration = + new StandardNetworkConfiguration(IP_RANGE, null); + assertEquals(Type.STANDARD, networkConfiguration.type()); + assertEquals(IP_RANGE, networkConfiguration.ipRange()); + assertNull(networkConfiguration.gatewayAddress()); + } + + @Test + public void testToAndFromPb() { + assertTrue(NetworkConfiguration.fromPb(NETWORK_CONFIGURATION.toPb()) + instanceof StandardNetworkConfiguration); + compareNetworkConfiguration(NETWORK_CONFIGURATION, + NetworkConfiguration.fromPb(NETWORK_CONFIGURATION.toPb())); + StandardNetworkConfiguration networkConfiguration = + new StandardNetworkConfiguration(IP_RANGE, null); + assertTrue(NetworkConfiguration.fromPb(networkConfiguration.toPb()) + instanceof StandardNetworkConfiguration); + compareNetworkConfiguration(networkConfiguration, + NetworkConfiguration.fromPb(networkConfiguration.toPb())); + } + + @Test + public void testOf() { + StandardNetworkConfiguration configuration = StandardNetworkConfiguration.of(IP_RANGE); + assertEquals(Type.STANDARD, configuration.type()); + assertEquals(IP_RANGE, configuration.ipRange()); + assertNull(configuration.gatewayAddress()); + } + + private void compareNetworkConfiguration(StandardNetworkConfiguration expected, + StandardNetworkConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.ipRange(), value.ipRange()); + assertEquals(expected.gatewayAddress(), value.gatewayAddress()); + assertEquals(expected.type(), value.type()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/StorageImageConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/StorageImageConfigurationTest.java new file mode 100644 index 000000000000..3ecd80c66097 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/StorageImageConfigurationTest.java @@ -0,0 +1,105 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Assert; +import org.junit.Test; + +public class StorageImageConfigurationTest { + + private static final String SOURCE = "source"; + private static final ImageConfiguration.SourceType SOURCE_TYPE = ImageConfiguration.SourceType.RAW; + private static final StorageImageConfiguration.ContainerType CONTAINER_TYPE = StorageImageConfiguration.ContainerType.TAR; + private static final Long ARCHIVE_SIZE_BYTES = 42L; + private static final String SHA1 = "sha1"; + private static final StorageImageConfiguration CONFIGURATION = + StorageImageConfiguration.builder(SOURCE) + .sourceType(SOURCE_TYPE) + .containerType(CONTAINER_TYPE) + .archiveSizeBytes(ARCHIVE_SIZE_BYTES) + .sha1(SHA1) + .build(); + + @Test + public void testToBuilder() { + compareRawImageConfiguration(CONFIGURATION, CONFIGURATION.toBuilder().build()); + String newSource = "newSource"; + StorageImageConfiguration configuration = CONFIGURATION.toBuilder().source(newSource).build(); + assertEquals(newSource, configuration.source()); + configuration = configuration.toBuilder().source(SOURCE).build(); + compareRawImageConfiguration(CONFIGURATION, configuration); + } + + @Test + public void testToBuilderIncomplete() { + StorageImageConfiguration configuration = StorageImageConfiguration.of(SOURCE); + compareRawImageConfiguration(configuration, configuration.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(SOURCE_TYPE, CONFIGURATION.sourceType()); + assertEquals(SOURCE, CONFIGURATION.source()); + assertEquals(CONTAINER_TYPE, CONFIGURATION.containerType()); + assertEquals(ARCHIVE_SIZE_BYTES, CONFIGURATION.archiveSizeBytes()); + assertEquals(SHA1, CONFIGURATION.sha1()); + Assert.assertEquals(ImageConfiguration.Type.STORAGE, CONFIGURATION.type()); + } + + @Test + public void testToAndFromPb() { + assertTrue(ImageConfiguration.fromPb(CONFIGURATION.toPb()) instanceof StorageImageConfiguration); + compareRawImageConfiguration(CONFIGURATION, + ImageConfiguration.fromPb(CONFIGURATION.toPb())); + StorageImageConfiguration configuration = StorageImageConfiguration.of(SOURCE); + compareRawImageConfiguration(configuration, + StorageImageConfiguration.fromPb(configuration.toPb())); + } + + @Test + public void testOf() { + StorageImageConfiguration configuration = StorageImageConfiguration.of(SOURCE); + Assert.assertEquals(ImageConfiguration.Type.STORAGE, configuration.type()); + assertNull(configuration.sourceType()); + assertEquals(SOURCE, configuration.source()); + assertNull(configuration.containerType()); + assertNull(configuration.archiveSizeBytes()); + assertNull(configuration.sha1()); + } + + @Test + public void testSetProjectId() { + assertSame(CONFIGURATION, CONFIGURATION.setProjectId("project")); + } + + private void compareRawImageConfiguration(StorageImageConfiguration expected, + StorageImageConfiguration value) { + assertEquals(expected, value); + 
assertEquals(expected.type(), value.type()); + assertEquals(expected.source(), value.source()); + assertEquals(expected.sourceType(), value.sourceType()); + assertEquals(expected.containerType(), value.containerType()); + assertEquals(expected.archiveSizeBytes(), value.archiveSizeBytes()); + assertEquals(expected.sha1(), value.sha1()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetNetworkConfigurationTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetNetworkConfigurationTest.java new file mode 100644 index 000000000000..5fbec3e99c93 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetNetworkConfigurationTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.common.collect.ImmutableList; + +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; + +public class SubnetNetworkConfigurationTest { + + private static final Boolean AUTO_CREATE_SUBNETWORKS = true; + private static final List SUBNETWORKS = ImmutableList.of( + SubnetworkId.of("project", "region", "subnetwork1"), + SubnetworkId.of("project", "region", "subnetwork2")); + private static final SubnetNetworkConfiguration NETWORK_CONFIGURATION = + new SubnetNetworkConfiguration(AUTO_CREATE_SUBNETWORKS, SUBNETWORKS); + + @Test + public void testConstructor() { + assertEquals(AUTO_CREATE_SUBNETWORKS, NETWORK_CONFIGURATION.autoCreateSubnetworks()); + Assert.assertEquals(NetworkConfiguration.Type.SUBNET, NETWORK_CONFIGURATION.type()); + assertEquals(SUBNETWORKS, NETWORK_CONFIGURATION.subnetworks()); + Assert.assertEquals(NetworkConfiguration.Type.SUBNET, NETWORK_CONFIGURATION.type()); + SubnetNetworkConfiguration networkConfiguration = + new SubnetNetworkConfiguration(AUTO_CREATE_SUBNETWORKS, null); + Assert.assertEquals(NetworkConfiguration.Type.SUBNET, networkConfiguration.type()); + assertEquals(AUTO_CREATE_SUBNETWORKS, networkConfiguration.autoCreateSubnetworks()); + assertNull(networkConfiguration.subnetworks()); + } + + @Test + public void testToAndFromPb() { + assertTrue(NetworkConfiguration.fromPb(NETWORK_CONFIGURATION.toPb()) + instanceof SubnetNetworkConfiguration); + compareNetworkConfiguration(NETWORK_CONFIGURATION, + NetworkConfiguration.fromPb(NETWORK_CONFIGURATION.toPb())); + SubnetNetworkConfiguration networkConfiguration = + new SubnetNetworkConfiguration(AUTO_CREATE_SUBNETWORKS, null); + assertTrue(NetworkConfiguration.fromPb(networkConfiguration.toPb()) + instanceof SubnetNetworkConfiguration); + compareNetworkConfiguration(networkConfiguration, + NetworkConfiguration.fromPb(networkConfiguration.toPb())); + } + + @Test + public void testOf() { + SubnetNetworkConfiguration 
configuration = + SubnetNetworkConfiguration.of(AUTO_CREATE_SUBNETWORKS); + assertEquals(AUTO_CREATE_SUBNETWORKS, configuration.autoCreateSubnetworks()); + assertNull(configuration.subnetworks()); + Assert.assertEquals(NetworkConfiguration.Type.SUBNET, configuration.type()); + } + + private void compareNetworkConfiguration(SubnetNetworkConfiguration expected, + SubnetNetworkConfiguration value) { + assertEquals(expected, value); + assertEquals(expected.autoCreateSubnetworks(), value.autoCreateSubnetworks()); + assertEquals(expected.subnetworks(), value.subnetworks()); + assertEquals(expected.type(), value.type()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkIdTest.java new file mode 100644 index 000000000000..972c0d5f1d13 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkIdTest.java @@ -0,0 +1,87 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +public class SubnetworkIdTest { + + private static final String PROJECT = "project"; + private static final String REGION = "region"; + private static final String NAME = "subnet"; + private static final String URL = + "https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnet"; + + @Rule + public ExpectedException thrown = ExpectedException.none(); + + @Test + public void testOf() { + SubnetworkId subnetworkId = SubnetworkId.of(PROJECT, REGION, NAME); + assertEquals(PROJECT, subnetworkId.project()); + assertEquals(REGION, subnetworkId.region()); + assertEquals(NAME, subnetworkId.subnetwork()); + assertEquals(URL, subnetworkId.selfLink()); + subnetworkId = SubnetworkId.of(REGION, NAME); + assertNull(subnetworkId.project()); + assertEquals(REGION, subnetworkId.region()); + assertEquals(NAME, subnetworkId.subnetwork()); + subnetworkId = SubnetworkId.of(RegionId.of(PROJECT, REGION), NAME); + assertEquals(PROJECT, subnetworkId.project()); + assertEquals(REGION, subnetworkId.region()); + assertEquals(NAME, subnetworkId.subnetwork()); + } + + @Test + public void testToAndFromUrl() { + SubnetworkId subnetworkId = SubnetworkId.of(PROJECT, REGION, NAME); + compareSubnetworkId(subnetworkId, SubnetworkId.fromUrl(subnetworkId.selfLink())); + thrown.expect(IllegalArgumentException.class); + thrown.expectMessage("notMatchingUrl is not a valid subnetwork URL"); + SubnetworkId.fromUrl("notMatchingUrl"); + } + + @Test + public void testSetProjectId() { + SubnetworkId subnetworkId = 
SubnetworkId.of(PROJECT, REGION, NAME); + assertSame(subnetworkId, subnetworkId.setProjectId(PROJECT)); + compareSubnetworkId(subnetworkId, SubnetworkId.of(REGION, NAME).setProjectId(PROJECT)); + } + + @Test + public void testMatchesUrl() { + assertTrue(SubnetworkId.matchesUrl(SubnetworkId.of(PROJECT, REGION, NAME).selfLink())); + assertFalse(SubnetworkId.matchesUrl("notMatchingUrl")); + } + + private void compareSubnetworkId(SubnetworkId expected, SubnetworkId value) { + assertEquals(expected, value); + assertEquals(expected.project(), value.project()); + assertEquals(expected.region(), value.region()); + assertEquals(expected.subnetwork(), value.subnetwork()); + assertEquals(expected.selfLink(), value.selfLink()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkInfoTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkInfoTest.java new file mode 100644 index 000000000000..17907ea2ac3a --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkInfoTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import org.junit.Test; + +public class SubnetworkInfoTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final SubnetworkId SUBNETWORK_ID = + SubnetworkId.of("project", "region", "subnetwork"); + private static final String GATEWAY_ADDRESS = "192.168.1.1"; + private static final NetworkId NETWORK_ID = NetworkId.of("project", "network"); + private static final String IP_RANGE = "192.168.0.0/16"; + private static final SubnetworkInfo SUBNETWORK_INFO = + SubnetworkInfo.builder(SUBNETWORK_ID, NETWORK_ID, IP_RANGE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .gatewayAddress(GATEWAY_ADDRESS) + .build(); + + @Test + public void testToBuilder() { + compareSubnetworkInfo(SUBNETWORK_INFO, SUBNETWORK_INFO.toBuilder().build()); + SubnetworkInfo subnetworkInfo = + SUBNETWORK_INFO.toBuilder().description("newDescription").build(); + assertEquals("newDescription", subnetworkInfo.description()); + subnetworkInfo = subnetworkInfo.toBuilder().description("description").build(); + compareSubnetworkInfo(SUBNETWORK_INFO, subnetworkInfo); + } + + @Test + public void testToBuilderIncomplete() { + SubnetworkInfo subnetworkInfo = SubnetworkInfo.of(SUBNETWORK_ID, NETWORK_ID, IP_RANGE); + assertEquals(subnetworkInfo, subnetworkInfo.toBuilder().build()); + } + + @Test + public void testBuilder() { + assertEquals(GENERATED_ID, SUBNETWORK_INFO.generatedId()); + assertEquals(SUBNETWORK_ID, SUBNETWORK_INFO.subnetworkId()); + assertEquals(CREATION_TIMESTAMP, SUBNETWORK_INFO.creationTimestamp()); + assertEquals(DESCRIPTION, SUBNETWORK_INFO.description()); + assertEquals(GATEWAY_ADDRESS, SUBNETWORK_INFO.gatewayAddress()); + assertEquals(NETWORK_ID, SUBNETWORK_INFO.network()); + assertEquals(IP_RANGE, SUBNETWORK_INFO.ipRange()); + } + + @Test + public void testOf() { + SubnetworkInfo subnetworkInfo = SubnetworkInfo.of(SUBNETWORK_ID, NETWORK_ID, IP_RANGE); + assertNull(subnetworkInfo.generatedId()); + assertEquals(SUBNETWORK_ID, subnetworkInfo.subnetworkId()); + assertNull(subnetworkInfo.creationTimestamp()); + assertNull(subnetworkInfo.description()); + assertNull(subnetworkInfo.gatewayAddress()); + assertEquals(NETWORK_ID, subnetworkInfo.network()); + assertEquals(IP_RANGE, subnetworkInfo.ipRange()); + } + + @Test + public void testToAndFromPb() { + compareSubnetworkInfo(SUBNETWORK_INFO, SubnetworkInfo.fromPb(SUBNETWORK_INFO.toPb())); + SubnetworkInfo subnetworkInfo = SubnetworkInfo.of(SUBNETWORK_ID, NETWORK_ID, IP_RANGE); + compareSubnetworkInfo(subnetworkInfo, SubnetworkInfo.fromPb(subnetworkInfo.toPb())); + } + + @Test + public void testSetProjectId() { + SubnetworkInfo subnetworkInfo = SUBNETWORK_INFO.toBuilder() + .subnetworkId(SubnetworkId.of("region", "subnetwork")) + .network(NetworkId.of("network")) + .build(); + compareSubnetworkInfo(SUBNETWORK_INFO, subnetworkInfo.setProjectId("project")); + } + + public void compareSubnetworkInfo(SubnetworkInfo expected, SubnetworkInfo value) { + assertEquals(expected, value); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.subnetworkId(), value.subnetworkId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + 
assertEquals(expected.gatewayAddress(), value.gatewayAddress()); + assertEquals(expected.network(), value.network()); + assertEquals(expected.ipRange(), value.ipRange()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkTest.java new file mode 100644 index 000000000000..6a710738b935 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/SubnetworkTest.java @@ -0,0 +1,210 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute; + +import static org.easymock.EasyMock.createMock; +import static org.easymock.EasyMock.createStrictMock; +import static org.easymock.EasyMock.expect; +import static org.easymock.EasyMock.replay; +import static org.easymock.EasyMock.verify; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import org.junit.Test; + +public class SubnetworkTest { + + private static final String GENERATED_ID = "42"; + private static final Long CREATION_TIMESTAMP = 1453293540000L; + private static final String DESCRIPTION = "description"; + private static final SubnetworkId SUBNETWORK_ID = SubnetworkId.of("project", "region", "network"); + private static final String GATEWAY_ADDRESS = "192.168.1.1"; + private static final NetworkId NETWORK_ID = NetworkId.of("project", "network"); + private static final String IP_RANGE = "192.168.0.0/16"; + + private final Compute serviceMockReturnsOptions = createStrictMock(Compute.class); + private final ComputeOptions mockOptions = createMock(ComputeOptions.class); + private Compute compute; + private Subnetwork subnetwork; + private Subnetwork expectedSubnetwork; + + private void initializeExpectedSubnetwork(int optionsCalls) { + expect(serviceMockReturnsOptions.options()).andReturn(mockOptions).times(optionsCalls); + replay(serviceMockReturnsOptions); + expectedSubnetwork = + new Subnetwork.Builder(serviceMockReturnsOptions, SUBNETWORK_ID, NETWORK_ID, IP_RANGE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .gatewayAddress(GATEWAY_ADDRESS) + .build(); + compute = createStrictMock(Compute.class); + } + + private void initializeSubnetwork() { + subnetwork = + new Subnetwork.Builder(compute, SUBNETWORK_ID, NETWORK_ID, IP_RANGE) + .generatedId(GENERATED_ID) + .creationTimestamp(CREATION_TIMESTAMP) + .description(DESCRIPTION) + .gatewayAddress(GATEWAY_ADDRESS) + .build(); + } + + @Test + public void testToBuilder() { + initializeExpectedSubnetwork(8); + compareSubnetwork(expectedSubnetwork, expectedSubnetwork.toBuilder().build()); + Subnetwork newSubnetwork = expectedSubnetwork.toBuilder().description("newDescription").build(); + 
assertEquals("newDescription", newSubnetwork.description()); + newSubnetwork = newSubnetwork.toBuilder().description("description").build(); + compareSubnetwork(expectedSubnetwork, newSubnetwork); + } + + @Test + public void testToBuilderIncomplete() { + initializeExpectedSubnetwork(5); + SubnetworkInfo subnetworkInfo = SubnetworkInfo.of(SUBNETWORK_ID, NETWORK_ID, IP_RANGE); + Subnetwork subnetwork = + new Subnetwork(serviceMockReturnsOptions, new SubnetworkInfo.BuilderImpl(subnetworkInfo)); + compareSubnetwork(subnetwork, subnetwork.toBuilder().build()); + } + + @Test + public void testBuilder() { + initializeExpectedSubnetwork(1); + assertEquals(GENERATED_ID, expectedSubnetwork.generatedId()); + assertEquals(SUBNETWORK_ID, expectedSubnetwork.subnetworkId()); + assertEquals(CREATION_TIMESTAMP, expectedSubnetwork.creationTimestamp()); + assertEquals(DESCRIPTION, expectedSubnetwork.description()); + assertEquals(GATEWAY_ADDRESS, expectedSubnetwork.gatewayAddress()); + assertEquals(NETWORK_ID, expectedSubnetwork.network()); + assertEquals(IP_RANGE, expectedSubnetwork.ipRange()); + assertSame(serviceMockReturnsOptions, expectedSubnetwork.compute()); + } + + @Test + public void testToAndFromPb() { + initializeExpectedSubnetwork(8); + compareSubnetwork(expectedSubnetwork, + Subnetwork.fromPb(serviceMockReturnsOptions, expectedSubnetwork.toPb())); + Subnetwork subnetwork = + new Subnetwork.Builder(serviceMockReturnsOptions, SUBNETWORK_ID, NETWORK_ID, IP_RANGE) + .build(); + compareSubnetwork(subnetwork, Subnetwork.fromPb(serviceMockReturnsOptions, subnetwork.toPb())); + } + + @Test + public void testDeleteOperation() { + initializeExpectedSubnetwork(2); + expect(compute.options()).andReturn(mockOptions); + Operation operation = new Operation.Builder(serviceMockReturnsOptions) + .operationId(GlobalOperationId.of("project", "op")) + .build(); + expect(compute.deleteSubnetwork(SUBNETWORK_ID)).andReturn(operation); + replay(compute); + initializeSubnetwork(); + assertSame(operation, subnetwork.delete()); + } + + @Test + public void testDeleteNull() { + initializeExpectedSubnetwork(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.deleteSubnetwork(SUBNETWORK_ID)).andReturn(null); + replay(compute); + initializeSubnetwork(); + assertNull(subnetwork.delete()); + } + + @Test + public void testExists_True() throws Exception { + initializeExpectedSubnetwork(1); + Compute.SubnetworkOption[] expectedOptions = {Compute.SubnetworkOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSubnetwork(SUBNETWORK_ID, expectedOptions)) + .andReturn(expectedSubnetwork); + replay(compute); + initializeSubnetwork(); + assertTrue(subnetwork.exists()); + verify(compute); + } + + @Test + public void testExists_False() throws Exception { + initializeExpectedSubnetwork(1); + Compute.SubnetworkOption[] expectedOptions = {Compute.SubnetworkOption.fields()}; + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSubnetwork(SUBNETWORK_ID, expectedOptions)).andReturn(null); + replay(compute); + initializeSubnetwork(); + assertFalse(subnetwork.exists()); + verify(compute); + } + + @Test + public void testReload() throws Exception { + initializeExpectedSubnetwork(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSubnetwork(SUBNETWORK_ID)).andReturn(expectedSubnetwork); + replay(compute); + initializeSubnetwork(); + Subnetwork updatedSubnetwork = subnetwork.reload(); + compareSubnetwork(expectedSubnetwork, updatedSubnetwork); + 
verify(compute); + } + + @Test + public void testReloadNull() throws Exception { + initializeExpectedSubnetwork(1); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSubnetwork(SUBNETWORK_ID)).andReturn(null); + replay(compute); + initializeSubnetwork(); + assertNull(subnetwork.reload()); + verify(compute); + } + + @Test + public void testReloadWithOptions() throws Exception { + initializeExpectedSubnetwork(3); + expect(compute.options()).andReturn(mockOptions); + expect(compute.getSubnetwork(SUBNETWORK_ID, Compute.SubnetworkOption.fields())) + .andReturn(expectedSubnetwork); + replay(compute); + initializeSubnetwork(); + Subnetwork updatedSubnetwork = subnetwork.reload(Compute.SubnetworkOption.fields()); + compareSubnetwork(expectedSubnetwork, updatedSubnetwork); + verify(compute); + } + + public void compareSubnetwork(Subnetwork expected, Subnetwork value) { + assertEquals(expected, value); + assertEquals(expected.compute().options(), value.compute().options()); + assertEquals(expected.generatedId(), value.generatedId()); + assertEquals(expected.subnetworkId(), value.subnetworkId()); + assertEquals(expected.creationTimestamp(), value.creationTimestamp()); + assertEquals(expected.description(), value.description()); + assertEquals(expected.gatewayAddress(), value.gatewayAddress()); + assertEquals(expected.network(), value.network()); + assertEquals(expected.ipRange(), value.ipRange()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/TagsTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/TagsTest.java new file mode 100644 index 000000000000..f626a418fefd --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/TagsTest.java @@ -0,0 +1,69 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.compute; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; + +import com.google.common.collect.ImmutableList; + +import org.junit.Test; + +public class TagsTest { + + private static final Tags TAGS = Tags.of("tag1", "tag2"); + + @Test + public void testToBuilder() { + Tags tags = TAGS.toBuilder().values("tag1").build(); + assertEquals(ImmutableList.of("tag1"), tags.values()); + compareTags(TAGS, tags.toBuilder().values("tag1", "tag2").build()); + } + + @Test + public void testBuilder() { + Tags tags = Tags.builder().values(ImmutableList.of("tag1", "tag2")).build(); + assertEquals(ImmutableList.of("tag1", "tag2"), tags.values()); + assertNull(tags.fingerprint()); + tags = Tags.builder().add("tag1").add("tag2").build(); + assertEquals(ImmutableList.of("tag1", "tag2"), tags.values()); + assertNull(tags.fingerprint()); + tags = Tags.builder().add("tag1").add("tag2").fingerprint("fingerprint").build(); + assertEquals(ImmutableList.of("tag1", "tag2"), tags.values()); + assertEquals("fingerprint", tags.fingerprint()); + } + + @Test + public void testOf() { + compareTags(TAGS, Tags.of("tag1", "tag2")); + compareTags(TAGS, Tags.of(ImmutableList.of("tag1", "tag2"))); + } + + @Test + public void testToAndFromPb() { + compareTags(TAGS, Tags.fromPb(TAGS.toPb())); + Tags tags = Tags.builder().add("tag1").add("tag2").fingerprint("fingerprint").build(); + compareTags(tags, Tags.fromPb(tags.toPb())); + } + + public void compareTags(Tags expected, Tags value) { + assertEquals(expected, value); + assertEquals(expected.fingerprint(), value.fingerprint()); + assertEquals(expected.values(), value.values()); + assertEquals(expected.hashCode(), value.hashCode()); + } +} diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ZoneIdTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ZoneIdTest.java new file mode 100644 index 000000000000..90bdc6de2bf3 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ZoneIdTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.google.cloud.compute;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;
+
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class ZoneIdTest {
+
+  private static final String PROJECT = "project";
+  private static final String ZONE = "zone";
+  private static final String URL =
+      "https://www.googleapis.com/compute/v1/projects/project/zones/zone";
+
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
+
+  @Test
+  public void testOf() {
+    ZoneId zoneId = ZoneId.of(PROJECT, ZONE);
+    assertEquals(PROJECT, zoneId.project());
+    assertEquals(ZONE, zoneId.zone());
+    assertEquals(URL, zoneId.selfLink());
+    zoneId = ZoneId.of(ZONE);
+    assertNull(zoneId.project());
+    assertEquals(ZONE, zoneId.zone());
+  }
+
+  @Test
+  public void testToAndFromUrl() {
+    ZoneId zoneId = ZoneId.of(PROJECT, ZONE);
+    compareZoneId(zoneId, ZoneId.fromUrl(zoneId.selfLink()));
+  }
+
+  @Test
+  public void testSetProjectId() {
+    ZoneId zoneId = ZoneId.of(PROJECT, ZONE);
+    assertSame(zoneId, zoneId.setProjectId(PROJECT));
+    compareZoneId(zoneId, ZoneId.of(ZONE).setProjectId(PROJECT));
+    thrown.expect(IllegalArgumentException.class);
+    thrown.expectMessage("notMatchingUrl is not a valid zone URL");
+    ZoneId.fromUrl("notMatchingUrl");
+  }
+
+  @Test
+  public void testMatchesUrl() {
+    assertTrue(ZoneId.matchesUrl(ZoneId.of(PROJECT, ZONE).selfLink()));
+    assertFalse(ZoneId.matchesUrl("notMatchingUrl"));
+  }
+
+  private void compareZoneId(ZoneId expected, ZoneId value) {
+    assertEquals(expected, value);
+    assertEquals(expected.project(), value.project());
+    assertEquals(expected.zone(), value.zone());
+    assertEquals(expected.selfLink(), value.selfLink());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/ZoneTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ZoneTest.java new file mode 100644 index 000000000000..b03aabc5b34e --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/ZoneTest.java @@ -0,0 +1,77 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.google.cloud.compute;
+
+import static org.junit.Assert.assertEquals;
+
+import org.junit.Test;
+
+public class ZoneTest {
+
+  private static final ZoneId ZONE_ID = ZoneId.of("project", "zone");
+  private static final RegionId REGION_ID = RegionId.of("project", "region");
+  private static final String GENERATED_ID = "42";
+  private static final Long CREATION_TIMESTAMP = 1453293540000L;
+  private static final String DESCRIPTION = "description";
+  private static final Zone.Status STATUS = Zone.Status.DOWN;
+  private static final DeprecationStatus DEPRECATION_STATUS =
+      DeprecationStatus.of(DeprecationStatus.Status.DELETED, ZONE_ID);
+  private static final Zone ZONE = Zone.builder()
+      .zoneId(ZONE_ID)
+      .generatedId(GENERATED_ID)
+      .creationTimestamp(CREATION_TIMESTAMP)
+      .description(DESCRIPTION)
+      .status(STATUS)
+      .deprecationStatus(DEPRECATION_STATUS)
+      .region(REGION_ID)
+      .build();
+
+  @Test
+  public void testBuilder() {
+    assertEquals(ZONE_ID, ZONE.zoneId());
+    assertEquals(GENERATED_ID, ZONE.generatedId());
+    assertEquals(CREATION_TIMESTAMP, ZONE.creationTimestamp());
+    assertEquals(DESCRIPTION, ZONE.description());
+    assertEquals(STATUS, ZONE.status());
+    assertEquals(REGION_ID, ZONE.region());
+    assertEquals(DEPRECATION_STATUS, ZONE.deprecationStatus());
+  }
+
+  @Test
+  public void testToAndFromPb() {
+    com.google.api.services.compute.model.Zone zonePb = ZONE.toPb();
+    assertEquals(REGION_ID.selfLink(), zonePb.getRegion());
+    Zone zone = Zone.fromPb(zonePb);
+    compareZones(ZONE, zone);
+    assertEquals(ZONE_ID.project(), zone.zoneId().project());
+    assertEquals(ZONE_ID.zone(), zone.zoneId().zone());
+    zone = Zone.builder().zoneId(ZONE_ID).build();
+    compareZones(zone, Zone.fromPb(zone.toPb()));
+  }
+
+  private void compareZones(Zone expected, Zone value) {
+    assertEquals(expected, value);
+    assertEquals(expected.zoneId(), value.zoneId());
+    assertEquals(expected.generatedId(), value.generatedId());
+    assertEquals(expected.creationTimestamp(), value.creationTimestamp());
+    assertEquals(expected.description(), value.description());
+    assertEquals(expected.status(), value.status());
+    assertEquals(expected.region(), value.region());
+    assertEquals(expected.deprecationStatus(), value.deprecationStatus());
+    assertEquals(expected.hashCode(), value.hashCode());
+  }
+}
diff --git a/gcloud-java-compute/src/test/java/com/google/cloud/compute/it/ITComputeTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/it/ITComputeTest.java new file mode 100644 index 000000000000..71013a3c1120 --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/it/ITComputeTest.java @@ -0,0 +1,1919 @@
+/*
+ * Copyright 2016 Google Inc. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.google.cloud.compute.it; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.Page; +import com.google.cloud.compute.Address; +import com.google.cloud.compute.AddressId; +import com.google.cloud.compute.AddressInfo; +import com.google.cloud.compute.AttachedDisk; +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.DeprecationStatus; +import com.google.cloud.compute.Disk; +import com.google.cloud.compute.DiskConfiguration; +import com.google.cloud.compute.DiskId; +import com.google.cloud.compute.DiskImageConfiguration; +import com.google.cloud.compute.DiskInfo; +import com.google.cloud.compute.DiskType; +import com.google.cloud.compute.DiskTypeId; +import com.google.cloud.compute.GlobalAddressId; +import com.google.cloud.compute.Image; +import com.google.cloud.compute.ImageConfiguration; +import com.google.cloud.compute.ImageDiskConfiguration; +import com.google.cloud.compute.ImageId; +import com.google.cloud.compute.ImageInfo; +import com.google.cloud.compute.Instance; +import com.google.cloud.compute.InstanceId; +import com.google.cloud.compute.InstanceInfo; +import com.google.cloud.compute.License; +import com.google.cloud.compute.LicenseId; +import com.google.cloud.compute.MachineType; +import com.google.cloud.compute.MachineTypeId; +import com.google.cloud.compute.Network; +import com.google.cloud.compute.NetworkConfiguration; +import com.google.cloud.compute.NetworkId; +import com.google.cloud.compute.NetworkInfo; +import com.google.cloud.compute.NetworkInterface; +import com.google.cloud.compute.Operation; +import com.google.cloud.compute.Region; +import com.google.cloud.compute.RegionAddressId; +import com.google.cloud.compute.RegionOperationId; +import com.google.cloud.compute.SchedulingOptions; +import com.google.cloud.compute.Snapshot; +import com.google.cloud.compute.SnapshotDiskConfiguration; +import com.google.cloud.compute.SnapshotId; +import com.google.cloud.compute.SnapshotInfo; +import com.google.cloud.compute.StandardDiskConfiguration; +import com.google.cloud.compute.StandardNetworkConfiguration; +import com.google.cloud.compute.StorageImageConfiguration; +import com.google.cloud.compute.SubnetNetworkConfiguration; +import com.google.cloud.compute.Subnetwork; +import com.google.cloud.compute.SubnetworkId; +import com.google.cloud.compute.SubnetworkInfo; +import com.google.cloud.compute.Zone; +import com.google.cloud.compute.ZoneOperationId; +import com.google.cloud.compute.testing.RemoteComputeHelper; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; + +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; + +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class ITComputeTest { + + private static final String REGION = "us-central1"; + private static final String ZONE = "us-central1-a"; + private static final String DISK_TYPE = "local-ssd"; + private static final String MACHINE_TYPE = "f1-micro"; + private static final LicenseId LICENSE_ID = LicenseId.of("ubuntu-os-cloud", "ubuntu-1404-trusty"); + private static final String BASE_RESOURCE_NAME = 
RemoteComputeHelper.baseResourceName(); + private static final ImageId IMAGE_ID = ImageId.of("debian-cloud", "debian-8-jessie-v20160219"); + private static final String IMAGE_PROJECT = "debian-cloud"; + + private static Compute compute; + + @Rule + public Timeout globalTimeout = Timeout.seconds(300); + + @BeforeClass + public static void beforeClass() { + RemoteComputeHelper computeHelper = RemoteComputeHelper.create(); + compute = computeHelper.options().service(); + } + + @Test + public void testGetDiskType() { + DiskType diskType = compute.getDiskType(ZONE, DISK_TYPE); + // assertNotNull(diskType.generatedId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertEquals(DISK_TYPE, diskType.diskTypeId().type()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotNull(diskType.defaultDiskSizeGb()); + } + + @Test + public void testGetDiskTypeWithSelectedFields() { + DiskType diskType = compute.getDiskType(ZONE, DISK_TYPE, + Compute.DiskTypeOption.fields(Compute.DiskTypeField.CREATION_TIMESTAMP)); + // assertNotNull(diskType.generatedId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertEquals(DISK_TYPE, diskType.diskTypeId().type()); + assertNotNull(diskType.creationTimestamp()); + assertNull(diskType.description()); + assertNull(diskType.validDiskSize()); + assertNull(diskType.defaultDiskSizeGb()); + } + + @Test + public void testListDiskTypes() { + Page diskPage = compute.listDiskTypes(ZONE); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while (diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // assertNotNull(diskType.generatedId()); + assertNotNull(diskType.diskTypeId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotNull(diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testListDiskTypesWithSelectedFields() { + Page diskPage = compute.listDiskTypes(ZONE, + Compute.DiskTypeListOption.fields(Compute.DiskTypeField.CREATION_TIMESTAMP)); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while (diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + assertNull(diskType.generatedId()); + assertNotNull(diskType.diskTypeId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertNotNull(diskType.creationTimestamp()); + assertNull(diskType.description()); + assertNull(diskType.validDiskSize()); + assertNull(diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testListDiskTypesWithFilter() { + Page diskPage = compute.listDiskTypes(ZONE, Compute.DiskTypeListOption.filter( + Compute.DiskTypeFilter.equals(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB, 375))); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while (diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.generatedId()); + assertNotNull(diskType.diskTypeId()); + assertEquals(ZONE, diskType.diskTypeId().zone()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertEquals(375, (long) diskType.defaultDiskSizeGb()); + } + } + + @Test + public void 
testAggregatedListDiskTypes() { + Page diskPage = compute.listDiskTypes(); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while (diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // assertNotNull(diskType.generatedId()); + assertNotNull(diskType.diskTypeId()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotNull(diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testAggregatedListDiskTypesWithFilter() { + Page diskPage = compute.listDiskTypes(Compute.DiskTypeAggregatedListOption.filter( + Compute.DiskTypeFilter.notEquals(Compute.DiskTypeField.DEFAULT_DISK_SIZE_GB, 375))); + Iterator diskTypeIterator = diskPage.iterateAll(); + assertTrue(diskTypeIterator.hasNext()); + while (diskTypeIterator.hasNext()) { + DiskType diskType = diskTypeIterator.next(); + // todo(mziccard): uncomment or remove once #695 is closed + // assertNotNull(diskType.generatedId()); + assertNotNull(diskType.diskTypeId()); + assertNotNull(diskType.creationTimestamp()); + assertNotNull(diskType.description()); + assertNotNull(diskType.validDiskSize()); + assertNotEquals(375, (long) diskType.defaultDiskSizeGb()); + } + } + + @Test + public void testGetMachineType() { + MachineType machineType = compute.getMachineType(ZONE, MACHINE_TYPE); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertEquals(MACHINE_TYPE, machineType.machineTypeId().type()); + assertNotNull(machineType.generatedId()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + + @Test + public void testGetMachineTypeWithSelectedFields() { + MachineType machineType = compute.getMachineType(ZONE, MACHINE_TYPE, + Compute.MachineTypeOption.fields(Compute.MachineTypeField.ID)); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertEquals(MACHINE_TYPE, machineType.machineTypeId().type()); + assertNotNull(machineType.generatedId()); + assertNull(machineType.creationTimestamp()); + assertNull(machineType.description()); + assertNull(machineType.cpus()); + assertNull(machineType.memoryMb()); + assertNull(machineType.maximumPersistentDisks()); + assertNull(machineType.maximumPersistentDisksSizeGb()); + } + + @Test + public void testListMachineTypes() { + Page machinePage = compute.listMachineTypes(ZONE); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while (machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertNotNull(machineType.generatedId()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testListMachineTypesWithSelectedFields() { + Page machinePage = compute.listMachineTypes(ZONE, + Compute.MachineTypeListOption.fields(Compute.MachineTypeField.CREATION_TIMESTAMP)); + Iterator machineTypeIterator = machinePage.iterateAll(); + 
assertTrue(machineTypeIterator.hasNext()); + while (machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertNull(machineType.generatedId()); + assertNotNull(machineType.creationTimestamp()); + assertNull(machineType.description()); + assertNull(machineType.cpus()); + assertNull(machineType.memoryMb()); + assertNull(machineType.maximumPersistentDisks()); + assertNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testListMachineTypesWithFilter() { + Page machinePage = compute.listMachineTypes(ZONE, + Compute.MachineTypeListOption.filter( + Compute.MachineTypeFilter.equals(Compute.MachineTypeField.GUEST_CPUS, 2))); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while (machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertEquals(ZONE, machineType.machineTypeId().zone()); + assertNotNull(machineType.generatedId()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertEquals(2, (long) machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testAggregatedListMachineTypes() { + Page machinePage = compute.listMachineTypes(); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while (machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertNotNull(machineType.generatedId()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testAggregatedListMachineTypesWithFilter() { + Page machinePage = + compute.listMachineTypes(Compute.MachineTypeAggregatedListOption.filter( + Compute.MachineTypeFilter.notEquals(Compute.MachineTypeField.GUEST_CPUS, 2))); + Iterator machineTypeIterator = machinePage.iterateAll(); + assertTrue(machineTypeIterator.hasNext()); + while (machineTypeIterator.hasNext()) { + MachineType machineType = machineTypeIterator.next(); + assertNotNull(machineType.machineTypeId()); + assertNotNull(machineType.generatedId()); + assertNotNull(machineType.creationTimestamp()); + assertNotNull(machineType.description()); + assertNotNull(machineType.cpus()); + assertNotEquals(2, (long) machineType.cpus()); + assertNotNull(machineType.memoryMb()); + assertNotNull(machineType.maximumPersistentDisks()); + assertNotNull(machineType.maximumPersistentDisksSizeGb()); + } + } + + @Test + public void testGetLicense() { + License license = compute.getLicense(LICENSE_ID); + assertEquals(LICENSE_ID, license.licenseId()); + assertNotNull(license.chargesUseFee()); + } + + @Test + public void testGetLicenseWithSelectedFields() { + License license = compute.getLicense(LICENSE_ID, Compute.LicenseOption.fields()); + assertEquals(LICENSE_ID, license.licenseId()); + assertNull(license.chargesUseFee()); + } + + @Test + public void testGetRegion() { + Region region = 
compute.getRegion(REGION); + assertEquals(REGION, region.regionId().region()); + assertNotNull(region.description()); + assertNotNull(region.creationTimestamp()); + assertNotNull(region.generatedId()); + assertNotNull(region.quotas()); + assertNotNull(region.status()); + assertNotNull(region.zones()); + } + + @Test + public void testGetRegionWithSelectedFields() { + Region region = compute.getRegion(REGION, Compute.RegionOption.fields(Compute.RegionField.ID)); + assertEquals(REGION, region.regionId().region()); + assertNotNull(region.generatedId()); + assertNull(region.description()); + assertNull(region.creationTimestamp()); + assertNull(region.quotas()); + assertNull(region.status()); + assertNull(region.zones()); + } + + @Test + public void testListRegions() { + Page regionPage = compute.listRegions(); + Iterator regionIterator = regionPage.iterateAll(); + while (regionIterator.hasNext()) { + Region region = regionIterator.next(); + assertNotNull(region.regionId()); + assertNotNull(region.description()); + assertNotNull(region.creationTimestamp()); + assertNotNull(region.generatedId()); + assertNotNull(region.quotas()); + assertNotNull(region.status()); + assertNotNull(region.zones()); + } + } + + @Test + public void testListRegionsWithSelectedFields() { + Page regionPage = + compute.listRegions(Compute.RegionListOption.fields(Compute.RegionField.ID)); + Iterator regionIterator = regionPage.iterateAll(); + while (regionIterator.hasNext()) { + Region region = regionIterator.next(); + assertNotNull(region.regionId()); + assertNull(region.description()); + assertNull(region.creationTimestamp()); + assertNotNull(region.generatedId()); + assertNull(region.quotas()); + assertNull(region.status()); + assertNull(region.zones()); + } + } + + @Test + public void testListRegionsWithFilter() { + Page regionPage = compute.listRegions(Compute.RegionListOption.filter( + Compute.RegionFilter.equals(Compute.RegionField.NAME, REGION))); + Iterator regionIterator = regionPage.iterateAll(); + assertEquals(REGION, regionIterator.next().regionId().region()); + assertFalse(regionIterator.hasNext()); + } + + @Test + public void testGetZone() { + Zone zone = compute.getZone(ZONE); + assertEquals(ZONE, zone.zoneId().zone()); + assertNotNull(zone.generatedId()); + assertNotNull(zone.creationTimestamp()); + assertNotNull(zone.description()); + assertNotNull(zone.status()); + assertNotNull(zone.region()); + } + + @Test + public void testGetZoneWithSelectedFields() { + Zone zone = compute.getZone(ZONE, Compute.ZoneOption.fields(Compute.ZoneField.ID)); + assertEquals(ZONE, zone.zoneId().zone()); + assertNotNull(zone.generatedId()); + assertNull(zone.creationTimestamp()); + assertNull(zone.description()); + assertNull(zone.status()); + assertNull(zone.region()); + } + + @Test + public void testListZones() { + Page zonePage = compute.listZones(); + Iterator zoneIterator = zonePage.iterateAll(); + while (zoneIterator.hasNext()) { + Zone zone = zoneIterator.next(); + assertNotNull(zone.zoneId()); + assertNotNull(zone.generatedId()); + assertNotNull(zone.creationTimestamp()); + assertNotNull(zone.description()); + assertNotNull(zone.status()); + assertNotNull(zone.region()); + } + } + + @Test + public void testListZonesWithSelectedFields() { + Page zonePage = compute.listZones( + Compute.ZoneListOption.fields(Compute.ZoneField.CREATION_TIMESTAMP)); + Iterator zoneIterator = zonePage.iterateAll(); + while (zoneIterator.hasNext()) { + Zone zone = zoneIterator.next(); + assertNotNull(zone.zoneId()); + 
assertNull(zone.generatedId()); + assertNotNull(zone.creationTimestamp()); + assertNull(zone.description()); + assertNull(zone.status()); + assertNull(zone.region()); + } + } + + @Test + public void testListZonesWithFilter() { + Page zonePage = compute.listZones( + Compute.ZoneListOption.filter(Compute.ZoneFilter.equals(Compute.ZoneField.NAME, ZONE))); + Iterator zoneIterator = zonePage.iterateAll(); + assertEquals(ZONE, zoneIterator.next().zoneId().zone()); + assertFalse(zoneIterator.hasNext()); + } + + @Test + public void testListGlobalOperations() { + Page operationPage = compute.listGlobalOperations(); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + // todo(mziccard): uncomment or remove once #727 is closed + // assertNotNull(operation.creationTimestamp()); + assertNotNull(operation.operationType()); + assertNotNull(operation.status()); + assertNotNull(operation.user()); + } + } + + @Test + public void testListGlobalOperationsWithSelectedFields() { + Page operationPage = + compute.listGlobalOperations(Compute.OperationListOption.fields(Compute.OperationField.ID)); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + assertNull(operation.operationType()); + assertNull(operation.targetLink()); + assertNull(operation.targetId()); + assertNull(operation.operationType()); + assertNull(operation.status()); + assertNull(operation.statusMessage()); + assertNull(operation.user()); + assertNull(operation.progress()); + assertNull(operation.description()); + assertNull(operation.insertTime()); + assertNull(operation.startTime()); + assertNull(operation.endTime()); + assertNull(operation.warnings()); + assertNull(operation.httpErrorMessage()); + } + } + + @Test + public void testListGlobalOperationsWithFilter() { + Page operationPage = compute.listGlobalOperations(Compute.OperationListOption.filter( + Compute.OperationFilter.equals(Compute.OperationField.STATUS, "DONE"))); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + // todo(mziccard): uncomment or remove once #727 is closed + // assertNotNull(operation.creationTimestamp()); + assertNotNull(operation.operationType()); + assertEquals(Operation.Status.DONE, operation.status()); + assertNotNull(operation.user()); + } + } + + @Test + public void testListRegionOperations() { + Page operationPage = compute.listRegionOperations(REGION); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + assertEquals(REGION, operation.operationId().region()); + // todo(mziccard): uncomment or remove once #727 is closed + // assertNotNull(operation.creationTimestamp()); + assertNotNull(operation.operationType()); + assertNotNull(operation.status()); + assertNotNull(operation.user()); + } + } + + @Test + public void testListRegionOperationsWithSelectedFields() { + Page operationPage = compute.listRegionOperations(REGION, + 
Compute.OperationListOption.fields(Compute.OperationField.ID)); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + assertEquals(REGION, operation.operationId().region()); + assertNull(operation.operationType()); + assertNull(operation.targetLink()); + assertNull(operation.targetId()); + assertNull(operation.operationType()); + assertNull(operation.status()); + assertNull(operation.statusMessage()); + assertNull(operation.user()); + assertNull(operation.progress()); + assertNull(operation.description()); + assertNull(operation.insertTime()); + assertNull(operation.startTime()); + assertNull(operation.endTime()); + assertNull(operation.warnings()); + assertNull(operation.httpErrorMessage()); + } + } + + @Test + public void testListRegionOperationsWithFilter() { + Page operationPage = compute.listRegionOperations(REGION, + Compute.OperationListOption.filter(Compute.OperationFilter.equals( + Compute.OperationField.STATUS, "DONE"))); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + assertEquals(REGION, operation.operationId().region()); + // todo(mziccard): uncomment or remove once #727 is closed + // assertNotNull(operation.creationTimestamp()); + assertNotNull(operation.operationType()); + assertEquals(Operation.Status.DONE, operation.status()); + assertNotNull(operation.user()); + } + } + + @Test + public void testListZoneOperations() { + Page operationPage = compute.listZoneOperations(ZONE); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + Assert.assertEquals(ZONE, operation.operationId().zone()); + // todo(mziccard): uncomment or remove once #727 is closed + // assertNotNull(operation.creationTimestamp()); + assertNotNull(operation.operationType()); + assertNotNull(operation.status()); + assertNotNull(operation.user()); + } + } + + @Test + public void testListZoneOperationsWithSelectedFields() { + Page operationPage = compute.listZoneOperations(ZONE, + Compute.OperationListOption.fields(Compute.OperationField.ID)); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + assertEquals(ZONE, operation.operationId().zone()); + assertNull(operation.operationType()); + assertNull(operation.targetLink()); + assertNull(operation.targetId()); + assertNull(operation.operationType()); + assertNull(operation.status()); + assertNull(operation.statusMessage()); + assertNull(operation.user()); + assertNull(operation.progress()); + assertNull(operation.description()); + assertNull(operation.insertTime()); + assertNull(operation.startTime()); + assertNull(operation.endTime()); + assertNull(operation.warnings()); + assertNull(operation.httpErrorMessage()); + } + } + + @Test + public void testListZoneOperationsWithFilter() { + Page operationPage = compute.listZoneOperations(ZONE, + Compute.OperationListOption.filter(Compute.OperationFilter.equals( + Compute.OperationField.STATUS, 
"DONE"))); + Iterator operationIterator = operationPage.iterateAll(); + while (operationIterator.hasNext()) { + Operation operation = operationIterator.next(); + assertNotNull(operation.generatedId()); + assertNotNull(operation.operationId()); + assertEquals(ZONE, operation.operationId().zone()); + // todo(mziccard): uncomment or remove once #727 is closed + // assertNotNull(operation.creationTimestamp()); + assertNotNull(operation.operationType()); + assertEquals(Operation.Status.DONE, operation.status()); + assertNotNull(operation.user()); + } + } + + @Test + public void testCreateGetAndDeleteRegionAddress() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "create-and-get-region-address"; + AddressId addressId = RegionAddressId.of(REGION, name); + AddressInfo addressInfo = AddressInfo.of(addressId); + Operation operation = compute.create(addressInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Address remoteAddress = compute.getAddress(addressId); + assertNotNull(remoteAddress); + assertTrue(remoteAddress.addressId() instanceof RegionAddressId); + assertEquals(REGION, remoteAddress.addressId().region()); + assertEquals(addressId.address(), remoteAddress.addressId().address()); + assertNotNull(remoteAddress.address()); + assertNotNull(remoteAddress.creationTimestamp()); + assertNotNull(remoteAddress.generatedId()); + assertNotNull(remoteAddress.status()); + // test get with selected fields + remoteAddress = compute.getAddress(addressId, Compute.AddressOption.fields()); + assertNotNull(remoteAddress); + assertTrue(remoteAddress.addressId() instanceof RegionAddressId); + assertEquals(REGION, remoteAddress.addressId().region()); + assertEquals(addressId.address(), remoteAddress.addressId().address()); + assertNull(remoteAddress.address()); + assertNull(remoteAddress.creationTimestamp()); + assertNull(remoteAddress.generatedId()); + operation = remoteAddress.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getAddress(addressId)); + } + + @Test + public void testListRegionAddresses() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "list-region-address"; + String[] addressNames = {prefix + "1", prefix + "2"}; + AddressId firstAddressId = RegionAddressId.of(REGION, addressNames[0]); + AddressId secondAddressId = RegionAddressId.of(REGION, addressNames[1]); + Operation firstOperation = compute.create(AddressInfo.of(firstAddressId)); + Operation secondOperation = compute.create(AddressInfo.of(secondAddressId)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set addressSet = ImmutableSet.copyOf(addressNames); + // test list + Compute.AddressFilter filter = + Compute.AddressFilter.equals(Compute.AddressField.NAME, prefix + "\\d"); + Page
<Address> addressPage =
+        compute.listRegionAddresses(REGION, Compute.AddressListOption.filter(filter));
+    Iterator<Address>
addressIterator = addressPage.iterateAll(); + int count = 0; + while (addressIterator.hasNext()) { + Address address = addressIterator.next(); + assertNotNull(address.addressId()); + assertTrue(address.addressId() instanceof RegionAddressId); + assertEquals(REGION, address.addressId().region()); + assertTrue(addressSet.contains(address.addressId().address())); + assertNotNull(address.address()); + assertNotNull(address.creationTimestamp()); + assertNotNull(address.generatedId()); + count++; + } + assertEquals(2, count); + // test list with selected fields + count = 0; + addressPage = compute.listRegionAddresses(REGION, Compute.AddressListOption.filter(filter), + Compute.AddressListOption.fields(Compute.AddressField.ADDRESS)); + addressIterator = addressPage.iterateAll(); + while (addressIterator.hasNext()) { + Address address = addressIterator.next(); + assertTrue(address.addressId() instanceof RegionAddressId); + assertEquals(REGION, address.addressId().region()); + assertTrue(addressSet.contains(address.addressId().address())); + assertNotNull(address.address()); + assertNull(address.creationTimestamp()); + assertNull(address.generatedId()); + assertNull(address.status()); + assertNull(address.usage()); + count++; + } + assertEquals(2, count); + compute.deleteAddress(firstAddressId); + compute.deleteAddress(secondAddressId); + } + + @Test + public void testAggregatedListAddresses() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "aggregated-list-address"; + String[] addressNames = {prefix + "1", prefix + "2"}; + AddressId firstAddressId = RegionAddressId.of(REGION, addressNames[0]); + AddressId secondAddressId = GlobalAddressId.of(REGION, addressNames[1]); + Operation firstOperation = compute.create(AddressInfo.of(firstAddressId)); + Operation secondOperation = compute.create(AddressInfo.of(secondAddressId)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set addressSet = ImmutableSet.copyOf(addressNames); + Compute.AddressFilter filter = + Compute.AddressFilter.equals(Compute.AddressField.NAME, prefix + "\\d"); + Page
<Address> addressPage =
+        compute.listAddresses(Compute.AddressAggregatedListOption.filter(filter));
+    Iterator<Address>
addressIterator = addressPage.iterateAll(); + int count = 0; + while (addressIterator.hasNext()) { + Address address = addressIterator.next(); + assertNotNull(address.addressId()); + assertTrue(addressSet.contains(address.addressId().address())); + assertNotNull(address.address()); + assertNotNull(address.creationTimestamp()); + assertNotNull(address.generatedId()); + count++; + } + assertEquals(2, count); + compute.deleteAddress(firstAddressId); + compute.deleteAddress(secondAddressId); + } + + @Test + public void testCreateGetAndDeleteGlobalAddress() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "create-and-get-global-address"; + AddressId addressId = GlobalAddressId.of(name); + AddressInfo addressInfo = AddressInfo.of(addressId); + Operation operation = compute.create(addressInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Address remoteAddress = compute.getAddress(addressId); + assertNotNull(remoteAddress); + assertTrue(remoteAddress.addressId() instanceof GlobalAddressId); + assertEquals(addressId.address(), remoteAddress.addressId().address()); + assertNotNull(remoteAddress.address()); + assertNotNull(remoteAddress.creationTimestamp()); + assertNotNull(remoteAddress.generatedId()); + assertNotNull(remoteAddress.status()); + // test get with selected fields + remoteAddress = compute.getAddress(addressId, Compute.AddressOption.fields()); + assertNotNull(remoteAddress); + assertTrue(remoteAddress.addressId() instanceof GlobalAddressId); + assertEquals(addressId.address(), remoteAddress.addressId().address()); + assertNull(remoteAddress.address()); + assertNull(remoteAddress.creationTimestamp()); + assertNull(remoteAddress.generatedId()); + operation = remoteAddress.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getAddress(addressId)); + } + + @Test + public void testListGlobalAddresses() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "list-global-address"; + String[] addressNames = {prefix + "1", prefix + "2"}; + AddressId firstAddressId = GlobalAddressId.of(addressNames[0]); + AddressId secondAddressId = GlobalAddressId.of(addressNames[1]); + Operation firstOperation = compute.create(AddressInfo.of(firstAddressId)); + Operation secondOperation = compute.create(AddressInfo.of(secondAddressId)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set addressSet = ImmutableSet.copyOf(addressNames); + // test list + Compute.AddressFilter filter = + Compute.AddressFilter.equals(Compute.AddressField.NAME, prefix + "\\d"); + Page
<Address> addressPage =
+        compute.listGlobalAddresses(Compute.AddressListOption.filter(filter));
+    Iterator<Address>
addressIterator = addressPage.iterateAll(); + int count = 0; + while (addressIterator.hasNext()) { + Address address = addressIterator.next(); + assertNotNull(address.addressId()); + assertTrue(address.addressId() instanceof GlobalAddressId); + assertTrue(addressSet.contains(address.addressId().address())); + assertNotNull(address.address()); + assertNotNull(address.creationTimestamp()); + assertNotNull(address.generatedId()); + count++; + } + assertEquals(2, count); + // test list with selected fields + count = 0; + addressPage = compute.listGlobalAddresses(Compute.AddressListOption.filter(filter), + Compute.AddressListOption.fields(Compute.AddressField.ADDRESS)); + addressIterator = addressPage.iterateAll(); + while (addressIterator.hasNext()) { + Address address = addressIterator.next(); + assertTrue(address.addressId() instanceof GlobalAddressId); + assertTrue(addressSet.contains(address.addressId().address())); + assertNotNull(address.address()); + assertNull(address.creationTimestamp()); + assertNull(address.generatedId()); + assertNull(address.status()); + assertNull(address.usage()); + count++; + } + assertEquals(2, count); + compute.deleteAddress(firstAddressId); + compute.deleteAddress(secondAddressId); + } + + @Test + public void testCreateGetResizeAndDeleteStandardDisk() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "create-and-get-standard-disk"; + DiskId diskId = DiskId.of(ZONE, name); + DiskInfo diskInfo = + DiskInfo.of(diskId, StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Disk remoteDisk = compute.getDisk(diskId); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().type()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.resize(200L); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test resize and get with selected fields + remoteDisk = compute.getDisk(diskId, Compute.DiskOption.fields(Compute.DiskField.SIZE_GB)); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + remoteConfiguration = remoteDisk.configuration(); + assertEquals(200L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().type()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getDisk(diskId)); + } + + @Test + public void testCreateGetAndDeleteImageDisk() throws InterruptedException { + String name = 
BASE_RESOURCE_NAME + "create-and-get-image-disk"; + DiskId diskId = DiskId.of(ZONE, name); + DiskInfo diskInfo = DiskInfo.of(diskId, ImageDiskConfiguration.of(IMAGE_ID)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Disk remoteDisk = compute.getDisk(diskId); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof ImageDiskConfiguration); + ImageDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(IMAGE_ID, remoteConfiguration.sourceImage()); + assertNotNull(remoteConfiguration.sourceImageId()); + assertEquals(DiskConfiguration.Type.IMAGE, remoteConfiguration.type()); + assertNotNull(remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + // test get with selected fields + remoteDisk = compute.getDisk(diskId, Compute.DiskOption.fields()); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(diskId.disk(), remoteDisk.diskId().disk()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof ImageDiskConfiguration); + remoteConfiguration = remoteDisk.configuration(); + assertEquals(IMAGE_ID, remoteConfiguration.sourceImage()); + assertNull(remoteConfiguration.sourceImageId()); + assertEquals(DiskConfiguration.Type.IMAGE, remoteConfiguration.type()); + assertNull(remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getDisk(diskId)); + } + + @Test + public void testCreateGetAndDeleteSnapshotAndSnapshotDisk() throws InterruptedException { + String diskName = BASE_RESOURCE_NAME + "create-and-get-snapshot-disk1"; + String snapshotDiskName = BASE_RESOURCE_NAME + "create-and-get-snapshot-disk2"; + DiskId diskId = DiskId.of(ZONE, diskName); + DiskId snapshotDiskId = DiskId.of(ZONE, snapshotDiskName); + String snapshotName = BASE_RESOURCE_NAME + "create-and-get-snapshot"; + DiskInfo diskInfo = + DiskInfo.of(diskId, StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Disk remoteDisk = compute.getDisk(diskId); + operation = remoteDisk.createSnapshot(snapshotName); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get snapshot with selected fields + Snapshot snapshot = compute.getSnapshot(snapshotName, + Compute.SnapshotOption.fields(Compute.SnapshotField.CREATION_TIMESTAMP)); + assertNull(snapshot.generatedId()); + assertNotNull(snapshot.snapshotId()); + assertNotNull(snapshot.creationTimestamp()); + assertNull(snapshot.description()); + assertNull(snapshot.status()); + assertNull(snapshot.diskSizeGb()); + assertNull(snapshot.licenses()); + assertNull(snapshot.sourceDisk()); + assertNull(snapshot.sourceDiskId()); + 
assertNull(snapshot.storageBytes()); + assertNull(snapshot.storageBytesStatus()); + // test get snapshot + snapshot = compute.getSnapshot(snapshotName); + assertNotNull(snapshot.generatedId()); + assertNotNull(snapshot.snapshotId()); + assertNotNull(snapshot.creationTimestamp()); + assertNotNull(snapshot.status()); + assertEquals(100L, (long) snapshot.diskSizeGb()); + assertEquals(diskName, snapshot.sourceDisk().disk()); + assertNotNull(snapshot.sourceDiskId()); + assertNotNull(snapshot.storageBytes()); + assertNotNull(snapshot.storageBytesStatus()); + remoteDisk.delete(); + diskInfo = + DiskInfo.of(snapshotDiskId, SnapshotDiskConfiguration.of(SnapshotId.of(snapshotName))); + operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get disk + remoteDisk = compute.getDisk(snapshotDiskId); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(snapshotDiskId.disk(), remoteDisk.diskId().disk()); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof SnapshotDiskConfiguration); + SnapshotDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(DiskConfiguration.Type.SNAPSHOT, remoteConfiguration.type()); + assertEquals(snapshotName, remoteConfiguration.sourceSnapshot().snapshot()); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().type()); + assertNotNull(remoteConfiguration.sourceSnapshotId()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + // test get disk with selected fields + remoteDisk = compute.getDisk(snapshotDiskId, Compute.DiskOption.fields()); + assertNotNull(remoteDisk); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertEquals(snapshotDiskId.disk(), remoteDisk.diskId().disk()); + assertNull(remoteDisk.creationStatus()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof SnapshotDiskConfiguration); + remoteConfiguration = remoteDisk.configuration(); + assertEquals(DiskConfiguration.Type.SNAPSHOT, remoteConfiguration.type()); + assertEquals(snapshotName, remoteConfiguration.sourceSnapshot().snapshot()); + assertNull(remoteConfiguration.sizeGb()); + assertEquals("pd-standard", remoteConfiguration.diskType().type()); + assertNull(remoteDisk.configuration().sourceSnapshotId()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + operation = remoteDisk.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getDisk(snapshotDiskId)); + operation = snapshot.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getSnapshot(snapshotName)); + } + + @Test + public void testListDisksAndSnapshots() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "list-disks-and-snapshots-disk"; + String[] diskNames = {prefix + "1", prefix + "2"}; + DiskId firstDiskId = DiskId.of(ZONE, diskNames[0]); + DiskId secondDiskId = DiskId.of(ZONE, diskNames[1]); + DiskConfiguration configuration = + StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L); + Operation firstOperation = compute.create(DiskInfo.of(firstDiskId, configuration)); + Operation secondOperation = 
compute.create(DiskInfo.of(secondDiskId, configuration)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set diskSet = ImmutableSet.copyOf(diskNames); + // test list disks + Compute.DiskFilter diskFilter = + Compute.DiskFilter.equals(Compute.DiskField.NAME, prefix + "\\d"); + Page diskPage = compute.listDisks(ZONE, Compute.DiskListOption.filter(diskFilter)); + Iterator diskIterator = diskPage.iterateAll(); + int count = 0; + while (diskIterator.hasNext()) { + Disk remoteDisk = diskIterator.next(); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertTrue(diskSet.contains(remoteDisk.diskId().disk())); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().type()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + count++; + } + assertEquals(2, count); + // test list disks with selected fields + count = 0; + diskPage = compute.listDisks(ZONE, Compute.DiskListOption.filter(diskFilter), + Compute.DiskListOption.fields(Compute.DiskField.STATUS)); + diskIterator = diskPage.iterateAll(); + while (diskIterator.hasNext()) { + Disk remoteDisk = diskIterator.next(); + assertEquals(ZONE, remoteDisk.diskId().zone()); + assertTrue(diskSet.contains(remoteDisk.diskId().disk())); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNull(remoteDisk.creationTimestamp()); + assertNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertNull(remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().type()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + assertNull(remoteDisk.lastAttachTimestamp()); + assertNull(remoteDisk.lastDetachTimestamp()); + count++; + } + assertEquals(2, count); + // test snapshots + SnapshotId firstSnapshotId = SnapshotId.of(diskNames[0]); + SnapshotId secondSnapshotId = SnapshotId.of(diskNames[1]); + firstOperation = compute.create(SnapshotInfo.of(firstSnapshotId, firstDiskId)); + secondOperation = compute.create(SnapshotInfo.of(secondSnapshotId, secondDiskId)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + // test list snapshots + Compute.SnapshotFilter snapshotFilter = + Compute.SnapshotFilter.equals(Compute.SnapshotField.NAME, prefix + "\\d"); + Page snapshotPage = + compute.listSnapshots(Compute.SnapshotListOption.filter(snapshotFilter)); + Iterator snapshotIterator = snapshotPage.iterateAll(); + count = 0; + while (snapshotIterator.hasNext()) { + Snapshot remoteSnapshot = snapshotIterator.next(); + assertNotNull(remoteSnapshot.generatedId()); + assertTrue(diskSet.contains(remoteSnapshot.snapshotId().snapshot())); + assertNotNull(remoteSnapshot.creationTimestamp()); + assertNotNull(remoteSnapshot.status()); + assertEquals(100L, (long) remoteSnapshot.diskSizeGb()); + 
assertTrue(diskSet.contains(remoteSnapshot.sourceDisk().disk())); + assertNotNull(remoteSnapshot.sourceDiskId()); + assertNotNull(remoteSnapshot.storageBytes()); + assertNotNull(remoteSnapshot.storageBytesStatus()); + count++; + } + assertEquals(2, count); + // test list snapshots with selected fields + snapshotPage = compute.listSnapshots(Compute.SnapshotListOption.filter(snapshotFilter), + Compute.SnapshotListOption.fields(Compute.SnapshotField.CREATION_TIMESTAMP)); + snapshotIterator = snapshotPage.iterateAll(); + count = 0; + while (snapshotIterator.hasNext()) { + Snapshot remoteSnapshot = snapshotIterator.next(); + assertNull(remoteSnapshot.generatedId()); + assertTrue(diskSet.contains(remoteSnapshot.snapshotId().snapshot())); + assertNotNull(remoteSnapshot.creationTimestamp()); + assertNull(remoteSnapshot.status()); + assertNull(remoteSnapshot.diskSizeGb()); + assertNull(remoteSnapshot.sourceDisk()); + assertNull(remoteSnapshot.sourceDiskId()); + assertNull(remoteSnapshot.storageBytes()); + assertNull(remoteSnapshot.storageBytesStatus()); + count++; + } + assertEquals(2, count); + compute.deleteDisk(firstDiskId); + compute.deleteDisk(secondDiskId); + compute.deleteSnapshot(firstSnapshotId); + compute.deleteSnapshot(secondSnapshotId); + } + + @Test + public void testAggregatedListDisks() throws InterruptedException { + String prefix = BASE_RESOURCE_NAME + "list-aggregated-disk"; + String[] diskZones = {"us-central1-a", "us-east1-c"}; + String[] diskNames = {prefix + "1", prefix + "2"}; + DiskId firstDiskId = DiskId.of(diskZones[0], diskNames[0]); + DiskId secondDiskId = DiskId.of(diskZones[1], diskNames[1]); + DiskConfiguration configuration = + StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L); + Operation firstOperation = compute.create(DiskInfo.of(firstDiskId, configuration)); + Operation secondOperation = compute.create(DiskInfo.of(secondDiskId, configuration)); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set zoneSet = ImmutableSet.copyOf(diskZones); + Set diskSet = ImmutableSet.copyOf(diskNames); + Compute.DiskFilter diskFilter = + Compute.DiskFilter.equals(Compute.DiskField.NAME, prefix + "\\d"); + Page diskPage = compute.listDisks(Compute.DiskAggregatedListOption.filter(diskFilter)); + Iterator diskIterator = diskPage.iterateAll(); + int count = 0; + while (diskIterator.hasNext()) { + Disk remoteDisk = diskIterator.next(); + assertTrue(zoneSet.contains(remoteDisk.diskId().zone())); + assertTrue(diskSet.contains(remoteDisk.diskId().disk())); + assertEquals(DiskInfo.CreationStatus.READY, remoteDisk.creationStatus()); + assertNotNull(remoteDisk.creationTimestamp()); + assertNotNull(remoteDisk.generatedId()); + assertTrue(remoteDisk.configuration() instanceof StandardDiskConfiguration); + StandardDiskConfiguration remoteConfiguration = remoteDisk.configuration(); + assertEquals(100L, (long) remoteConfiguration.sizeGb()); + assertEquals("pd-ssd", remoteConfiguration.diskType().type()); + assertEquals(DiskConfiguration.Type.STANDARD, remoteConfiguration.type()); + count++; + } + assertEquals(2, count); + compute.deleteDisk(firstDiskId); + compute.deleteDisk(secondDiskId); + } + + @Test + public void testCreateGetAndDeprecateImage() throws InterruptedException { + String diskName = BASE_RESOURCE_NAME + "create-and-get-image-disk"; + String imageName = BASE_RESOURCE_NAME + "create-and-get-image"; + DiskId diskId = DiskId.of(ZONE, diskName); + ImageId imageId = ImageId.of(imageName); + 
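+    // first create a standard disk that will serve as the source of the image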
DiskInfo diskInfo = + DiskInfo.of(diskId, StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd"), 100L)); + Operation operation = compute.create(diskInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Disk remoteDisk = compute.getDisk(diskId); + ImageInfo imageInfo = ImageInfo.of(imageId, DiskImageConfiguration.of(diskId)); + operation = compute.create(imageInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get image with selected fields + Image image = compute.getImage(imageId, + Compute.ImageOption.fields(Compute.ImageField.CREATION_TIMESTAMP)); + assertNull(image.generatedId()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNull(image.description()); + assertNotNull(image.configuration()); + assertTrue(image.configuration() instanceof DiskImageConfiguration); + DiskImageConfiguration remoteConfiguration = image.configuration(); + Assert.assertEquals(ImageConfiguration.Type.DISK, remoteConfiguration.type()); + assertEquals(diskName, remoteConfiguration.sourceDisk().disk()); + assertNull(image.status()); + assertNull(image.diskSizeGb()); + assertNull(image.licenses()); + assertNull(image.deprecationStatus()); + // test get image + image = compute.getImage(imageId); + assertNotNull(image.generatedId()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertTrue(image.configuration() instanceof DiskImageConfiguration); + remoteConfiguration = image.configuration(); + assertEquals(ImageConfiguration.Type.DISK, remoteConfiguration.type()); + assertEquals(diskName, remoteConfiguration.sourceDisk().disk()); + assertEquals(100L, (long) image.diskSizeGb()); + assertNotNull(image.status()); + assertNull(image.deprecationStatus()); + // test deprecate image + DeprecationStatus deprecationStatus = + DeprecationStatus.builder(DeprecationStatus.Status.DEPRECATED, imageId) + .deprecated(System.currentTimeMillis()) + .build(); + operation = image.deprecate(deprecationStatus); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + image = compute.getImage(imageId); + assertEquals(deprecationStatus, image.deprecationStatus()); + remoteDisk.delete(); + operation = image.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getImage(imageId)); + } + + @Test + public void testListImages() { + Page imagePage = compute.listImages(IMAGE_PROJECT); + Iterator imageIterator = imagePage.iterateAll(); + int count = 0; + while (imageIterator.hasNext()) { + count++; + Image image = imageIterator.next(); + assertNotNull(image.generatedId()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertNotNull(image.status()); + assertNotNull(image.diskSizeGb()); + } + assertTrue(count > 0); + } + + @Test + public void testListImagesWithSelectedFields() { + Page imagePage = + compute.listImages(IMAGE_PROJECT, Compute.ImageListOption.fields(Compute.ImageField.ID)); + Iterator imageIterator = imagePage.iterateAll(); + int count = 0; + while (imageIterator.hasNext()) { + count++; + Image image = imageIterator.next(); + assertNotNull(image.generatedId()); + assertNotNull(image.imageId()); + assertNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertNull(image.status()); + assertNull(image.diskSizeGb()); + assertNull(image.licenses()); + assertNull(image.deprecationStatus()); + } + assertTrue(count > 0); + } + + @Test + public void 
testListImagesWithFilter() { + Page imagePage = compute.listImages(IMAGE_PROJECT, Compute.ImageListOption.filter( + Compute.ImageFilter.equals(Compute.ImageField.ARCHIVE_SIZE_BYTES, 365056004L))); + Iterator imageIterator = imagePage.iterateAll(); + int count = 0; + while (imageIterator.hasNext()) { + count++; + Image image = imageIterator.next(); + assertNotNull(image.generatedId()); + assertNotNull(image.imageId()); + assertNotNull(image.creationTimestamp()); + assertNotNull(image.configuration()); + assertNotNull(image.status()); + assertNotNull(image.diskSizeGb()); + assertEquals(365056004L, + (long) image.configuration().archiveSizeBytes()); + } + assertTrue(count > 0); + } + + @Test + public void testCreateAndGetNetwork() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "create-and-get-network"; + NetworkId networkId = NetworkId.of(name); + NetworkInfo networkInfo = + NetworkInfo.of(networkId, StandardNetworkConfiguration.of("192.168.0.0/16")); + Operation operation = compute.create(networkInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get network with selected fields + Network network = compute.getNetwork(networkId.network(), + Compute.NetworkOption.fields(Compute.NetworkField.CREATION_TIMESTAMP)); + assertEquals(networkId.network(), network.networkId().network()); + assertNull(network.generatedId()); + assertNotNull(network.creationTimestamp()); + assertNull(network.description()); + assertEquals(NetworkConfiguration.Type.STANDARD, network.configuration().type()); + StandardNetworkConfiguration remoteConfiguration = network.configuration(); + assertEquals("192.168.0.0/16", remoteConfiguration.ipRange()); + // test get network + network = compute.getNetwork(networkId.network()); + assertEquals(networkId.network(), network.networkId().network()); + assertNotNull(network.generatedId()); + assertNotNull(network.creationTimestamp()); + assertEquals(NetworkConfiguration.Type.STANDARD, network.configuration().type()); + remoteConfiguration = network.configuration(); + assertEquals("192.168.0.0/16", remoteConfiguration.ipRange()); + operation = network.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getNetwork(name)); + } + + @Test + public void testListNetworks() throws InterruptedException { + String name = BASE_RESOURCE_NAME + "list-network"; + NetworkId networkId = NetworkId.of(name); + NetworkInfo networkInfo = + NetworkInfo.of(networkId, StandardNetworkConfiguration.of("192.168.0.0/16")); + Operation operation = compute.create(networkInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test list + Compute.NetworkFilter filter = Compute.NetworkFilter.equals(Compute.NetworkField.NAME, name); + Page networkPage = compute.listNetworks(Compute.NetworkListOption.filter(filter)); + Iterator networkIterator = networkPage.iterateAll(); + int count = 0; + while (networkIterator.hasNext()) { + Network network = networkIterator.next(); + assertEquals(networkId.network(), network.networkId().network()); + assertNotNull(network.generatedId()); + assertNotNull(network.creationTimestamp()); + assertEquals(NetworkConfiguration.Type.STANDARD, network.configuration().type()); + StandardNetworkConfiguration remoteConfiguration = network.configuration(); + assertEquals("192.168.0.0/16", remoteConfiguration.ipRange()); + count++; + } + assertEquals(1, count); + // test list with selected fields + count = 0; + networkPage = compute.listNetworks(Compute.NetworkListOption.filter(filter), + 
Compute.NetworkListOption.fields(Compute.NetworkField.CREATION_TIMESTAMP)); + networkIterator = networkPage.iterateAll(); + while (networkIterator.hasNext()) { + Network network = networkIterator.next(); + assertEquals(networkId.network(), network.networkId().network()); + assertNull(network.generatedId()); + assertNotNull(network.creationTimestamp()); + assertNull(network.description()); + assertEquals(NetworkConfiguration.Type.STANDARD, network.configuration().type()); + StandardNetworkConfiguration remoteConfiguration = network.configuration(); + assertEquals("192.168.0.0/16", remoteConfiguration.ipRange()); + count++; + } + assertEquals(1, count); + operation = compute.deleteNetwork(networkId); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getNetwork(name)); + } + + @Test + public void testCreateNetworkAndSubnetwork() throws InterruptedException { + String networkName = BASE_RESOURCE_NAME + "create-subnetwork-network"; + NetworkId networkId = NetworkId.of(networkName); + NetworkInfo networkInfo = NetworkInfo.of(networkId, SubnetNetworkConfiguration.of(false)); + Operation operation = compute.create(networkInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get network + Network network = compute.getNetwork(networkId.network()); + assertEquals(networkId.network(), network.networkId().network()); + assertNotNull(network.generatedId()); + assertNotNull(network.creationTimestamp()); + assertEquals(NetworkConfiguration.Type.SUBNET, network.configuration().type()); + assertTrue(network.configuration() instanceof SubnetNetworkConfiguration); + assertFalse(network.configuration().autoCreateSubnetworks()); + String subnetworkName = BASE_RESOURCE_NAME + "create-subnetwork-subnetwork"; + SubnetworkId subnetworkId = SubnetworkId.of(REGION, subnetworkName); + SubnetworkInfo subnetworkInfo = SubnetworkInfo.of(subnetworkId, networkId, "192.168.0.0/16"); + operation = compute.create(subnetworkInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get subnetwork with selected fields + Subnetwork subnetwork = compute.getSubnetwork(subnetworkId, + Compute.SubnetworkOption.fields(Compute.SubnetworkField.CREATION_TIMESTAMP)); + assertNull(subnetwork.generatedId()); + assertEquals(subnetworkId.subnetwork(), subnetwork.subnetworkId().subnetwork()); + assertNotNull(subnetwork.creationTimestamp()); + assertNull(subnetwork.description()); + assertNull(subnetwork.gatewayAddress()); + assertNull(subnetwork.network()); + assertNull(subnetwork.ipRange()); + // test get subnetwork + subnetwork = compute.getSubnetwork(subnetworkId); + assertNotNull(subnetwork.generatedId()); + assertEquals(subnetworkId.subnetwork(), subnetwork.subnetworkId().subnetwork()); + assertNotNull(subnetwork.creationTimestamp()); + assertNotNull(subnetwork.gatewayAddress()); + assertEquals(networkId.network(), subnetwork.network().network()); + assertEquals("192.168.0.0/16", subnetwork.ipRange()); + // test list subnetworks + Compute.SubnetworkFilter filter = + Compute.SubnetworkFilter.equals(Compute.SubnetworkField.NAME, subnetworkName); + Page subnetworkPage = + compute.listSubnetworks(REGION, Compute.SubnetworkListOption.filter(filter)); + Iterator subnetworkIterator = subnetworkPage.iterateAll(); + int count = 0; + while (subnetworkIterator.hasNext()) { + Subnetwork remoteSubnetwork = subnetworkIterator.next(); + assertNotNull(remoteSubnetwork.generatedId()); + assertEquals(subnetworkId.subnetwork(), remoteSubnetwork.subnetworkId().subnetwork()); + 
assertNotNull(remoteSubnetwork.creationTimestamp()); + assertNotNull(remoteSubnetwork.gatewayAddress()); + assertEquals(networkId.network(), remoteSubnetwork.network().network()); + assertEquals("192.168.0.0/16", remoteSubnetwork.ipRange()); + count++; + } + assertEquals(1, count); + // test list subnetworks with selected fields + subnetworkPage = compute.listSubnetworks(REGION, Compute.SubnetworkListOption.filter(filter), + Compute.SubnetworkListOption.fields(Compute.SubnetworkField.CREATION_TIMESTAMP)); + subnetworkIterator = subnetworkPage.iterateAll(); + count = 0; + while (subnetworkIterator.hasNext()) { + Subnetwork remoteSubnetwork = subnetworkIterator.next(); + assertNull(remoteSubnetwork.generatedId()); + assertEquals(subnetworkId.subnetwork(), remoteSubnetwork.subnetworkId().subnetwork()); + assertNotNull(remoteSubnetwork.creationTimestamp()); + assertNull(remoteSubnetwork.description()); + assertNull(remoteSubnetwork.gatewayAddress()); + assertNull(remoteSubnetwork.network()); + assertNull(remoteSubnetwork.ipRange()); + count++; + } + assertEquals(1, count); + operation = subnetwork.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + operation = compute.deleteNetwork(networkId); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getSubnetwork(subnetworkId)); + assertNull(compute.getNetwork(networkName)); + } + + @Test + public void testAggregatedListSubnetworks() throws InterruptedException { + String networkName = BASE_RESOURCE_NAME + "list-subnetwork-network"; + NetworkId networkId = NetworkId.of(networkName); + NetworkInfo networkInfo = NetworkInfo.of(networkId, SubnetNetworkConfiguration.of(false)); + Operation operation = compute.create(networkInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + String prefix = BASE_RESOURCE_NAME + "list-subnetwork"; + String[] regionNames = {"us-central1", "us-east1"}; + String[] subnetworkNames = {prefix + "1", prefix + "2"}; + String[] ipRanges = {"10.128.0.0/20", "10.132.0.0/20"}; + SubnetworkId firstSubnetworkId = SubnetworkId.of(regionNames[0], subnetworkNames[0]); + SubnetworkId secondSubnetworkId = SubnetworkId.of(regionNames[1], subnetworkNames[1]); + SubnetworkInfo firstSubnetworkInfo = + SubnetworkInfo.of(firstSubnetworkId, networkId, ipRanges[0]); + SubnetworkInfo secondSubnetworkInfo = + SubnetworkInfo.of(secondSubnetworkId, networkId, ipRanges[1]); + Operation firstOperation = compute.create(firstSubnetworkInfo); + Operation secondOperation = compute.create(secondSubnetworkInfo); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + Set regionSet = ImmutableSet.copyOf(regionNames); + Set subnetworkSet = ImmutableSet.copyOf(subnetworkNames); + Set rangeSet = ImmutableSet.copyOf(ipRanges); + Compute.SubnetworkFilter subnetworkFilter = + Compute.SubnetworkFilter.equals(Compute.SubnetworkField.NAME, prefix + "\\d"); + Page subnetworkPage = + compute.listSubnetworks(Compute.SubnetworkAggregatedListOption.filter(subnetworkFilter)); + Iterator subnetworkIterator = subnetworkPage.iterateAll(); + int count = 0; + while (subnetworkIterator.hasNext()) { + Subnetwork remoteSubnetwork = subnetworkIterator.next(); + assertNotNull(remoteSubnetwork.generatedId()); + assertTrue(regionSet.contains(remoteSubnetwork.subnetworkId().region())); + assertTrue(subnetworkSet.contains(remoteSubnetwork.subnetworkId().subnetwork())); + assertNotNull(remoteSubnetwork.creationTimestamp()); + 
assertNotNull(remoteSubnetwork.gatewayAddress()); + assertEquals(networkId.network(), remoteSubnetwork.network().network()); + assertTrue(rangeSet.contains(remoteSubnetwork.ipRange())); + count++; + } + assertEquals(2, count); + firstOperation = compute.deleteSubnetwork(firstSubnetworkId); + secondOperation = compute.deleteSubnetwork(secondSubnetworkId); + while (!firstOperation.isDone()) { + Thread.sleep(1000L); + } + while (!secondOperation.isDone()) { + Thread.sleep(1000L); + } + operation = compute.deleteNetwork(networkId); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getNetwork(networkName)); + } + + @Test + public void testCreateGetAndDeleteInstance() throws InterruptedException { + String instanceName = BASE_RESOURCE_NAME + "create-and-get-instance"; + String addressName = BASE_RESOURCE_NAME + "create-and-get-instance-address"; + // Create an address to assign to the instance + AddressId addressId = RegionAddressId.of(REGION, addressName); + AddressInfo addressInfo = AddressInfo.of(addressId); + Operation operation = compute.create(addressInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Address address = compute.getAddress(addressId); + // Create an instance + InstanceId instanceId = InstanceId.of(ZONE, instanceName); + NetworkId networkId = NetworkId.of("default"); + NetworkInterface networkInterface = NetworkInterface.builder(networkId) + .accessConfigurations(NetworkInterface.AccessConfig.builder().name("NAT").natIp(address.address()).build()) + .build(); + AttachedDisk disk1 = AttachedDisk.of("dev0", + AttachedDisk.CreateDiskConfiguration.builder(IMAGE_ID).autoDelete(true).build()); + AttachedDisk disk2 = + AttachedDisk.of("dev1", AttachedDisk.ScratchDiskConfiguration.of(DiskTypeId.of(ZONE, DISK_TYPE))); + InstanceInfo instanceInfo = + InstanceInfo.builder(instanceId, MachineTypeId.of(ZONE, "n1-standard-1")) + .attachedDisks(disk1, disk2) + .networkInterfaces(networkInterface) + .build(); + operation = compute.create(instanceInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // test get + Instance remoteInstance = compute.getInstance(instanceId); + assertEquals(instanceName, remoteInstance.instanceId().instance()); + assertEquals(ZONE, remoteInstance.instanceId().zone()); + assertEquals(InstanceInfo.Status.RUNNING, remoteInstance.status()); + assertEquals("n1-standard-1", remoteInstance.machineType().type()); + assertEquals(ZONE, remoteInstance.machineType().zone()); + assertNotNull(remoteInstance.creationTimestamp()); + Set deviceSet = ImmutableSet.of("dev0", "dev1"); + assertEquals(2, remoteInstance.attachedDisks().size()); + for (AttachedDisk remoteAttachedDisk : remoteInstance.attachedDisks()) { + assertTrue(deviceSet.contains(remoteAttachedDisk.deviceName())); + } + Assert.assertEquals(AttachedDisk.AttachedDiskConfiguration.Type.PERSISTENT, + remoteInstance.attachedDisks().get(0).configuration().type()); + AttachedDisk.PersistentDiskConfiguration remoteConfiguration = + remoteInstance.attachedDisks().get(0).configuration(); + assertEquals(instanceName, remoteConfiguration.sourceDisk().disk()); + assertEquals(ZONE, remoteConfiguration.sourceDisk().zone()); + assertTrue(remoteConfiguration.boot()); + assertTrue(remoteConfiguration.autoDelete()); + assertEquals(1, remoteInstance.networkInterfaces().size()); + NetworkInterface remoteNetworkInterface = remoteInstance.networkInterfaces().get(0); + assertNotNull(remoteNetworkInterface.name()); + assertEquals("default", 
remoteNetworkInterface.network().network()); + List remoteAccessConfigurations = remoteNetworkInterface.accessConfigurations(); + assertNotNull(remoteAccessConfigurations); + assertEquals(1, remoteAccessConfigurations.size()); + NetworkInterface.AccessConfig remoteAccessConfig = remoteAccessConfigurations.get(0); + assertEquals(address.address(), remoteAccessConfig.natIp()); + assertEquals("NAT", remoteAccessConfig.name()); + assertNotNull(remoteInstance.metadata()); + assertNotNull(remoteInstance.tags()); + // test get with selected fields + remoteInstance = compute.getInstance(instanceId, + Compute.InstanceOption.fields(Compute.InstanceField.CREATION_TIMESTAMP)); + assertEquals(instanceName, remoteInstance.instanceId().instance()); + assertEquals(ZONE, remoteInstance.instanceId().zone()); + assertNull(remoteInstance.machineType()); + assertNotNull(remoteInstance.creationTimestamp()); + assertNull(remoteInstance.attachedDisks()); + assertNull(remoteInstance.networkInterfaces()); + assertNull(remoteInstance.metadata()); + assertNull(remoteInstance.tags()); + // test get default serial port output + String serialPortOutput = remoteInstance.getSerialPortOutput(); + assertNotNull(serialPortOutput); + // test get serial port output by number + String newSerialPortOutput = remoteInstance.getSerialPortOutput(1); + assertTrue(newSerialPortOutput.contains(serialPortOutput)); + operation = remoteInstance.delete(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + assertNull(compute.getInstance(instanceId)); + address.delete(); + } + + @Test + public void testStartStopAndResetInstance() throws InterruptedException { + String instanceName = BASE_RESOURCE_NAME + "start-stop-reset-instance"; + InstanceId instanceId = InstanceId.of(ZONE, instanceName); + NetworkId networkId = NetworkId.of("default"); + NetworkInterface networkInterface = NetworkInterface.builder(networkId).build(); + AttachedDisk disk = AttachedDisk.of("dev0", + AttachedDisk.CreateDiskConfiguration.builder(IMAGE_ID).autoDelete(true).build()); + InstanceInfo instanceInfo = + InstanceInfo.builder(instanceId, MachineTypeId.of(ZONE, MACHINE_TYPE)) + .attachedDisks(disk) + .networkInterfaces(networkInterface) + .build(); + Operation operation = compute.create(instanceInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Instance remoteInstance = compute.getInstance(instanceId, + Compute.InstanceOption.fields(Compute.InstanceField.STATUS)); + assertEquals(InstanceInfo.Status.RUNNING, remoteInstance.status()); + operation = remoteInstance.stop(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId, + Compute.InstanceOption.fields(Compute.InstanceField.STATUS)); + assertEquals(InstanceInfo.Status.TERMINATED, remoteInstance.status()); + operation = remoteInstance.start(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId, + Compute.InstanceOption.fields(Compute.InstanceField.STATUS)); + assertEquals(InstanceInfo.Status.RUNNING, remoteInstance.status()); + operation = remoteInstance.reset(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId, + Compute.InstanceOption.fields(Compute.InstanceField.STATUS)); + assertEquals(InstanceInfo.Status.RUNNING, remoteInstance.status()); + remoteInstance.delete(); + } + + @Test + public void testSetInstanceProperties() throws InterruptedException { + String instanceName = BASE_RESOURCE_NAME + 
"set-properties-instance"; + InstanceId instanceId = InstanceId.of(ZONE, instanceName); + NetworkId networkId = NetworkId.of("default"); + NetworkInterface networkInterface = NetworkInterface.builder(networkId).build(); + AttachedDisk disk = AttachedDisk.of("dev0", + AttachedDisk.CreateDiskConfiguration.builder(IMAGE_ID).autoDelete(true).build()); + InstanceInfo instanceInfo = + InstanceInfo.builder(instanceId, MachineTypeId.of(ZONE, MACHINE_TYPE)) + .attachedDisks(disk) + .networkInterfaces(networkInterface) + .build(); + Operation operation = compute.create(instanceInfo); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + Instance remoteInstance = compute.getInstance(instanceId); + // test set tags + List tags = ImmutableList.of("tag1", "tag2"); + operation = remoteInstance.setTags(tags); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertEquals(tags, remoteInstance.tags().values()); + // test set metadata + Map metadata = ImmutableMap.of("key", "value"); + operation = remoteInstance.setMetadata(metadata); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertEquals(metadata, remoteInstance.metadata().values()); + // test set machine type + operation = remoteInstance.stop(); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + operation = remoteInstance.setMachineType(MachineTypeId.of(ZONE, "n1-standard-1")); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertEquals("n1-standard-1", remoteInstance.machineType().type()); + assertEquals(ZONE, remoteInstance.machineType().zone()); + // test set scheduling options + SchedulingOptions options = SchedulingOptions.standard(false, SchedulingOptions.Maintenance.TERMINATE); + operation = remoteInstance.setSchedulingOptions(options); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertEquals(options, remoteInstance.schedulingOptions()); + remoteInstance.delete(); + } + + @Test + public void testAttachAndDetachDisk() throws InterruptedException { + String instanceName = BASE_RESOURCE_NAME + "attach-and-detach-disk-instance"; + String diskName = BASE_RESOURCE_NAME + "attach-and-detach-disk"; + InstanceId instanceId = InstanceId.of(ZONE, instanceName); + NetworkId networkId = NetworkId.of("default"); + NetworkInterface networkInterface = NetworkInterface.builder(networkId).build(); + AttachedDisk disk = AttachedDisk.of("dev0", + AttachedDisk.CreateDiskConfiguration.builder(IMAGE_ID).autoDelete(true).build()); + InstanceInfo instanceInfo = + InstanceInfo.builder(instanceId, MachineTypeId.of(ZONE, MACHINE_TYPE)) + .attachedDisks(disk) + .networkInterfaces(networkInterface) + .build(); + Operation instanceOperation = compute.create(instanceInfo); + DiskId diskId = DiskId.of(ZONE, diskName); + Operation diskOperation = compute.create(DiskInfo.of(diskId, + StandardDiskConfiguration.of(DiskTypeId.of(ZONE, "pd-ssd")))); + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + while (!diskOperation.isDone()) { + Thread.sleep(1000L); + } + Instance remoteInstance = compute.getInstance(instanceId); + // test attach disk + instanceOperation = remoteInstance.attachDisk("dev1", + AttachedDisk.PersistentDiskConfiguration.builder(diskId).build()); + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + Set 
deviceSet = ImmutableSet.of("dev0", "dev1"); + assertEquals(2, remoteInstance.attachedDisks().size()); + for (AttachedDisk remoteAttachedDisk : remoteInstance.attachedDisks()) { + assertTrue(deviceSet.contains(remoteAttachedDisk.deviceName())); + } + // test set disk auto-delete + instanceOperation = remoteInstance.setDiskAutoDelete("dev1", true); + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertEquals(2, remoteInstance.attachedDisks().size()); + for (AttachedDisk remoteAttachedDisk : remoteInstance.attachedDisks()) { + assertTrue(deviceSet.contains(remoteAttachedDisk.deviceName())); + assertTrue(remoteAttachedDisk.configuration().autoDelete()); + } + // test detach disk + instanceOperation = remoteInstance.detachDisk("dev1"); + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertEquals(1, remoteInstance.attachedDisks().size()); + assertEquals("dev0", remoteInstance.attachedDisks().get(0).deviceName()); + remoteInstance.delete(); + compute.deleteDisk(diskId); + } + + @Test + public void testAddAndRemoveAccessConfig() throws InterruptedException { + String instanceName = BASE_RESOURCE_NAME + "add-and-remove-access-instance"; + String addressName = BASE_RESOURCE_NAME + "add-and-remove-access-address"; + InstanceId instanceId = InstanceId.of(ZONE, instanceName); + NetworkId networkId = NetworkId.of("default"); + NetworkInterface networkInterface = NetworkInterface.builder(networkId).build(); + AttachedDisk disk = AttachedDisk.of("dev0", + AttachedDisk.CreateDiskConfiguration.builder(IMAGE_ID).autoDelete(true).build()); + InstanceInfo instanceInfo = + InstanceInfo.builder(instanceId, MachineTypeId.of(ZONE, MACHINE_TYPE)) + .attachedDisks(disk) + .networkInterfaces(networkInterface) + .build(); + Operation instanceOperation = compute.create(instanceInfo); + AddressId addressId = RegionAddressId.of(REGION, addressName); + AddressInfo addressInfo = AddressInfo.of(addressId); + Operation addressOperation = compute.create(addressInfo); + while (!addressOperation.isDone()) { + Thread.sleep(1000L); + } + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + while (!addressOperation.isDone()) { + Thread.sleep(1000L); + } + Address remoteAddress = compute.getAddress(addressId); + Instance remoteInstance = compute.getInstance(instanceId); + String networkInterfaceName = remoteInstance.networkInterfaces().get(0).name(); + // test add access config + NetworkInterface.AccessConfig accessConfig = NetworkInterface.AccessConfig.builder() + .natIp(remoteAddress.address()) + .name("NAT") + .build(); + instanceOperation = remoteInstance.addAccessConfig(networkInterfaceName, accessConfig); + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + List accessConfigurations = + remoteInstance.networkInterfaces().get(0).accessConfigurations(); + assertEquals(1, accessConfigurations.size()); + assertEquals("NAT", accessConfigurations.get(0).name()); + // test delete access config + instanceOperation = remoteInstance.deleteAccessConfig(networkInterfaceName, "NAT"); + while (!instanceOperation.isDone()) { + Thread.sleep(1000L); + } + remoteInstance = compute.getInstance(instanceId); + assertTrue(remoteInstance.networkInterfaces().get(0).accessConfigurations().isEmpty()); + remoteInstance.delete(); + remoteAddress.delete(); + } +} diff --git 
a/gcloud-java-compute/src/test/java/com/google/cloud/compute/testing/RemoteComputeHelperTest.java b/gcloud-java-compute/src/test/java/com/google/cloud/compute/testing/RemoteComputeHelperTest.java new file mode 100644 index 000000000000..947bd4f1ea8c --- /dev/null +++ b/gcloud-java-compute/src/test/java/com/google/cloud/compute/testing/RemoteComputeHelperTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.compute.testing; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertSame; +import static org.junit.Assert.assertTrue; + +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.testing.RemoteComputeHelper.ComputeHelperException; + +import org.junit.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.regex.Pattern; + +public class RemoteComputeHelperTest { + + private static final String PROJECT_ID = "project-id"; + private static final String JSON_KEY = "{\n" + + " \"private_key_id\": \"somekeyid\",\n" + + " \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggS" + + "kAgEAAoIBAQC+K2hSuFpAdrJI\\nnCgcDz2M7t7bjdlsadsasad+fvRSW6TjNQZ3p5LLQY1kSZRqBqylRkzteMOyHg" + + "aR\\n0Pmxh3ILCND5men43j3h4eDbrhQBuxfEMalkG92sL+PNQSETY2tnvXryOvmBRwa/\\nQP/9dJfIkIDJ9Fw9N4" + + "Bhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\nknddadwkwewcVxHFhcZJO+XWf6ofLUXpRwiTZakGMn8EE1uVa2" + + "LgczOjwWHGi99MFjxSer5m9\\n1tCa3/KEGKiS/YL71JvjwX3mb+cewlkcmweBKZHM2JPTk0ZednFSpVZMtycjkbLa" + + "\\ndYOS8V85AgMBewECggEBAKksaldajfDZDV6nGqbFjMiizAKJolr/M3OQw16K6o3/\\n0S31xIe3sSlgW0+UbYlF" + + "4U8KifhManD1apVSC3csafaspP4RZUHFhtBywLO9pR5c\\nr6S5aLp+gPWFyIp1pfXbWGvc5VY/v9x7ya1VEa6rXvL" + + "sKupSeWAW4tMj3eo/64ge\\nsdaceaLYw52KeBYiT6+vpsnYrEkAHO1fF/LavbLLOFJmFTMxmsNaG0tuiJHgjshB\\" + + "n82DpMCbXG9YcCgI/DbzuIjsdj2JC1cascSP//3PmefWysucBQe7Jryb6NQtASmnv\\nCdDw/0jmZTEjpe4S1lxfHp" + + "lAhHFtdgYTvyYtaLZiVVkCgYEA8eVpof2rceecw/I6\\n5ng1q3Hl2usdWV/4mZMvR0fOemacLLfocX6IYxT1zA1FF" + + "JlbXSRsJMf/Qq39mOR2\\nSpW+hr4jCoHeRVYLgsbggtrevGmILAlNoqCMpGZ6vDmJpq6ECV9olliDvpPgWOP+\\nm" + + "YPDreFBGxWvQrADNbRt2dmGsrsCgYEAyUHqB2wvJHFqdmeBsaacewzV8x9WgmeX\\ngUIi9REwXlGDW0Mz50dxpxcK" + + "CAYn65+7TCnY5O/jmL0VRxU1J2mSWyWTo1C+17L0\\n3fUqjxL1pkefwecxwecvC+gFFYdJ4CQ/MHHXU81Lwl1iWdF" + + "Cd2UoGddYaOF+KNeM\\nHC7cmqra+JsCgYEAlUNywzq8nUg7282E+uICfCB0LfwejuymR93CtsFgb7cRd6ak\\nECR" + + "8FGfCpH8ruWJINllbQfcHVCX47ndLZwqv3oVFKh6pAS/vVI4dpOepP8++7y1u\\ncoOvtreXCX6XqfrWDtKIvv0vjl" + + "HBhhhp6mCcRpdQjV38H7JsyJ7lih/oNjECgYAt\\nkndj5uNl5SiuVxHFhcZJO+XWf6ofLUregtevZakGMn8EE1uVa" + + "2AY7eafmoU/nZPT\\n00YB0TBATdCbn/nBSuKDESkhSg9s2GEKQZG5hBmL5uCMfo09z3SfxZIhJdlerreP\\nJ7gSi" + + "dI12N+EZxYd4xIJh/HFDgp7RRO87f+WJkofMQKBgGTnClK1VMaCRbJZPriw\\nEfeFCoOX75MxKwXs6xgrw4W//AYG" + + 
"GUjDt83lD6AZP6tws7gJ2IwY/qP7+lyhjEqN\\nHtfPZRGFkGZsdaksdlaksd323423d+15/UvrlRSFPNj1tWQmNKk" + + "XyRDW4IG1Oa2p\\nrALStNBx5Y9t0/LQnFI4w3aG\\n-----END PRIVATE KEY-----\\n\",\n" + + " \"client_email\": \"someclientid@developer.gserviceaccount.com\",\n" + + " \"client_id\": \"someclientid.apps.googleusercontent.com\",\n" + + " \"type\": \"service_account\"\n" + + "}"; + private static final InputStream JSON_KEY_STREAM = new ByteArrayInputStream(JSON_KEY.getBytes()); + private static final String BASE_RESOURCE_NAME_REGEX = "test-[0-9a-f]{24}-"; + private static final Pattern BASE_RESOURCE_NAME_PATTERN = + Pattern.compile(BASE_RESOURCE_NAME_REGEX); + + @Test + public void testBaseResourceName() { + String baseResourceName = RemoteComputeHelper.baseResourceName(); + assertTrue(BASE_RESOURCE_NAME_PATTERN.matcher(baseResourceName).matches()); + } + + @Test + public void testCreateFromStream() { + RemoteComputeHelper helper = RemoteComputeHelper.create(PROJECT_ID, JSON_KEY_STREAM); + ComputeOptions options = helper.options(); + assertEquals(PROJECT_ID, options.projectId()); + assertEquals(60000, options.connectTimeout()); + assertEquals(60000, options.readTimeout()); + assertEquals(10, options.retryParams().retryMaxAttempts()); + assertEquals(6, options.retryParams().retryMinAttempts()); + assertEquals(30000, options.retryParams().maxRetryDelayMillis()); + assertEquals(120000, options.retryParams().totalRetryPeriodMillis()); + assertEquals(250, options.retryParams().initialRetryDelayMillis()); + } + + @Test + public void testComputeHelperException() { + ComputeHelperException exception = new ComputeHelperException("message", null); + assertEquals("message", exception.getMessage()); + assertNull(exception.getCause()); + IOException cause = new IOException("message"); + exception = ComputeHelperException.translate(cause); + assertEquals("message", exception.getMessage()); + assertSame(cause, exception.getCause()); + } +} diff --git a/gcloud-java-examples/README.md b/gcloud-java-examples/README.md index 2aab551da6f6..f8bbde47b701 100644 --- a/gcloud-java-examples/README.md +++ b/gcloud-java-examples/README.md @@ -62,6 +62,18 @@ To run examples from your command line: mvn exec:java -Dexec.mainClass="com.google.cloud.examples.bigquery.BigQueryExample" -Dexec.args="query 'select * from new_dataset_id.new_table_id'" ``` + * Here's an example run of `ComputeExample`. + + Before running the example, go to the [Google Developers Console][developers-console] to ensure + that Compute API is enabled. + ``` + mvn exec:java -Dexec.mainClass="com.google.cloud.examples.compute.ComputeExample" -Dexec.args="create image-disk us-central1-a test-disk debian-cloud debian-8-jessie-v20160329" + mvn exec:java -Dexec.mainClass="com.google.cloud.examples.compute.ComputeExample" -Dexec.args="create instance us-central1-a test-instance n1-standard-1 test-disk default" + mvn exec:java -Dexec.mainClass="com.google.cloud.examples.compute.ComputeExample" -Dexec.args="add-access-config us-central1-a test-instance nic0 NAT" + mvn exec:java -Dexec.mainClass="com.google.cloud.examples.compute.ComputeExample" -Dexec.args="delete instance us-central1-a test-instance" + mvn exec:java -Dexec.mainClass="com.google.cloud.examples.compute.ComputeExample" -Dexec.args="delete disk us-central1-a test-disk" + ``` + * Here's an example run of `DatastoreExample`. Be sure to change the placeholder project ID "your-project-id" with your own project ID. 
Also note that you have to enable the Google Cloud Datastore API on the [Google Developers Console][developers-console] before running the following commands. diff --git a/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/ComputeExample.java b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/ComputeExample.java new file mode 100644 index 000000000000..d7f8e671cb72 --- /dev/null +++ b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/ComputeExample.java @@ -0,0 +1,2539 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.examples.compute; + +import com.google.cloud.compute.Address; +import com.google.cloud.compute.AddressId; +import com.google.cloud.compute.AddressInfo; +import com.google.cloud.compute.AttachedDisk; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.Disk; +import com.google.cloud.compute.DiskConfiguration; +import com.google.cloud.compute.DiskId; +import com.google.cloud.compute.DiskImageConfiguration; +import com.google.cloud.compute.DiskInfo; +import com.google.cloud.compute.DiskType; +import com.google.cloud.compute.DiskTypeId; +import com.google.cloud.compute.GlobalAddressId; +import com.google.cloud.compute.GlobalOperationId; +import com.google.cloud.compute.Image; +import com.google.cloud.compute.ImageDiskConfiguration; +import com.google.cloud.compute.ImageId; +import com.google.cloud.compute.ImageInfo; +import com.google.cloud.compute.Instance; +import com.google.cloud.compute.InstanceId; +import com.google.cloud.compute.InstanceInfo; +import com.google.cloud.compute.LicenseId; +import com.google.cloud.compute.MachineType; +import com.google.cloud.compute.MachineTypeId; +import com.google.cloud.compute.Network; +import com.google.cloud.compute.NetworkId; +import com.google.cloud.compute.NetworkInfo; +import com.google.cloud.compute.NetworkInterface; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.cloud.compute.Operation; +import com.google.cloud.compute.Region; +import com.google.cloud.compute.RegionAddressId; +import com.google.cloud.compute.RegionId; +import com.google.cloud.compute.RegionOperationId; +import com.google.cloud.compute.SchedulingOptions; +import com.google.cloud.compute.SchedulingOptions.Maintenance; +import com.google.cloud.compute.Snapshot; +import com.google.cloud.compute.SnapshotDiskConfiguration; +import com.google.cloud.compute.SnapshotId; +import com.google.cloud.compute.SnapshotInfo; +import com.google.cloud.compute.StandardDiskConfiguration; +import com.google.cloud.compute.StandardNetworkConfiguration; +import com.google.cloud.compute.SubnetNetworkConfiguration; +import com.google.cloud.compute.Subnetwork; +import com.google.cloud.compute.SubnetworkId; +import com.google.cloud.compute.SubnetworkInfo; +import 
com.google.cloud.compute.Zone; +import com.google.cloud.compute.ZoneId; +import com.google.cloud.compute.ZoneOperationId; +import com.google.cloud.compute.spi.ComputeRpc.Tuple; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.google.common.collect.Maps; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * An example of using Google Compute. + * + *

This example demonstrates a simple/typical Compute usage.
+ *
+ * Steps needed for running the example:
+ *   1. login using gcloud SDK - {@code gcloud auth login}.
+ *   2. compile using maven - {@code mvn compile}
+ *   3. run using maven -
+ *   {@code mvn exec:java -Dexec.mainClass="com.google.cloud.examples.compute.ComputeExample"
+ *  -Dexec.args="[<project_id>]
+ *  list networks |
+ *  list region-operations |
+ *  list instances ? |
+ *  list regions |
+ *  list zones |
+ *  list zone-operations |
+ *  list disks ? |
+ *  list subnetworks ? |
+ *  list machineTypes ? |
+ *  list global-operations |
+ *  list images |
+ *  list diskTypes ? |
+ *  list snapshots |
+ *  list addresses ? |
+ *  create subnet-network true|false |
+ *  create image-disk ? |
+ *  create subnetwork |
+ *  create address ? |
+ *  create snapshot |
+ *  create snapshot-disk |
+ *  create image |
+ *  create standard-network |
+ *  create instance |
+ *  create standard-disk ? |
+ *  info region |
+ *  info region-operation |
+ *  info machineType |
+ *  info snapshot |
+ *  info disk |
+ *  info image |
+ *  info diskType |
+ *  info network |
+ *  info zone-operation |
+ *  info subnetwork |
+ *  info address ? |
+ *  info instance |
+ *  info license |
+ *  info global-operation |
+ *  info zone |
+ *  delete region-operation |
+ *  delete zone-operation |
+ *  delete subnetwork |
+ *  delete address ? |
+ *  delete snapshot |
+ *  delete disk |
+ *  delete image |
+ *  delete instance |
+ *  delete global-operation |
+ *  delete network |
+ *  get-serial-port |
+ *  set-machine-type |
+ *  set-disk-auto-delete true|false |
+ *  set-scheduling-options preemptible|(standard true|false MIGRATE|TERMINATE) |
+ *  add-access-config ? |
+ *  delete-access-config |
+ *  attach-disk |
+ *  detach-disk |
+ *  start |
+ *  stop |
+ *  reset |
+ *  set-tags * |
+ *  set-metadata *"}
+ *
+ *
The first parameter is an optional {@code project_id} (logged-in project will be used if not + * supplied). Second parameter is a Compute operation and can be used to demonstrate its usage. For + * operations that apply to more than one entity (`list`, `create`, `info` and `delete`) the third + * parameter specifies the entity. + */ +public class ComputeExample { + + private static final Map CREATE_ACTIONS = new HashMap<>(); + private static final Map INFO_ACTIONS = new HashMap<>(); + private static final Map LIST_ACTIONS = new HashMap<>(); + private static final Map DELETE_ACTIONS = new HashMap<>(); + private static final Map ACTIONS = new HashMap<>(); + + static class Triple { + + private final X x; + private final Y y; + private final Z z; + + private Triple(X x, Y y, Z z) { + this.x = x; + this.y = y; + this.z = z; + } + + public static Triple of(X x, Y y, Z z) { + return new Triple<>(x, y, z); + } + + X x() { + return x; + } + + Y y() { + return y; + } + + Z z() { + return z; + } + } + + private abstract static class ComputeAction { + + abstract void run(Compute compute, T request) throws Exception; + + abstract T parse(String... args) throws Exception; + + protected String params() { + return ""; + } + } + + private static class ParentAction extends ComputeAction> { + + private final Map subActions; + + ParentAction(Map subActions) { + this.subActions = ImmutableMap.copyOf(subActions); + } + + @Override + @SuppressWarnings("unchecked") + void run(Compute compute, Tuple subaction) throws Exception { + subaction.x().run(compute, subaction.y()); + } + + @Override + Tuple parse(String... args) throws Exception { + if (args.length >= 1) { + ComputeAction action = subActions.get(args[0]); + if (action != null) { + Object actionArguments = action.parse(Arrays.copyOfRange(args, 1, args.length)); + return Tuple.of(action, actionArguments); + } else { + throw new IllegalArgumentException("Unrecognized entity '" + args[0] + "'."); + } + } + throw new IllegalArgumentException("Missing required entity."); + } + + @Override + public String params() { + StringBuilder builder = new StringBuilder(); + for (Map.Entry entry : subActions.entrySet()) { + builder.append('\n').append(entry.getKey()); + String param = entry.getValue().params(); + if (param != null && !param.isEmpty()) { + builder.append(' ').append(param); + } + } + return builder.toString(); + } + } + + private abstract static class OptionalZoneAction extends ComputeAction { + @Override + ZoneId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return ZoneId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + return null; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return "?"; + } + } + + private abstract static class OptionalRegionAction extends ComputeAction { + @Override + RegionId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return RegionId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + return null; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return "?"; + } + } + + private abstract static class NoArgsAction extends ComputeAction { + @Override + Void parse(String... 
args) throws Exception { + if (args.length == 0) { + return null; + } + throw new IllegalArgumentException("This action takes no arguments."); + } + } + + /** + * This class demonstrates how to list Compute disk types. + * + * @see DiskTypes: + * list + * @see + * DiskTypes: aggregated list + */ + private static class ListDiskTypesAction extends OptionalZoneAction { + @Override + public void run(Compute compute, ZoneId zone) { + Iterator diskTypeIterator; + if (zone != null) { + diskTypeIterator = compute.listDiskTypes(zone.zone()).iterateAll(); + } else { + diskTypeIterator = compute.listDiskTypes().iterateAll(); + } + while (diskTypeIterator.hasNext()) { + System.out.println(diskTypeIterator.next()); + } + } + } + + /** + * This class demonstrates how to retrieve information on a Compute disk type. + * + * @see DiskTypes: + * get + */ + private static class DiskTypeInfoAction extends ComputeAction { + @Override + public void run(Compute compute, DiskTypeId diskType) { + System.out.println("Disk type info: " + compute.getDiskType(diskType)); + } + + @Override + DiskTypeId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return DiskTypeId.of(args[0], args[1]); + } else if (args.length < 2) { + message = "Missing required zone and disk type id."; + } else { + message = "Too many arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + /** + * This class demonstrates how to list Compute machine types. + * + * @see + * MachineTypes: list + * @see + * MachineTypes: aggregated list + */ + private static class ListMachineTypesAction extends OptionalZoneAction { + @Override + public void run(Compute compute, ZoneId zone) { + Iterator machineTypeIterator; + if (zone != null) { + machineTypeIterator = compute.listMachineTypes(zone.zone()).iterateAll(); + } else { + machineTypeIterator = compute.listMachineTypes().iterateAll(); + } + while (machineTypeIterator.hasNext()) { + System.out.println(machineTypeIterator.next()); + } + } + } + + /** + * This class demonstrates how to retrieve information on a Compute machine type. + * + * @see + * MachineTypes: get + */ + private static class MachineTypeInfoAction extends ComputeAction { + @Override + public void run(Compute compute, MachineTypeId machineType) { + System.out.println("Machine type info: " + compute.getMachineType(machineType)); + } + + @Override + MachineTypeId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return MachineTypeId.of(args[0], args[1]); + } else if (args.length < 2) { + message = "Missing required zone and machine type id."; + } else { + message = "Too many arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + /** + * This class demonstrates how to list Compute regions. + * + * @see Regions: + * list + */ + private static class ListRegionsAction extends NoArgsAction { + @Override + public void run(Compute compute, Void arg) { + Iterator regionIterator = compute.listRegions().iterateAll(); + while (regionIterator.hasNext()) { + System.out.println(regionIterator.next()); + } + } + } + + /** + * This class demonstrates how to retrieve information on a Compute region. 
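+ * Expects a single region id argument (for example {@code us-central1}).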
+ * + * @see + * Regions: get + */ + private static class RegionInfoAction extends ComputeAction { + @Override + public void run(Compute compute, RegionId region) { + System.out.println("Region info: " + compute.getRegion(region.region())); + } + + @Override + RegionId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return RegionId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required region id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to list Compute zones. + * + * @see Zones: list + * + */ + private static class ListZonesAction extends NoArgsAction { + @Override + public void run(Compute compute, Void arg) { + Iterator zoneIterator = compute.listZones().iterateAll(); + while (zoneIterator.hasNext()) { + System.out.println(zoneIterator.next()); + } + } + } + + /** + * This class demonstrates how to retrieve information on a Compute zone. + * + * @see Zones: get + */ + private static class ZoneInfoAction extends ComputeAction { + @Override + public void run(Compute compute, ZoneId zone) { + System.out.println("Zone info: " + compute.getZone(zone.zone())); + } + + @Override + ZoneId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return ZoneId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required zone id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute license. + * + * @see License: + * get + */ + private static class LicenseInfoAction extends ComputeAction { + @Override + public void run(Compute compute, LicenseId license) { + System.out.println("License info: " + compute.getLicense(license.license())); + } + + @Override + LicenseId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return LicenseId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required license id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to list Compute global operations. + * + * @see + * GLobalOperations: list + */ + private static class ListGlobalOperationsAction extends NoArgsAction { + @Override + public void run(Compute compute, Void arg) { + Iterator operationIterator = compute.listGlobalOperations().iterateAll(); + while (operationIterator.hasNext()) { + System.out.println(operationIterator.next()); + } + } + } + + /** + * This class demonstrates how to list Compute zone operations. + * + * @see + * ZoneOperations: list + */ + private static class ListZoneOperationsAction extends ComputeAction { + @Override + public void run(Compute compute, ZoneId zone) { + Iterator operationIterator = compute.listZoneOperations(zone.zone()).iterateAll(); + while (operationIterator.hasNext()) { + System.out.println(operationIterator.next()); + } + } + + @Override + ZoneId parse(String... 
args) throws Exception { + String message; + if (args.length == 1) { + return ZoneId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required zone id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to list Compute region operations. + * + * @see + * RegionOperations: list + */ + private static class ListRegionOperationsAction extends ComputeAction { + @Override + public void run(Compute compute, RegionId region) { + Iterator operationIterator = + compute.listRegionOperations(region.region()).iterateAll(); + while (operationIterator.hasNext()) { + System.out.println(operationIterator.next()); + } + } + + @Override + RegionId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return RegionId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required region id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + private abstract static class GlobalOperationAction extends ComputeAction { + @Override + GlobalOperationId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return GlobalOperationId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required operation id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + private abstract static class ZoneOperationAction extends ComputeAction { + @Override + ZoneOperationId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return ZoneOperationId.of(args[0], (args[1])); + } else if (args.length > 2) { + message = "Too many arguments."; + } else { + message = "Missing required zone and operation id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + private abstract static class RegionOperationAction extends ComputeAction { + @Override + RegionOperationId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return RegionOperationId.of(args[0], (args[1])); + } else if (args.length > 2) { + message = "Too many arguments."; + } else { + message = "Missing required region and operation id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute global operation. + * + * @see + * GlobalOperations: get + */ + private static class GlobalOperationInfoAction extends GlobalOperationAction { + @Override + public void run(Compute compute, GlobalOperationId operation) { + System.out.println("Operation info: " + compute.getOperation(operation)); + } + } + + /** + * This class demonstrates how to retrieve information on a Compute zone operation. + * + * @see + * ZoneOperations: get + */ + private static class ZoneOperationInfoAction extends ZoneOperationAction { + @Override + public void run(Compute compute, ZoneOperationId operation) { + System.out.println("Operation info: " + compute.getOperation(operation)); + } + } + + /** + * This class demonstrates how to retrieve information on a Compute region operation. 
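+   *
+   * <p>For example, {@code info region-operation us-central1 operation-1234567890} (region and
+   * operation ids are placeholders) should print the details of the given region operation.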
+ * + * @see + * RegionOperations: get + */ + private static class RegionOperationInfoAction extends RegionOperationAction { + @Override + public void run(Compute compute, RegionOperationId operation) { + System.out.println("Operation info: " + compute.getOperation(operation)); + } + } + + /** + * This class demonstrates how to delete a Compute global operation. + * + * @see + * GlobalOperations: delete + */ + private static class DeleteGlobalOperationAction extends GlobalOperationAction { + @Override + public void run(Compute compute, GlobalOperationId operation) { + if (compute.deleteOperation(operation)) { + System.out.println("Operation " + operation + " was deleted"); + } else { + System.out.println("Operation " + operation + " not found"); + } + } + } + + /** + * This class demonstrates how to delete a Compute zone operation. + * + * @see + * ZoneOperations: delete + */ + private static class DeleteZoneOperationAction extends ZoneOperationAction { + @Override + public void run(Compute compute, ZoneOperationId operation) { + if (compute.deleteOperation(operation)) { + System.out.println("Operation " + operation + " was deleted"); + } else { + System.out.println("Operation " + operation + " not found"); + } + } + } + + /** + * This class demonstrates how to delete a Compute region operation. + * + * @see + * RegionOperations: delete + */ + private static class DeleteRegionOperationAction extends RegionOperationAction { + @Override + public void run(Compute compute, RegionOperationId operation) { + if (compute.deleteOperation(operation)) { + System.out.println("Operation " + operation + " was deleted"); + } else { + System.out.println("Operation " + operation + " not found"); + } + } + } + + /** + * This class demonstrates how to list Compute addresses. + * + * @see + * Addresses: list + * @see + * Addresses: aggerated list + */ + private static class ListAddressesAction extends OptionalRegionAction { + @Override + public void run(Compute compute, RegionId region) { + Iterator
<Address>
addressIterator; + if (region != null) { + addressIterator = compute.listRegionAddresses(region.region()).iterateAll(); + } else { + addressIterator = compute.listAddresses().iterateAll(); + } + while (addressIterator.hasNext()) { + System.out.println(addressIterator.next()); + } + } + } + + private abstract static class AddressAction extends ComputeAction { + @Override + AddressId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return RegionAddressId.of(args[0], (args[1])); + } else if (args.length == 1) { + return GlobalAddressId.of(args[0]); + } else if (args.length > 2) { + message = "Too many arguments."; + } else { + message = "Missing required address id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return "?
"; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute address. + * + * @see + * Addresses: get + * @see + * Global Addresses: get + */ + private static class AddressInfoAction extends AddressAction { + @Override + public void run(Compute compute, AddressId address) { + System.out.println("Address info: " + compute.getAddress(address)); + } + } + + /** + * This class demonstrates how to delete a Compute address. + * + * @see + * Addresses: delete + * @see + * Global Addresses: delete + */ + private static class DeleteAddressAction extends AddressAction { + @Override + public void run(Compute compute, AddressId address) throws InterruptedException { + Operation operation = compute.deleteAddress(address); + if (operation == null) { + System.out.println("Address " + address + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Address " + address + " was deleted"); + } else { + System.out.println("Deletion of address " + address + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to create a Compute address. + * + * @see + * Addresses: insert + * @see + * Global Addresses: insert + */ + private static class CreateAddressAction extends AddressAction { + @Override + public void run(Compute compute, AddressId address) throws InterruptedException { + Operation operation = compute.create(AddressInfo.of(address)); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Address " + address + " was created"); + } else { + System.out.println("Creation of address " + address + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to list Compute snapshots. + * + * @see + * Snapshots: list + */ + private static class ListSnapshotsAction extends NoArgsAction { + @Override + public void run(Compute compute, Void arg) { + Iterator snapshotIterator = compute.listSnapshots().iterateAll(); + while (snapshotIterator.hasNext()) { + System.out.println(snapshotIterator.next()); + } + } + } + + private abstract static class SnapshotAction extends ComputeAction { + @Override + SnapshotId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return SnapshotId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required shapshot id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute snapshot. + * + * @see + * Snapshots: get + */ + private static class SnapshotInfoAction extends SnapshotAction { + @Override + public void run(Compute compute, SnapshotId snapshot) { + System.out.println("Snapshot info: " + compute.getSnapshot(snapshot.snapshot())); + } + } + + /** + * This class demonstrates how to delete a Compute snapshot. 
+ * + * @see + * Snapshots: delete + */ + private static class DeleteSnapshotAction extends SnapshotAction { + @Override + public void run(Compute compute, SnapshotId snapshot) throws InterruptedException { + Operation operation = compute.deleteSnapshot(snapshot.snapshot()); + if (operation == null) { + System.out.println("Snapshot " + snapshot + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Snapshot " + snapshot + " was deleted"); + } else { + System.out.println("Deletion of snapshot " + snapshot + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to create a Compute snapshot. + * + * @see + * Snapshots: insert + */ + private static class CreateSnapshotAction extends ComputeAction { + @Override + public void run(Compute compute, SnapshotInfo snapshot) throws InterruptedException { + Operation operation = compute.create(snapshot); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Snapshot " + snapshot.snapshotId() + " was created"); + } else { + System.out.println("Creation of snapshot " + snapshot.snapshotId() + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + SnapshotInfo parse(String... args) throws Exception { + String message; + if (args.length == 3) { + String snapshot = args[0]; + String zone = args[1]; + String disk = args[2]; + return SnapshotInfo.of(SnapshotId.of(snapshot), DiskId.of(zone, disk)); + } else if (args.length > 3) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to list Compute images. + * + * @see Images: list + * + */ + private static class ListImagesAction extends NoArgsAction { + @Override + public void run(Compute compute, Void arg) { + Iterator imageIterator = compute.listImages().iterateAll(); + while (imageIterator.hasNext()) { + System.out.println(imageIterator.next()); + } + } + } + + private abstract static class ImageAction extends ComputeAction { + @Override + ImageId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return ImageId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required image id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute image. + * + * @see Images: get + * + */ + private static class ImageInfoAction extends ImageAction { + @Override + public void run(Compute compute, ImageId image) { + System.out.println("Image info: " + compute.getImage(image)); + } + } + + /** + * This class demonstrates how to delete a Compute image. 
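+   *
+   * <p>For example, {@code delete image test-image} (the image name is a placeholder) should
+   * delete the given image from the current project.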
+ * + * @see Images: + * delete + */ + private static class DeleteImageAction extends ImageAction { + @Override + public void run(Compute compute, ImageId image) throws InterruptedException { + Operation operation = compute.deleteImage(image); + if (operation == null) { + System.out.println("Image " + image + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Image " + image + " was deleted"); + } else { + System.out.println("Deletion of image " + image + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to create a Compute image. + * + * @see Images: + * insert + */ + private static class CreateImageAction extends ComputeAction { + @Override + public void run(Compute compute, ImageInfo image) throws InterruptedException { + Operation operation = compute.create(image); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Image " + image.imageId() + " was created"); + } else { + System.out.println("Creation of image " + image.imageId() + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + ImageInfo parse(String... args) throws Exception { + String message; + if (args.length == 3) { + String image = args[0]; + String zone = args[1]; + String disk = args[2]; + return ImageInfo.of(ImageId.of(image), DiskImageConfiguration.of(DiskId.of(zone, disk))); + } else if (args.length > 3) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + private abstract static class DiskAction extends ComputeAction { + @Override + DiskId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return DiskId.of(args[0], args[1]); + } else if (args.length > 2) { + message = "Too many arguments."; + } else { + message = "Missing required zone and disk id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute disk. + * + * @see + * Snapshots: get + */ + private static class DiskInfoAction extends DiskAction { + @Override + public void run(Compute compute, DiskId disk) { + System.out.println("Disk info: " + compute.getDisk(disk)); + } + } + + /** + * This class demonstrates how to delete a Compute disk. 
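+   *
+   * <p>For example, {@code delete disk us-central1-a test-disk} (zone and disk name are
+   * placeholders) should delete the given zonal disk.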
+ * + * @see Disks: + * delete + */ + private static class DeleteDiskAction extends DiskAction { + @Override + public void run(Compute compute, DiskId disk) throws InterruptedException { + Operation operation = compute.deleteDisk(disk); + if (operation == null) { + System.out.println("Disk " + disk + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Disk " + disk + " was deleted"); + } else { + System.out.println("Deletion of disk " + disk + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to list Compute disks. + * + * @see Disks: list + * + */ + private static class ListDisksAction extends OptionalZoneAction { + @Override + public void run(Compute compute, ZoneId zone) { + Iterator diskIterator = compute.listDisks().iterateAll(); + while (diskIterator.hasNext()) { + System.out.println(diskIterator.next()); + } + } + } + + private abstract static class CreateDiskAction extends ComputeAction { + @Override + public void run(Compute compute, DiskInfo disk) throws InterruptedException { + Operation operation = compute.create(disk); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Disk " + disk.diskId() + " was created"); + } else { + System.out.println("Creation of disk " + disk.diskId() + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + static DiskId parseDiskId(String[] args) { + return DiskId.of(args[0], args[1]); + } + } + + /** + * This class demonstrates how to create a Compute disk given its type and size. + * + * @see Disks: + * insert + */ + private static class CreateStandardDiskAction extends CreateDiskAction { + @Override + DiskInfo parse(String... args) throws Exception { + if (args.length >= 3) { + DiskId diskId = parseDiskId(args); + String diskType = args[2]; + DiskConfiguration configuration; + if (args.length == 4) { + try { + configuration = StandardDiskConfiguration.of(DiskTypeId.of(diskId.zone(), diskType), + Integer.parseInt(args[3])); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException("Error parsing disk size parameter."); + } + } else if (args.length == 3) { + configuration = StandardDiskConfiguration.of(DiskTypeId.of(diskId.zone(), diskType)); + } else { + throw new IllegalArgumentException("Too many arguments."); + } + return DiskInfo.of(diskId, configuration); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " ?"; + } + } + + /** + * This class demonstrates how to create a Compute disk given a source snapshot. + * + * @see Disks: + * insert + */ + private static class CreateSnapshotDiskAction extends CreateDiskAction { + @Override + DiskInfo parse(String... 
args) throws Exception { + if (args.length == 3) { + DiskId diskId = parseDiskId(args); + return DiskInfo.of(diskId, SnapshotDiskConfiguration.of(SnapshotId.of(args[2]))); + } else if (args.length > 3) { + throw new IllegalArgumentException("Too many arguments."); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to create a Compute disk given a source image. + * + * @see Disks: + * insert + */ + private static class CreateImageDiskAction extends CreateDiskAction { + @Override + DiskInfo parse(String... args) throws Exception { + if (args.length == 3) { + DiskId diskId = parseDiskId(args); + return DiskInfo.of(diskId, ImageDiskConfiguration.of(ImageId.of(args[2]))); + } else if (args.length == 4) { + DiskId diskId = parseDiskId(args); + return DiskInfo.of(diskId, ImageDiskConfiguration.of(ImageId.of(args[2], args[3]))); + } else if (args.length > 4) { + throw new IllegalArgumentException("Too many arguments."); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " ? "; + } + } + + /** + * This class demonstrates how to list Compute networks. + * + * @see Networks: + * list + */ + private static class ListNetworksAction extends NoArgsAction { + @Override + public void run(Compute compute, Void arg) { + Iterator networkIterator = compute.listNetworks().iterateAll(); + while (networkIterator.hasNext()) { + System.out.println(networkIterator.next()); + } + } + } + + private abstract static class NetworkAction extends ComputeAction { + @Override + NetworkId parse(String... args) throws Exception { + String message; + if (args.length == 1) { + return NetworkId.of(args[0]); + } else if (args.length > 1) { + message = "Too many arguments."; + } else { + message = "Missing required network id."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return ""; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute network. + * + * @see Networks: + * get + */ + private static class NetworkInfoAction extends NetworkAction { + @Override + public void run(Compute compute, NetworkId network) { + System.out.println("Network info: " + compute.getNetwork(network.network())); + } + } + + /** + * This class demonstrates how to delete a Compute network. 
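+   *
+   * <p>For example, {@code delete network test-network} (the network name is a placeholder) should
+   * delete the given network.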
+ * + * @see Networks: + * delete + */ + private static class DeleteNetworkAction extends NetworkAction { + @Override + public void run(Compute compute, NetworkId network) throws InterruptedException { + Operation operation = compute.deleteNetwork(network.network()); + if (operation == null) { + System.out.println("Network " + network + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Network " + network + " was deleted"); + } else { + System.out.println("Deletion of network " + network + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + private abstract static class CreateNetworkAction extends ComputeAction { + @Override + public void run(Compute compute, NetworkInfo network) throws InterruptedException { + Operation operation = compute.create(network); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Network " + network.networkId() + " was created"); + } else { + System.out.println("Creation of network " + network.networkId() + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to create a Compute network with no subnetworks. + * + * @see Networks: + * insert + */ + private static class CreateStandardNetworkAction extends CreateNetworkAction { + @Override + NetworkInfo parse(String... args) throws Exception { + if (args.length == 2) { + return NetworkInfo.of(NetworkId.of(args[0]), StandardNetworkConfiguration.of(args[1])); + } else if (args.length > 2) { + throw new IllegalArgumentException("Too many arguments."); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to create a Compute network that supports the creation of + * subnetworks (either manual or automatic). + * + * @see Networks: + * insert + */ + private static class CreateSubnetNetworkAction extends CreateNetworkAction { + @Override + NetworkInfo parse(String... args) throws Exception { + if (args.length == 2) { + boolean autoCreateSubnetworks; + switch (args[1]) { + case "true": + autoCreateSubnetworks = true; + break; + case "false": + autoCreateSubnetworks = false; + break; + default: + throw new IllegalArgumentException( + "Couldn't parse autoCreateSubnetworks argument (must be either true or false)."); + } + return NetworkInfo.of(NetworkId.of(args[0]), + SubnetNetworkConfiguration.of(autoCreateSubnetworks)); + } else if (args.length > 2) { + throw new IllegalArgumentException("Too many arguments."); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " true|false"; + } + } + + /** + * This class demonstrates how to list Compute subnetworks. 
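+   *
+   * <p>For example, {@code list subnetworks us-central1} should list only the subnetworks of the
+   * given region (a placeholder here), while {@code list subnetworks} with no region should list
+   * subnetworks across all regions.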
+ * + * @see + * Subnetworks: list + */ + private static class ListSubnetworksAction extends OptionalRegionAction { + + @Override + public void run(Compute compute, RegionId region) { + Iterator subnetworkIterator; + if (region != null) { + subnetworkIterator = compute.listSubnetworks(region.region()).iterateAll(); + } else { + subnetworkIterator = compute.listSubnetworks().iterateAll(); + } + while (subnetworkIterator.hasNext()) { + System.out.println(subnetworkIterator.next()); + } + } + } + + private abstract static class SubnetworkAction extends ComputeAction { + @Override + SubnetworkId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return SubnetworkId.of(args[0], args[1]); + } else if (args.length > 2) { + message = "Too many arguments."; + } else { + message = "Missing required region and subnetwork."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute subnetwork. + * + * @see + * Subnetworks: get + */ + private static class SubnetworkInfoAction extends SubnetworkAction { + @Override + public void run(Compute compute, SubnetworkId subnetwork) { + System.out.println("Subnetwork info: " + compute.getSubnetwork(subnetwork)); + } + } + + /** + * This class demonstrates how to delete a Compute subnetwork. + * + * @see + * Subnetworks: delete + */ + private static class DeleteSubnetworkAction extends SubnetworkAction { + @Override + public void run(Compute compute, SubnetworkId subnetwork) throws InterruptedException { + Operation operation = compute.deleteSubnetwork(subnetwork); + if (operation == null) { + System.out.println("Subnetwork " + subnetwork + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Subnetwork " + subnetwork + " was deleted"); + } else { + System.out.println("Deletion of subnetwork " + subnetwork + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to create a Compute subnetwork. + * + * @see + * Subnetworks: insert + */ + private static class CreateSubnetworkAction extends ComputeAction { + @Override + public void run(Compute compute, SubnetworkInfo subnetwork) throws InterruptedException { + Operation operation = compute.create(subnetwork); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Subnetwork " + subnetwork.subnetworkId() + " was created"); + } else { + System.out.println("Creation of subnetwork " + subnetwork.subnetworkId() + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + SubnetworkInfo parse(String... 
args) throws Exception { + String message; + if (args.length == 4) { + SubnetworkId subnetwork = SubnetworkId.of(args[0], args[1]); + return SubnetworkInfo.of(subnetwork, NetworkId.of(args[2]), args[3]); + } else if (args.length > 4) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to list Compute instances. + * + * @see Instances: + * list + */ + private static class ListInstancesAction extends OptionalZoneAction { + @Override + public void run(Compute compute, ZoneId zone) { + Iterator instanceIterator; + if (zone != null) { + instanceIterator = compute.listInstances(zone.zone()).iterateAll(); + } else { + instanceIterator = compute.listInstances().iterateAll(); + } + while (instanceIterator.hasNext()) { + System.out.println(instanceIterator.next()); + } + } + } + + private abstract static class InstanceAction extends ComputeAction { + @Override + InstanceId parse(String... args) throws Exception { + String message; + if (args.length == 2) { + return InstanceId.of(args[0], args[1]); + } else if (args.length > 2) { + message = "Too many arguments."; + } else { + message = "Missing required zone and instance."; + } + throw new IllegalArgumentException(message); + } + + @Override + public String params() { + return " "; + } + } + + /** + * This class demonstrates how to retrieve information on a Compute instance. + * + * @see Instances: + * get + */ + private static class InstanceInfoAction extends InstanceAction { + @Override + public void run(Compute compute, InstanceId instance) { + System.out.println("Instance info: " + compute.getInstance(instance)); + } + } + + /** + * This class demonstrates how to delete a Compute instance. + * + * @see + * Instances: delete + */ + private static class DeleteInstanceAction extends InstanceAction { + @Override + public void run(Compute compute, InstanceId instance) throws InterruptedException { + Operation operation = compute.deleteInstance(instance); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Instance " + instance + " was deleted"); + } else { + System.out.println("Deletion of instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to create a Compute instance. + * + * @see + * Instances: insert + */ + private static class CreateInstanceAction extends ComputeAction { + @Override + public void run(Compute compute, InstanceInfo instance) throws InterruptedException { + Operation operation = compute.create(instance); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Instance " + instance.instanceId() + " was created"); + } else { + System.out.println("Creation of instance " + instance.instanceId() + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + InstanceInfo parse(String... 
args) throws Exception { + String message; + if (args.length == 5) { + String zone = args[0]; + String instance = args[1]; + InstanceId instanceId = InstanceId.of(zone, instance); + MachineTypeId machineTypeId = MachineTypeId.of(zone, args[2]); + DiskId diskId = DiskId.of(zone, args[3]); + AttachedDisk disk = + AttachedDisk.of(PersistentDiskConfiguration.builder(diskId).boot(true).build()); + NetworkInterface networkInterface = NetworkInterface.of(args[4]); + return InstanceInfo.of(instanceId, machineTypeId, disk, networkInterface); + } else if (args.length > 5) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to get the serial port output for a Compute instance. + * + * @see + * Instances: getSerialPortOutput + */ + private static class GetSerialPortAction extends ComputeAction> { + @Override + public void run(Compute compute, Tuple instanceAndPort) + throws InterruptedException { + InstanceId instance = instanceAndPort.x(); + Integer port = instanceAndPort.y(); + String serialPortOutput; + if (port != null) { + System.out.println("Getting serial port " + port + " output for instance " + instance); + serialPortOutput = compute.getSerialPortOutput(instance, port); + } else { + System.out.println("Getting serial port output for instance " + instance); + serialPortOutput = compute.getSerialPortOutput(instance); + } + System.out.println(serialPortOutput); + } + + @Override + Tuple parse(String... args) throws Exception { + if (args.length >= 2) { + InstanceId instanceId = InstanceId.of(args[0], args[1]); + Integer port = null; + if (args.length == 3) { + try { + port = Integer.parseInt(args[2]); + } catch (NumberFormatException ex) { + throw new IllegalArgumentException( + "Error parsing portNumber parameter (must be a number)"); + } + } else if (args.length > 3) { + throw new IllegalArgumentException("Too many arguments."); + } + return Tuple.of(instanceId, port); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to add an access configuration to a Compute instance network + * interface. + * + * @see + * Instances: addAccessConfig + */ + private static class AddAccessConfigAction + extends ComputeAction> { + @Override + public void run(Compute compute, Triple interfaceAndConfig) + throws InterruptedException { + InstanceId instance = interfaceAndConfig.x(); + String networkInterface = interfaceAndConfig.y(); + AccessConfig accessConfig = interfaceAndConfig.z(); + Operation operation = compute.addAccessConfig(instance, networkInterface, accessConfig); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Access config added to network interface " + networkInterface + + " of instance " + instance); + } else { + System.out.println("Attempt to add access config to network interface " + networkInterface + + " of instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Triple parse(String... 
args) throws Exception { + String message; + if (args.length >= 4) { + InstanceId instance = InstanceId.of(args[0], args[1]); + String networkInterface = args[2]; + String accessConfig = args[3]; + if (args.length == 4) { + return Triple.of(instance, networkInterface, + AccessConfig.builder().name(accessConfig).build()); + } else if (args.length == 5) { + return Triple.of(instance, networkInterface, + AccessConfig.builder().name(accessConfig).natIp(args[4]).build()); + } else { + message = "Too many arguments."; + } + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " ?"; + } + } + + /** + * This class demonstrates how to delete an access configuration from a Compute instance network + * interface. + * + * @see + * Instances: deleteAccessConfig + */ + private static class DeleteAccessConfigAction extends + ComputeAction> { + @Override + public void run(Compute compute, Triple interfaceAndConfig) + throws InterruptedException { + InstanceId instance = interfaceAndConfig.x(); + String networkInterface = interfaceAndConfig.y(); + String accessConfig = interfaceAndConfig.z(); + Operation operation = compute.deleteAccessConfig(instance, networkInterface, accessConfig); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Access config deleted from network interface " + networkInterface + + " of instance " + instance); + } else { + System.out.println("Attempt to delete access config from network interface " + + networkInterface + " of instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Triple parse(String... args) throws Exception { + String message; + if (args.length == 4) { + InstanceId instance = InstanceId.of(args[0], args[1]); + String networkInterface = args[2]; + String accessConfig = args[3]; + return Triple.of(instance, networkInterface, accessConfig); + } else if (args.length > 4) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to attach a persistent disk to a Compute instance. 
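+   *
+   * <p>For example, {@code attach-disk us-central1-a test-instance dev0 test-disk} (all names are
+   * placeholders) should attach the existing disk {@code test-disk} to {@code test-instance} under
+   * the device name {@code dev0}.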
+ * + * @see + * Instances: attachDisk + */ + private static class AttachDiskAction + extends ComputeAction> { + @Override + public void run(Compute compute, Triple + instanceAndDisk) throws InterruptedException { + InstanceId instance = instanceAndDisk.x(); + String deviceName = instanceAndDisk.y(); + PersistentDiskConfiguration diskConfiguration = instanceAndDisk.z(); + Operation operation = compute.attachDisk(instance, deviceName, diskConfiguration); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Disk attached to instance " + instance); + } else { + System.out.println("Attempt to attach disk to instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Triple parse(String... args) throws Exception { + String message; + if (args.length == 4) { + String zone = args[0]; + String instance = args[1]; + String deviceName = args[2]; + String disk = args[3]; + return Triple.of(InstanceId.of(zone, instance), deviceName, + PersistentDiskConfiguration.of(DiskId.of(zone, disk))); + } else if (args.length > 4) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to detach a persistent disk from a Compute instance. + * + * @see + * Instances: detachDisk + */ + private static class DetachDiskAction extends ComputeAction> { + @Override + public void run(Compute compute, Tuple instanceAndDevice) + throws InterruptedException { + InstanceId instance = instanceAndDevice.x(); + String deviceName = instanceAndDevice.y(); + Operation operation = compute.detachDisk(instance, deviceName); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Disk detached from instance " + instance); + } else { + System.out.println("Attempt to detach disk from instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Tuple parse(String... args) throws Exception { + String message; + if (args.length == 3) { + String zone = args[0]; + String instance = args[1]; + String deviceName = args[2]; + return Tuple.of(InstanceId.of(zone, instance), deviceName); + } else if (args.length > 4) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to set the auto-delete property of a disk attached to a Compute + * instance. 
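+   *
+   * <p>For example, {@code set-disk-auto-delete us-central1-a test-instance dev0 true} (instance
+   * and device names are placeholders) should mark the disk attached as {@code dev0} for automatic
+   * deletion when the instance is deleted.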
+ * + * @see + * Instances: setDiskAutoDelete + */ + private static class SetDiskAutoDeleteAction + extends ComputeAction> { + @Override + public void run(Compute compute, Triple deviceAndAutoDelete) + throws InterruptedException { + InstanceId instance = deviceAndAutoDelete.x(); + String deviceName = deviceAndAutoDelete.y(); + Boolean autoDelete = deviceAndAutoDelete.z(); + Operation operation = compute.setDiskAutoDelete(instance, deviceName, autoDelete); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Auto-delete set for device " + deviceName + " of instance " + instance); + } else { + System.out.println("Attempt to set auto-delete for device " + deviceName + " of instance " + + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Triple parse(String... args) throws Exception { + String message; + if (args.length == 4) { + InstanceId instance = InstanceId.of(args[0], args[1]); + String deviceName = args[2]; + boolean autoDelete; + switch (args[3]) { + case "true": + autoDelete = true; + break; + case "false": + autoDelete = false; + break; + default: + throw new IllegalArgumentException( + "Couldn't parse autoDelete argument (must be either true or false)."); + } + return Triple.of(instance, deviceName, autoDelete); + } else if (args.length > 3) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " true|false"; + } + } + + /** + * This class demonstrates how to set the machine type for a Compute instance. + * + * @see + * Instances: setMachineType + */ + private static class SetMachineTypeAction + extends ComputeAction> { + @Override + public void run(Compute compute, Tuple instanceAndType) + throws InterruptedException { + InstanceId instance = instanceAndType.x(); + MachineTypeId machineType = instanceAndType.y(); + Operation operation = compute.setMachineType(instance, machineType); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Machine type set for instance " + instance); + } else { + System.out.println("Attempt to set machine type for instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Tuple parse(String... args) throws Exception { + String message; + if (args.length == 3) { + String zone = args[0]; + String instance = args[1]; + String machineType = args[2]; + return Tuple.of(InstanceId.of(zone, instance), MachineTypeId.of(zone, machineType)); + } else if (args.length > 3) { + message = "Too many arguments."; + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " "; + } + } + + /** + * This class demonstrates how to set the tags for a Compute instance. 
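+   *
+   * <p>A rough equivalent using the client directly (zone, instance name and tags are
+   * placeholders) could look like:
+   * <pre>{@code
+   * Compute compute = ComputeOptions.defaultInstance().service();
+   * Instance instance = compute.getInstance(InstanceId.of("us-central1-a", "test-instance"));
+   * if (instance != null) {
+   *   Operation operation = instance.setTags(Arrays.asList("http-server", "https-server"));
+   * }
+   * }</pre>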
+ * + * @see + * Instances: setTags + */ + private static class SetTagsAction extends ComputeAction>> { + @Override + public void run(Compute compute, Tuple> instanceAndTags) + throws InterruptedException { + InstanceId instanceId = instanceAndTags.x(); + List tags = instanceAndTags.y(); + Instance instance = compute.getInstance(instanceId); + if (instance == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + Operation operation = instance.setTags(tags); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Tags set for instance " + instanceId); + } else { + System.out.println("Attempt to set tags for instance " + instanceId + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Tuple> parse(String... args) throws Exception { + if (args.length >= 2) { + InstanceId instanceId = InstanceId.of(args[0], args[1]); + List tags = Lists.newArrayListWithCapacity(args.length - 2); + tags.addAll(Arrays.asList(args).subList(2, args.length)); + return Tuple.of(instanceId, tags); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " *"; + } + } + + /** + * This class demonstrates how to set the metadata for a Compute instance. + * + * @see + * Instances: setMetadata + */ + private static class SetMetadataAction extends ComputeAction>> { + @Override + public void run(Compute compute, Tuple> instanceAndMetadata) + throws InterruptedException { + InstanceId instanceId = instanceAndMetadata.x(); + Map metadata = instanceAndMetadata.y(); + Instance instance = compute.getInstance(instanceId); + if (instance == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + Operation operation = instance.setMetadata(metadata); + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Metadata set for instance " + instanceId); + } else { + System.out.println("Attempt to set metadata for instance " + instanceId + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Tuple> parse(String... args) throws Exception { + if (args.length >= 2) { + if ((args.length & 0x1) == 0x1) { + throw new IllegalArgumentException("Metadata must be a list of key-value pairs."); + } + InstanceId instanceId = InstanceId.of(args[0], args[1]); + Map metadata = Maps.newHashMapWithExpectedSize((args.length / 2) - 1); + for (int i = 2; i < args.length; i += 2) { + metadata.put(args[i], args[i + 1]); + } + return Tuple.of(instanceId, metadata); + } else { + throw new IllegalArgumentException("Missing required arguments."); + } + } + + @Override + protected String params() { + return " ( )*"; + } + } + + /** + * This class demonstrates how to set the scheduling options for a Compute instance. 
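+   *
+   * <p>For example, {@code set-scheduling-options us-central1-a test-instance preemptible} should
+   * make the (placeholder) instance preemptible, while
+   * {@code set-scheduling-options us-central1-a test-instance standard true MIGRATE} should switch
+   * it back to standard scheduling with automatic restart and migration on maintenance.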
+ * + * @see + * Instances: setScheduling + */ + private static class SetSchedulingOptionsAction extends ComputeAction> { + @Override + public void run(Compute compute, Tuple instanceAndScheduling) + throws InterruptedException { + InstanceId instanceId = instanceAndScheduling.x(); + SchedulingOptions schedulingOptions = instanceAndScheduling.y(); + Operation operation = compute.setSchedulingOptions(instanceId, schedulingOptions); + if (operation == null) { + System.out.println("Instance " + instanceId + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Scheduling options set for instance " + instanceId); + } else { + System.out.println( + "Attempt to set scheduling options for instance " + instanceId + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + + @Override + Tuple parse(String... args) throws Exception { + String message; + if (args.length > 2) { + InstanceId instanceId = InstanceId.of(args[0], args[1]); + if (args.length == 3 && args[2].equals("preemptible")) { + return Tuple.of(instanceId, SchedulingOptions.preemptible()); + } else if (args.length == 5 && args[2].equals("standard")) { + boolean automaticRestart; + switch (args[3]) { + case "true": + automaticRestart = true; + break; + case "false": + automaticRestart = false; + break; + default: + throw new IllegalArgumentException( + "Couldn't parse automaticRestart argument (must be either true or false)."); + } + Maintenance maintenance = Maintenance.valueOf(args[4]); + return Tuple.of(instanceId, SchedulingOptions.standard(automaticRestart, maintenance)); + } else { + message = "Unexpected command line arguments."; + } + } else { + message = "Missing required arguments."; + } + throw new IllegalArgumentException(message); + } + + @Override + protected String params() { + return " preemptible|(standard true|false MIGRATE|TERMINATE)"; + } + } + + /** + * This class demonstrates how to reset a Compute instance. + * + * @see + * Instances: reset + */ + private static class ResetInstanceAction extends InstanceAction { + @Override + public void run(Compute compute, InstanceId instance) throws InterruptedException { + Operation operation = compute.reset(instance); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Instance " + instance + " was reset"); + } else { + System.out.println("Attempt to reset instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to stop a Compute instance. 
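+   *
+   * <p>A minimal sketch of the same call against the client (zone and instance name are
+   * placeholders):
+   * <pre>{@code
+   * Compute compute = ComputeOptions.defaultInstance().service();
+   * Operation operation = compute.stop(InstanceId.of("us-central1-a", "test-instance"));
+   * while (operation != null && !operation.isDone()) {
+   *   Thread.sleep(1000L);
+   * }
+   * }</pre>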
+ * + * @see + * Instances: stop + */ + private static class StopInstanceAction extends InstanceAction { + @Override + public void run(Compute compute, InstanceId instance) throws InterruptedException { + Operation operation = compute.stop(instance); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Instance " + instance + " was stopped"); + } else { + System.out.println("Attempt to stop instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + /** + * This class demonstrates how to start a Compute instance. + * + * @see + * Instances: start + */ + private static class StartInstanceAction extends InstanceAction { + @Override + public void run(Compute compute, InstanceId instance) throws InterruptedException { + Operation operation = compute.start(instance); + if (operation == null) { + System.out.println("Instance " + instance + " does not exist"); + return; + } + while (!operation.isDone()) { + System.out.println( + "Waiting for operation " + operation.operationId().operation() + " to complete"); + Thread.sleep(1000L); + } + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Instance " + instance + " was started"); + } else { + System.out.println("Attempt to start instance " + instance + " failed"); + System.out.println("Error: " + operation.errors()); + } + } + } + + static { + CREATE_ACTIONS.put("address", new CreateAddressAction()); + CREATE_ACTIONS.put("snapshot", new CreateSnapshotAction()); + CREATE_ACTIONS.put("image", new CreateImageAction()); + CREATE_ACTIONS.put("standard-disk", new CreateStandardDiskAction()); + CREATE_ACTIONS.put("snapshot-disk", new CreateSnapshotDiskAction()); + CREATE_ACTIONS.put("image-disk", new CreateImageDiskAction()); + CREATE_ACTIONS.put("standard-network", new CreateStandardNetworkAction()); + CREATE_ACTIONS.put("subnet-network", new CreateSubnetNetworkAction()); + CREATE_ACTIONS.put("subnetwork", new CreateSubnetworkAction()); + CREATE_ACTIONS.put("instance", new CreateInstanceAction()); + INFO_ACTIONS.put("diskType", new DiskTypeInfoAction()); + INFO_ACTIONS.put("machineType", new MachineTypeInfoAction()); + INFO_ACTIONS.put("region", new RegionInfoAction()); + INFO_ACTIONS.put("zone", new ZoneInfoAction()); + INFO_ACTIONS.put("global-operation", new GlobalOperationInfoAction()); + INFO_ACTIONS.put("zone-operation", new ZoneOperationInfoAction()); + INFO_ACTIONS.put("region-operation", new RegionOperationInfoAction()); + INFO_ACTIONS.put("license", new LicenseInfoAction()); + INFO_ACTIONS.put("address", new AddressInfoAction()); + INFO_ACTIONS.put("snapshot", new SnapshotInfoAction()); + INFO_ACTIONS.put("image", new ImageInfoAction()); + INFO_ACTIONS.put("disk", new DiskInfoAction()); + INFO_ACTIONS.put("network", new NetworkInfoAction()); + INFO_ACTIONS.put("subnetwork", new SubnetworkInfoAction()); + INFO_ACTIONS.put("instance", new InstanceInfoAction()); + LIST_ACTIONS.put("diskTypes", new ListDiskTypesAction()); + LIST_ACTIONS.put("machineTypes", new ListMachineTypesAction()); + LIST_ACTIONS.put("regions", new ListRegionsAction()); + LIST_ACTIONS.put("zones", new ListZonesAction()); + LIST_ACTIONS.put("global-operations", new 
ListGlobalOperationsAction()); + LIST_ACTIONS.put("zone-operations", new ListZoneOperationsAction()); + LIST_ACTIONS.put("region-operations", new ListRegionOperationsAction()); + LIST_ACTIONS.put("addresses", new ListAddressesAction()); + LIST_ACTIONS.put("snapshots", new ListSnapshotsAction()); + LIST_ACTIONS.put("images", new ListImagesAction()); + LIST_ACTIONS.put("disks", new ListDisksAction()); + LIST_ACTIONS.put("networks", new ListNetworksAction()); + LIST_ACTIONS.put("subnetworks", new ListSubnetworksAction()); + LIST_ACTIONS.put("instances", new ListInstancesAction()); + DELETE_ACTIONS.put("global-operation", new DeleteGlobalOperationAction()); + DELETE_ACTIONS.put("zone-operation", new DeleteZoneOperationAction()); + DELETE_ACTIONS.put("region-operation", new DeleteRegionOperationAction()); + DELETE_ACTIONS.put("address", new DeleteAddressAction()); + DELETE_ACTIONS.put("snapshot", new DeleteSnapshotAction()); + DELETE_ACTIONS.put("image", new DeleteImageAction()); + DELETE_ACTIONS.put("disk", new DeleteDiskAction()); + DELETE_ACTIONS.put("network", new DeleteNetworkAction()); + DELETE_ACTIONS.put("subnetwork", new DeleteSubnetworkAction()); + DELETE_ACTIONS.put("instance", new DeleteInstanceAction()); + ACTIONS.put("create", new ParentAction(CREATE_ACTIONS)); + ACTIONS.put("info", new ParentAction(INFO_ACTIONS)); + ACTIONS.put("list", new ParentAction(LIST_ACTIONS)); + ACTIONS.put("delete", new ParentAction(DELETE_ACTIONS)); + ACTIONS.put("get-serial-port", new GetSerialPortAction()); + ACTIONS.put("add-access-config", new AddAccessConfigAction()); + ACTIONS.put("delete-access-config", new DeleteAccessConfigAction()); + ACTIONS.put("attach-disk", new AttachDiskAction()); + ACTIONS.put("detach-disk", new DetachDiskAction()); + ACTIONS.put("set-disk-auto-delete", new SetDiskAutoDeleteAction()); + ACTIONS.put("set-machine-type", new SetMachineTypeAction()); + ACTIONS.put("set-tags", new SetTagsAction()); + ACTIONS.put("set-metadata", new SetMetadataAction()); + ACTIONS.put("set-scheduling-options", new SetSchedulingOptionsAction()); + ACTIONS.put("reset", new ResetInstanceAction()); + ACTIONS.put("stop", new StopInstanceAction()); + ACTIONS.put("start", new StartInstanceAction()); + } + + private static void printUsage() { + StringBuilder actionAndParams = new StringBuilder(); + for (Map.Entry entry : ACTIONS.entrySet()) { + actionAndParams.append("\n\t").append(entry.getKey()); + + String param = entry.getValue().params(); + if (param != null && !param.isEmpty()) { + actionAndParams.append(' ').append(param.replace("\n", "\n\t\t")); + } + } + System.out.printf("Usage: %s [] operation [entity] *%s%n", + ComputeExample.class.getSimpleName(), actionAndParams); + } + + @SuppressWarnings("unchecked") + public static void main(String... 
args) throws Exception { + if (args.length < 1) { + System.out.println("Missing required project id and action"); + printUsage(); + return; + } + ComputeOptions.Builder optionsBuilder = ComputeOptions.builder(); + ComputeAction action; + String actionName; + if (args.length >= 2 && !ACTIONS.containsKey(args[0])) { + actionName = args[1]; + optionsBuilder.projectId(args[0]); + action = ACTIONS.get(args[1]); + args = Arrays.copyOfRange(args, 2, args.length); + } else { + actionName = args[0]; + action = ACTIONS.get(args[0]); + args = Arrays.copyOfRange(args, 1, args.length); + } + if (action == null) { + System.out.println("Unrecognized action."); + printUsage(); + return; + } + Compute compute = optionsBuilder.build().service(); + Object request; + try { + request = action.parse(args); + } catch (IllegalArgumentException ex) { + System.out.println("Invalid input for action '" + actionName + "'. " + ex.getMessage()); + System.out.println("Expected: " + action.params()); + return; + } catch (Exception ex) { + System.out.println("Failed to parse request."); + ex.printStackTrace(); + return; + } + action.run(compute, request); + } +} diff --git a/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateAddressDiskAndInstance.java b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateAddressDiskAndInstance.java new file mode 100644 index 000000000000..5334f746c95b --- /dev/null +++ b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateAddressDiskAndInstance.java @@ -0,0 +1,111 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.examples.compute.snippets; + +import com.google.cloud.compute.Address; +import com.google.cloud.compute.AddressInfo; +import com.google.cloud.compute.AttachedDisk; +import com.google.cloud.compute.AttachedDisk.PersistentDiskConfiguration; +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.DiskId; +import com.google.cloud.compute.DiskInfo; +import com.google.cloud.compute.ImageDiskConfiguration; +import com.google.cloud.compute.ImageId; +import com.google.cloud.compute.InstanceId; +import com.google.cloud.compute.InstanceInfo; +import com.google.cloud.compute.MachineTypeId; +import com.google.cloud.compute.NetworkId; +import com.google.cloud.compute.NetworkInterface; +import com.google.cloud.compute.NetworkInterface.AccessConfig; +import com.google.cloud.compute.Operation; +import com.google.cloud.compute.RegionAddressId; + +/** + * A snippet for Google Cloud Compute Engine showing how to create a disk and an address. The + * snippet also shows how to create a virtual machine instance using the created disk and address. + */ +public class CreateAddressDiskAndInstance { + + public static void main(String... 
args) throws InterruptedException { + // Create a service object + // Credentials are inferred from the environment. + Compute compute = ComputeOptions.defaultInstance().service(); + + // Create an external region address + RegionAddressId addressId = RegionAddressId.of("us-central1", "test-address"); + Operation operation = compute.create(AddressInfo.of(addressId)); + // Wait for operation to complete + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // Check operation errors + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Address " + addressId + " was successfully created"); + } else { + // inspect operation.errors() + throw new RuntimeException("Address creation failed"); + } + + // Create a persistent disk + ImageId imageId = ImageId.of("debian-cloud", "debian-8-jessie-v20160329"); + DiskId diskId = DiskId.of("us-central1-a", "test-disk"); + ImageDiskConfiguration diskConfiguration = ImageDiskConfiguration.of(imageId); + DiskInfo disk = DiskInfo.of(diskId, diskConfiguration); + operation = compute.create(disk); + // Wait for operation to complete + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // Check operation errors + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Disk " + diskId + " was successfully created"); + } else { + // inspect operation.errors() + throw new RuntimeException("Disk creation failed"); + } + + // Create a virtual machine instance + Address externalIp = compute.getAddress(addressId); + InstanceId instanceId = InstanceId.of("us-central1-a", "test-instance"); + NetworkId networkId = NetworkId.of("default"); + PersistentDiskConfiguration attachConfiguration = + PersistentDiskConfiguration.builder(diskId).boot(true).build(); + AttachedDisk attachedDisk = AttachedDisk.of("dev0", attachConfiguration); + NetworkInterface networkInterface = NetworkInterface.builder(networkId) + .accessConfigurations(AccessConfig.of(externalIp.address())) + .build(); + MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1"); + InstanceInfo instance = + InstanceInfo.of(instanceId, machineTypeId, attachedDisk, networkInterface); + operation = compute.create(instance); + // Wait for operation to complete + while (!operation.isDone()) { + Thread.sleep(1000L); + } + // Check operation errors + operation = operation.reload(); + if (operation.errors() == null) { + System.out.println("Instance " + instanceId + " was successfully created"); + } else { + // inspect operation.errors() + throw new RuntimeException("Instance creation failed"); + } + } +} diff --git a/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateInstance.java b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateInstance.java new file mode 100644 index 000000000000..d8162908d133 --- /dev/null +++ b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateInstance.java @@ -0,0 +1,54 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.examples.compute.snippets; + +import com.google.cloud.compute.AttachedDisk; +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.ImageId; +import com.google.cloud.compute.Instance; +import com.google.cloud.compute.InstanceId; +import com.google.cloud.compute.InstanceInfo; +import com.google.cloud.compute.MachineTypeId; +import com.google.cloud.compute.NetworkId; +import com.google.cloud.compute.NetworkInterface; +import com.google.cloud.compute.Operation; + +/** + * A snippet for Google Cloud Compute Engine showing how to create a virtual machine instance. + */ +public class CreateInstance { + + public static void main(String... args) throws InterruptedException { + Compute compute = ComputeOptions.defaultInstance().service(); + ImageId imageId = ImageId.of("debian-cloud", "debian-8-jessie-v20160329"); + NetworkId networkId = NetworkId.of("default"); + AttachedDisk attachedDisk = AttachedDisk.of(AttachedDisk.CreateDiskConfiguration.of(imageId)); + NetworkInterface networkInterface = NetworkInterface.of(networkId); + InstanceId instanceId = InstanceId.of("us-central1-a", "instance-name"); + MachineTypeId machineTypeId = MachineTypeId.of("us-central1-a", "n1-standard-1"); + Operation operation = + compute.create(InstanceInfo.of(instanceId, machineTypeId, attachedDisk, networkInterface)); + while (!operation.isDone()) { + Thread.sleep(1000L); + } + if (operation.errors() == null) { + // use instance + Instance instance = compute.getInstance(instanceId); + } + } +} diff --git a/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateSnapshot.java b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateSnapshot.java new file mode 100644 index 000000000000..cc8029936186 --- /dev/null +++ b/gcloud-java-examples/src/main/java/com/google/cloud/examples/compute/snippets/CreateSnapshot.java @@ -0,0 +1,48 @@ +/* + * Copyright 2016 Google Inc. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.examples.compute.snippets; + +import com.google.cloud.compute.Compute; +import com.google.cloud.compute.ComputeOptions; +import com.google.cloud.compute.Disk; +import com.google.cloud.compute.DiskId; +import com.google.cloud.compute.Operation; +import com.google.cloud.compute.Snapshot; + +/** + * A snippet for Google Cloud Compute Engine showing how to create a snapshot of a disk if the disk + * exists. + */ +public class CreateSnapshot { + + public static void main(String... 
args) throws InterruptedException {
+ Compute compute = ComputeOptions.defaultInstance().service();
+ DiskId diskId = DiskId.of("us-central1-a", "disk-name");
+ Disk disk = compute.getDisk(diskId, Compute.DiskOption.fields());
+ if (disk != null) {
+ String snapshotName = "disk-name-snapshot";
+ Operation operation = disk.createSnapshot(snapshotName);
+ while (!operation.isDone()) {
+ Thread.sleep(1000L);
+ }
+ if (operation.errors() == null) {
+ // use snapshot
+ Snapshot snapshot = compute.getSnapshot("disk-name-snapshot");
+ }
+ }
+ }
+}
diff --git a/gcloud-java/pom.xml b/gcloud-java/pom.xml
index 7dbd14e591cc..e26dfa63ab01 100644
--- a/gcloud-java/pom.xml
+++ b/gcloud-java/pom.xml
@@ -19,6 +19,11 @@
       <artifactId>gcloud-java-bigquery</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>${project.groupId}</groupId>
+      <artifactId>gcloud-java-compute</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>${project.groupId}</groupId>
       <artifactId>gcloud-java-core</artifactId>
diff --git a/pom.xml b/pom.xml
index 752f7613f207..87de4a252021 100644
--- a/pom.xml
+++ b/pom.xml
@@ -95,6 +95,7 @@
     <module>gcloud-java</module>
     <module>gcloud-java-bigquery</module>
+    <module>gcloud-java-compute</module>
     <module>gcloud-java-contrib</module>
     <module>gcloud-java-core</module>
     <module>gcloud-java-datastore</module>
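A note on the polling pattern: each snippet above waits for a long-running operation with an inline `while (!operation.isDone()) { Thread.sleep(1000L); }` loop, then reloads the operation and checks `errors()`. The sketch below factors that pattern into a helper, using only the `Operation` methods already shown in this patch (`isDone()`, `reload()`, `operationId()`); the class name `OperationUtils`, the method name `waitFor`, and the timeout handling are illustrative assumptions, not part of the gcloud-java API.

```java
import com.google.cloud.compute.Operation;

import java.util.concurrent.TimeoutException;

public class OperationUtils {

  /**
   * Polls {@code operation} once per second until it completes or {@code timeoutMillis} elapses,
   * then returns the reloaded operation so the caller can inspect {@code errors()}.
   */
  public static Operation waitFor(Operation operation, long timeoutMillis)
      throws InterruptedException, TimeoutException {
    long deadline = System.currentTimeMillis() + timeoutMillis;
    while (!operation.isDone()) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("Operation " + operation.operationId() + " timed out");
      }
      Thread.sleep(1000L);
    }
    // Reload once more so the returned status and errors reflect the final server-side state.
    return operation.reload();
  }
}
```

With such a helper, the wait loops in `CreateSnapshot`, `CreateInstance`, and `CreateAddressDiskAndInstance` would reduce to a single call such as `Operation completed = OperationUtils.waitFor(operation, 60_000L);` followed by the existing `errors()` check.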