diff --git a/0.18.1/.buildinfo b/0.18.1/.buildinfo new file mode 100644 index 000000000000..2f2287011325 --- /dev/null +++ b/0.18.1/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 2311b0f3f0e958e7927415b2ae696345 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/0.18.1/_modules/gcloud/bigquery/_helpers.html b/0.18.1/_modules/gcloud/bigquery/_helpers.html new file mode 100644 index 000000000000..dc6adcafaa28 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/_helpers.html @@ -0,0 +1,399 @@ + + + + + + + + gcloud.bigquery._helpers — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery._helpers

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared elper functions for BigQuery API classes."""
+
+from gcloud._helpers import _datetime_from_microseconds
+
+
+def _not_null(value, field):
+    """Check whether 'value' should be coerced to 'field' type."""
+    return value is not None or field.mode != 'NULLABLE'
+
+
+def _int_from_json(value, field):
+    """Coerce 'value' to an int, if set or not nullable."""
+    if _not_null(value, field):
+        return int(value)
+
+
+def _float_from_json(value, field):
+    """Coerce 'value' to a float, if set or not nullable."""
+    if _not_null(value, field):
+        return float(value)
+
+
+def _bool_from_json(value, field):
+    """Coerce 'value' to a bool, if set or not nullable."""
+    if _not_null(value, field):
+        return value.lower() in ['t', 'true', '1']
+
+
+def _datetime_from_json(value, field):
+    """Coerce 'value' to a datetime, if set or not nullable."""
+    if _not_null(value, field):
+        # value will be a float in seconds, to microsecond precision, in UTC.
+        return _datetime_from_microseconds(1e6 * float(value))
+
+
+def _record_from_json(value, field):
+    """Coerce 'value' to a mapping, if set or not nullable."""
+    if _not_null(value, field):
+        record = {}
+        for subfield, cell in zip(field.fields, value['f']):
+            converter = _CELLDATA_FROM_JSON[subfield.field_type]
+            # Use the subfield (not the enclosing record field) to pick
+            # the mode and to convert each cell value.
+            if subfield.mode == 'REPEATED':
+                converted = [converter(item, subfield)
+                             for item in cell['v']]
+            else:
+                converted = converter(cell['v'], subfield)
+            record[subfield.name] = converted
+        return record
+
+
+def _string_from_json(value, _):
+    """NOOP string -> string coercion"""
+    return value
+
+
+_CELLDATA_FROM_JSON = {
+    'INTEGER': _int_from_json,
+    'FLOAT': _float_from_json,
+    'BOOLEAN': _bool_from_json,
+    'TIMESTAMP': _datetime_from_json,
+    'RECORD': _record_from_json,
+    'STRING': _string_from_json,
+}
+
+
+def _rows_from_json(rows, schema):
+    """Convert JSON row data to rows w/ appropriate types."""
+    rows_data = []
+    for row in rows:
+        row_data = []
+        for field, cell in zip(schema, row['f']):
+            converter = _CELLDATA_FROM_JSON[field.field_type]
+            if field.mode == 'REPEATED':
+                row_data.append([converter(item, field)
+                                 for item in cell['v']])
+            else:
+                row_data.append(converter(cell['v'], field))
+        rows_data.append(tuple(row_data))
+    return rows_data
+
+
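For orientation, here is a minimal sketch of how the converter table above and ``_rows_from_json`` consume the cell-oriented JSON the BigQuery API returns. The ``FieldStub`` namedtuple is a hypothetical stand-in for ``gcloud.bigquery.table.SchemaField``, assumed only to expose the attributes the converters read:

    import collections

    FieldStub = collections.namedtuple(
        'FieldStub', ['name', 'field_type', 'mode', 'fields'])

    schema = [
        FieldStub('full_name', 'STRING', 'NULLABLE', ()),
        FieldStub('age', 'INTEGER', 'NULLABLE', ()),
    ]
    # Each row arrives as {'f': [{'v': <cell>}, ...]}, one cell per field.
    payload = [
        {'f': [{'v': 'Phred Phlyntstone'}, {'v': '32'}]},
        {'f': [{'v': 'Wylma Phlyntstone'}, {'v': '29'}]},
    ]
    rows = _rows_from_json(payload, schema)
    # -> [('Phred Phlyntstone', 32), ('Wylma Phlyntstone', 29)]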
+class _ConfigurationProperty(object):
+    """Base property implementation.
+
+    Values will be stored on a `_configuration` helper attribute of the
+    property's job instance.
+
+    :type name: string
+    :param name:  name of the property
+    """
+
+    def __init__(self, name):
+        self.name = name
+        self._backing_name = '_%s' % (self.name,)
+
+    def __get__(self, instance, owner):
+        """Descriptor protocal:  accesstor"""
+        if instance is None:
+            return self
+        return getattr(instance._configuration, self._backing_name)
+
+    def _validate(self, value):
+        """Subclasses override to impose validation policy."""
+        pass
+
+    def __set__(self, instance, value):
+        """Descriptor protocal:  mutator"""
+        self._validate(value)
+        setattr(instance._configuration, self._backing_name, value)
+
+    def __delete__(self, instance):
+        """Descriptor protocal:  deleter"""
+        delattr(instance._configuration, self._backing_name)
+
+
+class _TypedProperty(_ConfigurationProperty):
+    """Property implementation:  validates based on value type.
+
+    :type name: string
+    :param name:  name of the property
+
+    :type property_type: type or sequence of types
+    :param property_type: type to be validated
+    """
+    def __init__(self, name, property_type):
+        super(_TypedProperty, self).__init__(name)
+        self.property_type = property_type
+
+    def _validate(self, value):
+        """Ensure that 'value' is of the appropriate type.
+
+        :raises: ValueError on a type mismatch.
+        """
+        if not isinstance(value, self.property_type):
+            raise ValueError('Required type: %s' % (self.property_type,))
+
+
+class _EnumProperty(_ConfigurationProperty):
+    """Psedo-enumeration class.
+
+    Subclasses must define ``ALLOWED`` as a class-level constant:  it must
+    be a sequence of strings.
+
+    :type name: string
+    :param name:  name of the property
+    """
+    def _validate(self, value):
+        """Check that ``value`` is one of the allowed values.
+
+        :raises: ValueError if value is not allowed.
+        """
+        if value not in self.ALLOWED:
+            raise ValueError('Pass one of: %s' % ', '.join(self.ALLOWED))
+
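As a hedged illustration of the descriptor protocol implemented above, a hypothetical job class mirroring how ``gcloud.bigquery.job`` wires these properties to a ``_configuration`` holder:

    class _Config(object):
        _priority = None

    class Priority(_EnumProperty):
        ALLOWED = ('INTERACTIVE', 'BATCH')

    class FakeJob(object):
        priority = Priority('priority')

        def __init__(self):
            self._configuration = _Config()

    job = FakeJob()
    job.priority = 'BATCH'    # validated, then stored on job._configuration
    # job.priority = 'BOGUS'  # would raise ValueError('Pass one of: ...')
    del job.priority          # removes the backing attribute again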
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigquery/client.html b/0.18.1/_modules/gcloud/bigquery/client.html new file mode 100644 index 000000000000..a7f676f25395 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/client.html @@ -0,0 +1,509 @@ + + + + + + + + gcloud.bigquery.client — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google BigQuery API."""
+
+
+from gcloud.client import JSONClient
+from gcloud.bigquery.connection import Connection
+from gcloud.bigquery.dataset import Dataset
+from gcloud.bigquery.job import CopyJob
+from gcloud.bigquery.job import ExtractTableToStorageJob
+from gcloud.bigquery.job import LoadTableFromStorageJob
+from gcloud.bigquery.job import QueryJob
+from gcloud.bigquery.query import QueryResults
+
+
+
[docs]class Client(JSONClient): + """Client to bundle configuration needed for API requests. + + :type project: str + :param project: the project which the client acts on behalf of. Will be + passed when creating a dataset / job. If not passed, + falls back to the default inferred from the environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + +
[docs] def list_datasets(self, include_all=False, max_results=None, + page_token=None): + """List datasets for the project associated with this client. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/datasets/list + + :type include_all: boolean + :param include_all: True if results include hidden datasets. + + :type max_results: int + :param max_results: maximum number of datasets to return, If not + passed, defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of datasets. If + not passed, the API will return the first page of + datasets. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.bigquery.dataset.Dataset`, plus a + "next page token" string: if the token is not None, + indicates that more datasets can be retrieved with another + call (pass that value as ``page_token``). + """ + params = {} + + if include_all: + params['all'] = True + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/datasets' % (self.project,) + resp = self.connection.api_request(method='GET', path=path, + query_params=params) + datasets = [Dataset.from_api_repr(resource, self) + for resource in resp.get('datasets', ())] + return datasets, resp.get('nextPageToken')
+ +
[docs] def dataset(self, dataset_name): + """Construct a dataset bound to this client. + + :type dataset_name: str + :param dataset_name: Name of the dataset. + + :rtype: :class:`gcloud.bigquery.dataset.Dataset` + :returns: a new ``Dataset`` instance + """ + return Dataset(dataset_name, client=self)
+ +
[docs] def job_from_resource(self, resource): + """Detect correct job type from resource and instantiate. + + :type resource: dict + :param resource: one job resource from API response + + :rtype: One of: + :class:`gcloud.bigquery.job.LoadTableFromStorageJob`, + :class:`gcloud.bigquery.job.CopyJob`, + :class:`gcloud.bigquery.job.ExtractTableToStorageJob`, + :class:`gcloud.bigquery.job.QueryJob`, + :class:`gcloud.bigquery.job.RunSyncQueryJob` + :returns: the job instance, constructed via the resource + """ + config = resource['configuration'] + if 'load' in config: + return LoadTableFromStorageJob.from_api_repr(resource, self) + elif 'copy' in config: + return CopyJob.from_api_repr(resource, self) + elif 'extract' in config: + return ExtractTableToStorageJob.from_api_repr(resource, self) + elif 'query' in config: + return QueryJob.from_api_repr(resource, self) + raise ValueError('Cannot parse job resource')
+ +
[docs] def list_jobs(self, max_results=None, page_token=None, all_users=None, + state_filter=None): + """List jobs for the project associated with this client. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/list + + :type max_results: int + :param max_results: maximum number of jobs to return, If not + passed, defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of jobs. If + not passed, the API will return the first page of + jobs. + + :type all_users: boolean + :param all_users: if true, include jobs owned by all users in the + project. + + :type state_filter: str + :param state_filter: if passed, include only jobs matching the given + state. One of + + * ``"done"`` + * ``"pending"`` + * ``"running"`` + + :rtype: tuple, (list, str) + :returns: list of job instances, plus a "next page token" string: + if the token is not ``None``, indicates that more jobs can be + retrieved with another call, passing that value as + ``page_token``). + """ + params = {'projection': 'full'} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + if all_users is not None: + params['allUsers'] = all_users + + if state_filter is not None: + params['stateFilter'] = state_filter + + path = '/projects/%s/jobs' % (self.project,) + resp = self.connection.api_request(method='GET', path=path, + query_params=params) + jobs = [self.job_from_resource(resource) + for resource in resp.get('jobs', ())] + return jobs, resp.get('nextPageToken')
+ +
[docs] def load_table_from_storage(self, job_name, destination, *source_uris): + """Construct a job for loading data into a table from CloudStorage. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load + + :type job_name: str + :param job_name: Name of the job. + + :type destination: :class:`gcloud.bigquery.table.Table` + :param destination: Table into which data is to be loaded. + + :type source_uris: sequence of string + :param source_uris: URIs of data files to be loaded; in format + ``gs://<bucket_name>/<object_name_or_glob>``. + + :rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob` + :returns: a new ``LoadTableFromStorageJob`` instance + """ + return LoadTableFromStorageJob(job_name, destination, source_uris, + client=self)
+ +
[docs] def copy_table(self, job_name, destination, *sources): + """Construct a job for copying one or more tables into another table. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy + + :type job_name: str + :param job_name: Name of the job. + + :type destination: :class:`gcloud.bigquery.table.Table` + :param destination: Table into which data is to be copied. + + :type sources: sequence of :class:`gcloud.bigquery.table.Table` + :param sources: tables to be copied. + + :rtype: :class:`gcloud.bigquery.job.CopyJob` + :returns: a new ``CopyJob`` instance + """ + return CopyJob(job_name, destination, sources, client=self)
+ +
[docs] def extract_table_to_storage(self, job_name, source, *destination_uris): + """Construct a job for extracting a table into Cloud Storage files. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract + + :type job_name: str + :param job_name: Name of the job. + + :type source: :class:`gcloud.bigquery.table.Table` + :param source: table to be extracted. + + :type destination_uris: sequence of string + :param destination_uris: URIs of CloudStorage file(s) into which + table data is to be extracted; in format + ``gs://<bucket_name>/<object_name_or_glob>``. + + :rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob` + :returns: a new ``ExtractTableToStorageJob`` instance + """ + return ExtractTableToStorageJob(job_name, source, destination_uris, + client=self)
+ +
[docs] def run_async_query(self, job_name, query): + """Construct a job for running a SQL query asynchronously. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query + + :type job_name: str + :param job_name: Name of the job. + + :type query: str + :param query: SQL query to be executed + + :rtype: :class:`gcloud.bigquery.job.QueryJob` + :returns: a new ``QueryJob`` instance + """ + return QueryJob(job_name, query, client=self)
+ +
[docs] def run_sync_query(self, query): + """Run a SQL query synchronously. + + :type query: str + :param query: SQL query to be executed + + :rtype: :class:`gcloud.bigquery.query.QueryResults` + :returns: a new ``QueryResults`` instance + """ + return QueryResults(query, client=self)
+
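A usage sketch tying the client methods above together (the project name and query are illustrative, not defaults):

    from gcloud import bigquery

    client = bigquery.Client(project='my-project')

    datasets, token = client.list_datasets()
    for dataset in datasets:
        print(dataset.name)

    results = client.run_sync_query('SELECT 17')
    results.run()   # QueryResults.run() executes the query immediately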
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigquery/connection.html b/0.18.1/_modules/gcloud/bigquery/connection.html new file mode 100644 index 000000000000..e505a1e463e1 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/connection.html @@ -0,0 +1,267 @@ + + + + + + + + gcloud.bigquery.connection — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery.connection

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud bigquery connections."""
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Cloud BigQuery via the JSON REST API.""" + + API_BASE_URL = 'https://www.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v2' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/bigquery/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/bigquery', + 'https://www.googleapis.com/auth/cloud-platform') + """The scopes required for authenticating as a Cloud BigQuery consumer."""
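As a point of reference, the constants above are combined by ``build_api_url`` (inherited from ``gcloud.connection.JSONConnection``) into request URLs; the project name below is illustrative:

    url = Connection.build_api_url('/projects/my-project/datasets')
    # -> 'https://www.googleapis.com/bigquery/v2/projects/my-project/datasets'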
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigquery/dataset.html b/0.18.1/_modules/gcloud/bigquery/dataset.html new file mode 100644 index 000000000000..c05107be4f75 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/dataset.html @@ -0,0 +1,813 @@ + + + + + + + + gcloud.bigquery.dataset — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery.dataset

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Datasets."""
+import six
+
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud.exceptions import NotFound
+from gcloud.bigquery.table import Table
+
+
+
[docs]class AccessGrant(object): + """Represent grant of an access role to an entity. + + Every entry in the access list will have exactly one of + ``userByEmail``, ``groupByEmail``, ``domain``, ``specialGroup`` or + ``view`` set. And if anything but ``view`` is set, it'll also have a + ``role`` specified. ``role`` is omitted for a ``view``, since + ``view`` s are always read-only. + + See https://cloud.google.com/bigquery/docs/reference/v2/datasets. + + :type role: string + :param role: Role granted to the entity. One of + + * ``'OWNER'`` + * ``'WRITER'`` + * ``'READER'`` + + May also be ``None`` if the ``entity_type`` is ``view``. + + :type entity_type: string + :param entity_type: Type of entity being granted the role. One of + :attr:`ENTITY_TYPES`. + + :type entity_id: string + :param entity_id: ID of entity being granted the role. + + :raises: :class:`ValueError` if the ``entity_type`` is not among + :attr:`ENTITY_TYPES`, or if a ``view`` has ``role`` set or + a non ``view`` **does not** have a ``role`` set. + """ + + ENTITY_TYPES = frozenset(['userByEmail', 'groupByEmail', 'domain', + 'specialGroup', 'view']) + """Allowed entity types.""" + + def __init__(self, role, entity_type, entity_id): + if entity_type not in self.ENTITY_TYPES: + message = 'Entity type %r not among: %s' % ( + entity_type, ', '.join(self.ENTITY_TYPES)) + raise ValueError(message) + if entity_type == 'view': + if role is not None: + raise ValueError('Role must be None for a view. Received ' + 'role: %r' % (role,)) + else: + if role is None: + raise ValueError('Role must be set for entity ' + 'type %r' % (entity_type,)) + + self.role = role + self.entity_type = entity_type + self.entity_id = entity_id + + def __eq__(self, other): + return ( + self.role == other.role and + self.entity_type == other.entity_type and + self.entity_id == other.entity_id) + + def __repr__(self): + return '<AccessGrant: role=%s, %s=%s>' % ( + self.role, self.entity_type, self.entity_id)
+ + +
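A short sketch of the validation rules just described (entity IDs are illustrative):

    grant = AccessGrant('OWNER', 'userByEmail', 'phred@example.com')

    view = {'projectId': 'my-project', 'datasetId': 'shared',
            'tableId': 'my_view'}
    view_grant = AccessGrant(None, 'view', view)   # views never carry a role

    # Both of the following raise ValueError:
    #   AccessGrant('READER', 'view', view)          # role must be None
    #   AccessGrant(None, 'domain', 'example.com')   # role is required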
[docs]class Dataset(object): + """Datasets are containers for tables. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/datasets + + :type name: string + :param name: the name of the dataset + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + + :type access_grants: list of :class:`AccessGrant` + :param access_grants: roles granted to entities for this dataset + """ + + _access_grants = None + + def __init__(self, name, client, access_grants=()): + self.name = name + self._client = client + self._properties = {} + # Let the @property do validation. + self.access_grants = access_grants + + @property + def project(self): + """Project bound to the dataset. + + :rtype: string + :returns: the project (derived from the client). + """ + return self._client.project + + @property + def path(self): + """URL path for the dataset's APIs. + + :rtype: string + :returns: the path based on project and dataste name. + """ + return '/projects/%s/datasets/%s' % (self.project, self.name) + + @property + def access_grants(self): + """Dataset's access grants. + + :rtype: list of :class:`AccessGrant` + :returns: roles granted to entities for this dataset + """ + return list(self._access_grants) + + @access_grants.setter + def access_grants(self, value): + """Update dataset's access grants + + :type value: list of :class:`AccessGrant` + :param value: roles granted to entities for this dataset + + :raises: TypeError if 'value' is not a sequence, or ValueError if + any item in the sequence is not an AccessGrant + """ + if not all(isinstance(field, AccessGrant) for field in value): + raise ValueError('Values must be AccessGrant instances') + self._access_grants = tuple(value) + + @property + def created(self): + """Datetime at which the dataset was created. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the creation time (None until set from the server). + """ + creation_time = self._properties.get('creationTime') + if creation_time is not None: + # creation_time will be in milliseconds. + return _datetime_from_microseconds(1000.0 * creation_time) + + @property + def dataset_id(self): + """ID for the dataset resource. + + :rtype: string, or ``NoneType`` + :returns: the ID (None until set from the server). + """ + return self._properties.get('id') + + @property + def etag(self): + """ETag for the dataset resource. + + :rtype: string, or ``NoneType`` + :returns: the ETag (None until set from the server). + """ + return self._properties.get('etag') + + @property + def modified(self): + """Datetime at which the dataset was last modified. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the modification time (None until set from the server). + """ + modified_time = self._properties.get('lastModifiedTime') + if modified_time is not None: + # modified_time will be in milliseconds. + return _datetime_from_microseconds(1000.0 * modified_time) + + @property + def self_link(self): + """URL for the dataset resource. + + :rtype: string, or ``NoneType`` + :returns: the URL (None until set from the server). + """ + return self._properties.get('selfLink') + + @property + def default_table_expiration_ms(self): + """Default expiration time for tables in the dataset. + + :rtype: integer, or ``NoneType`` + :returns: The time in milliseconds, or None (the default). 
+ """ + return self._properties.get('defaultTableExpirationMs') + + @default_table_expiration_ms.setter + def default_table_expiration_ms(self, value): + """Update default expiration time for tables in the dataset. + + :type value: integer, or ``NoneType`` + :param value: new default time, in milliseconds + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.integer_types) and value is not None: + raise ValueError("Pass an integer, or None") + self._properties['defaultTableExpirationMs'] = value + + @property + def description(self): + """Description of the dataset. + + :rtype: string, or ``NoneType`` + :returns: The description as set by the user, or None (the default). + """ + return self._properties.get('description') + + @description.setter + def description(self, value): + """Update description of the dataset. + + :type value: string, or ``NoneType`` + :param value: new description + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['description'] = value + + @property + def friendly_name(self): + """Title of the dataset. + + :rtype: string, or ``NoneType`` + :returns: The name as set by the user, or None (the default). + """ + return self._properties.get('friendlyName') + + @friendly_name.setter + def friendly_name(self, value): + """Update title of the dataset. + + :type value: string, or ``NoneType`` + :param value: new title + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['friendlyName'] = value + + @property + def location(self): + """Location in which the dataset is hosted. + + :rtype: string, or ``NoneType`` + :returns: The location as set by the user, or None (the default). + """ + return self._properties.get('location') + + @location.setter + def location(self, value): + """Update location in which the dataset is hosted. + + :type value: string, or ``NoneType`` + :param value: new location + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['location'] = value + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a dataset given its API representation + + :type resource: dict + :param resource: dataset resource representation returned from the API + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: Client which holds credentials and project + configuration for the dataset. + + :rtype: :class:`gcloud.bigquery.dataset.Dataset` + :returns: Dataset parsed from ``resource``. + """ + if ('datasetReference' not in resource or + 'datasetId' not in resource['datasetReference']): + raise KeyError('Resource lacks required identity information:' + '["datasetReference"]["datasetId"]') + name = resource['datasetReference']['datasetId'] + dataset = cls(name, client=client) + dataset._set_properties(resource) + return dataset
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: :class:`gcloud.bigquery.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + + @staticmethod + def _parse_access_grants(access): + """Parse a resource fragment into a set of access grants. + + ``role`` augments the entity type and present **unless** the entity + type is ``view``. + + :type access: list of mappings + :param access: each mapping represents a single access grant + + :rtype: list of :class:`AccessGrant` + :returns: a list of parsed grants + :raises: :class:`ValueError` if a grant in ``access`` has more keys + than ``role`` and one additional key. + """ + result = [] + for grant in access: + grant = grant.copy() + role = grant.pop('role', None) + entity_type, entity_id = grant.popitem() + if len(grant) != 0: + raise ValueError('Grant has unexpected keys remaining.', grant) + result.append( + AccessGrant(role, entity_type, entity_id)) + return result + + def _set_properties(self, api_response): + """Update properties from resource in body of ``api_response`` + + :type api_response: httplib2.Response + :param api_response: response returned from an API call + """ + self._properties.clear() + cleaned = api_response.copy() + access = cleaned.pop('access', ()) + self.access_grants = self._parse_access_grants(access) + if 'creationTime' in cleaned: + cleaned['creationTime'] = float(cleaned['creationTime']) + if 'lastModifiedTime' in cleaned: + cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime']) + if 'defaultTableExpirationMs' in cleaned: + cleaned['defaultTableExpirationMs'] = int( + cleaned['defaultTableExpirationMs']) + self._properties.update(cleaned) + + def _build_access_resource(self): + """Generate a resource fragment for dataset's access grants.""" + result = [] + for grant in self.access_grants: + info = {grant.entity_type: grant.entity_id} + if grant.role is not None: + info['role'] = grant.role + result.append(info) + return result + + def _build_resource(self): + """Generate a resource for ``create`` or ``update``.""" + resource = { + 'datasetReference': { + 'projectId': self.project, 'datasetId': self.name}, + } + if self.default_table_expiration_ms is not None: + value = self.default_table_expiration_ms + resource['defaultTableExpirationMs'] = value + + if self.description is not None: + resource['description'] = self.description + + if self.friendly_name is not None: + resource['friendlyName'] = self.friendly_name + + if self.location is not None: + resource['location'] = self.location + + if len(self.access_grants) > 0: + resource['access'] = self._build_access_resource() + + return resource + +
[docs] def create(self, client=None): + """API call: create the dataset via a PUT request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tables/insert + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + path = '/projects/%s/datasets' % (self.project,) + api_response = client.connection.api_request( + method='POST', path=path, data=self._build_resource()) + self._set_properties(api_response)
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the dataset via a GET request + + See + https://cloud.google.com/bigquery/docs/reference/v2/datasets/get + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: bool + :returns: Boolean indicating existence of the dataset. + """ + client = self._require_client(client) + + try: + client.connection.api_request(method='GET', path=self.path, + query_params={'fields': 'id'}) + except NotFound: + return False + else: + return True
+ +
[docs] def reload(self, client=None): + """API call: refresh dataset properties via a GET request + + See + https://cloud.google.com/bigquery/docs/reference/v2/datasets/get + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + + api_response = client.connection.api_request( + method='GET', path=self.path) + self._set_properties(api_response)
+ +
[docs] def patch(self, client=None, **kw): + """API call: update individual dataset properties via a PATCH request + + See + https://cloud.google.com/bigquery/docs/reference/v2/datasets/patch + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :type kw: ``dict`` + :param kw: properties to be patched. + + :raises: ValueError for invalid value types. + """ + client = self._require_client(client) + + partial = {} + + if 'default_table_expiration_ms' in kw: + value = kw['default_table_expiration_ms'] + if not isinstance(value, six.integer_types) and value is not None: + raise ValueError("Pass an integer, or None") + partial['defaultTableExpirationMs'] = value + + if 'description' in kw: + partial['description'] = kw['description'] + + if 'friendly_name' in kw: + partial['friendlyName'] = kw['friendly_name'] + + if 'location' in kw: + partial['location'] = kw['location'] + + api_response = client.connection.api_request( + method='PATCH', path=self.path, data=partial) + self._set_properties(api_response)
+ +
[docs] def update(self, client=None): + """API call: update dataset properties via a PUT request + + See + https://cloud.google.com/bigquery/docs/reference/v2/datasets/update + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + api_response = client.connection.api_request( + method='PUT', path=self.path, data=self._build_resource()) + self._set_properties(api_response)
+ +
[docs] def delete(self, client=None): + """API call: delete the dataset via a DELETE request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tables/delete + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + client.connection.api_request(method='DELETE', path=self.path)
+ +
[docs] def list_tables(self, max_results=None, page_token=None): + """List tables for the project associated with this client. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tables/list + + :type max_results: int + :param max_results: maximum number of tables to return, If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of datasets. If + not passed, the API will return the first page of + datasets. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.bigquery.table.Table`, plus a + "next page token" string: if not ``None``, indicates that + more tables can be retrieved with another call (pass that + value as ``page_token``). + """ + params = {} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/datasets/%s/tables' % (self.project, self.name) + connection = self._client.connection + resp = connection.api_request(method='GET', path=path, + query_params=params) + tables = [Table.from_api_repr(resource, self) + for resource in resp.get('tables', ())] + return tables, resp.get('nextPageToken')
+ +
[docs] def table(self, name, schema=()): + """Construct a table bound to this dataset. + + :type name: string + :param name: Name of the table. + + :type schema: list of :class:`gcloud.bigquery.table.SchemaField` + :param schema: The table's schema + + :rtype: :class:`gcloud.bigquery.table.Table` + :returns: a new ``Table`` instance + """ + return Table(name, dataset=self, schema=schema)
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigquery/job.html b/0.18.1/_modules/gcloud/bigquery/job.html new file mode 100644 index 000000000000..06856765b6c3 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/job.html @@ -0,0 +1,1339 @@ + + + + + + + + gcloud.bigquery.job — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery.job

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Jobs."""
+
+import six
+
+from gcloud.exceptions import NotFound
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud.bigquery.dataset import Dataset
+from gcloud.bigquery.table import SchemaField
+from gcloud.bigquery.table import Table
+from gcloud.bigquery.table import _build_schema_resource
+from gcloud.bigquery.table import _parse_schema_resource
+from gcloud.bigquery._helpers import _EnumProperty
+from gcloud.bigquery._helpers import _TypedProperty
+
+
+
[docs]class UDFResource(object): + """Describe a single user-defined function (UDF) resource. + :type udf_type: str + :param udf_type: the type of the resource ('inlineCode' or 'resourceUri') + + :type value: str + :param value: the inline code or resource URI + + See + https://cloud.google.com/bigquery/user-defined-functions#api + """ + def __init__(self, udf_type, value): + self.udf_type = udf_type + self.value = value + + def __eq__(self, other): + return( + self.udf_type == other.udf_type and + self.value == other.value)
+ + +def _build_udf_resources(resources): + """ + :type resources: sequence of :class:`UDFResource` + :param resources: fields to be appended + + :rtype: mapping + :returns: a mapping describing userDefinedFunctionResources for the query. + """ + udfs = [] + for resource in resources: + udf = {resource.udf_type: resource.value} + udfs.append(udf) + return udfs + + +
[docs]class UDFResourcesProperty(object): + """Custom property type for :class:`QueryJob`. + + Also used by :class:`~gcloud.bigquery.query.Query`. + """ + def __get__(self, instance, owner): + """Descriptor protocol: accessor""" + if instance is None: + return self + return list(instance._udf_resources) + + def __set__(self, instance, value): + """Descriptor protocol: mutator""" + if not all(isinstance(u, UDFResource) for u in value): + raise ValueError("udf items must be UDFResource") + instance._udf_resources = tuple(value)
+ + +
[docs]class Compression(_EnumProperty): + """Pseudo-enum for ``compression`` properties.""" + GZIP = 'GZIP' + NONE = 'NONE' + ALLOWED = (GZIP, NONE)
+ + +
[docs]class CreateDisposition(_EnumProperty): + """Pseudo-enum for ``create_disposition`` properties.""" + CREATE_IF_NEEDED = 'CREATE_IF_NEEDED' + CREATE_NEVER = 'CREATE_NEVER' + ALLOWED = (CREATE_IF_NEEDED, CREATE_NEVER)
+ + +
[docs]class DestinationFormat(_EnumProperty): + """Pseudo-enum for ``destination_format`` properties.""" + CSV = 'CSV' + NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' + AVRO = 'AVRO' + ALLOWED = (CSV, NEWLINE_DELIMITED_JSON, AVRO)
+ + +
[docs]class Encoding(_EnumProperty): + """Pseudo-enum for ``encoding`` properties.""" + UTF_8 = 'UTF-8' + ISO_8859_1 = 'ISO-8859-1' + ALLOWED = (UTF_8, ISO_8859_1)
+ + +
[docs]class QueryPriority(_EnumProperty): + """Pseudo-enum for ``QueryJob.priority`` property.""" + INTERACTIVE = 'INTERACTIVE' + BATCH = 'BATCH' + ALLOWED = (INTERACTIVE, BATCH)
+ + +
[docs]class SourceFormat(_EnumProperty): + """Pseudo-enum for ``source_format`` properties.""" + CSV = 'CSV' + DATASTORE_BACKUP = 'DATASTORE_BACKUP' + NEWLINE_DELIMITED_JSON = 'NEWLINE_DELIMITED_JSON' + ALLOWED = (CSV, DATASTORE_BACKUP, NEWLINE_DELIMITED_JSON)
+ + +
[docs]class WriteDisposition(_EnumProperty): + """Pseudo-enum for ``write_disposition`` properties.""" + WRITE_APPEND = 'WRITE_APPEND' + WRITE_TRUNCATE = 'WRITE_TRUNCATE' + WRITE_EMPTY = 'WRITE_EMPTY' + ALLOWED = (WRITE_APPEND, WRITE_TRUNCATE, WRITE_EMPTY)
+ + +class _BaseJob(object): + """Base class for jobs. + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + """ + def __init__(self, client): + self._client = client + self._properties = {} + + @property + def project(self): + """Project bound to the job. + + :rtype: string + :returns: the project (derived from the client). + """ + return self._client.project + + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: :class:`gcloud.bigquery.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + + +class _AsyncJob(_BaseJob): + """Base class for asynchronous jobs. + + :type name: string + :param name: the name of the job + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + """ + def __init__(self, name, client): + super(_AsyncJob, self).__init__(client) + self.name = name + + @property + def job_type(self): + """Type of job + + :rtype: string + :returns: one of 'load', 'copy', 'extract', 'query' + """ + return self._JOB_TYPE + + @property + def path(self): + """URL path for the job's APIs. + + :rtype: string + :returns: the path based on project and job name. + """ + return '/projects/%s/jobs/%s' % (self.project, self.name) + + @property + def etag(self): + """ETag for the job resource. + + :rtype: string, or ``NoneType`` + :returns: the ETag (None until set from the server). + """ + return self._properties.get('etag') + + @property + def self_link(self): + """URL for the job resource. + + :rtype: string, or ``NoneType`` + :returns: the URL (None until set from the server). + """ + return self._properties.get('selfLink') + + @property + def user_email(self): + """E-mail address of user who submitted the job. + + :rtype: string, or ``NoneType`` + :returns: the URL (None until set from the server). + """ + return self._properties.get('user_email') + + @property + def created(self): + """Datetime at which the job was created. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the creation time (None until set from the server). + """ + statistics = self._properties.get('statistics') + if statistics is not None: + millis = statistics.get('creationTime') + if millis is not None: + return _datetime_from_microseconds(millis * 1000.0) + + @property + def started(self): + """Datetime at which the job was started. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the start time (None until set from the server). + """ + statistics = self._properties.get('statistics') + if statistics is not None: + millis = statistics.get('startTime') + if millis is not None: + return _datetime_from_microseconds(millis * 1000.0) + + @property + def ended(self): + """Datetime at which the job finished. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the end time (None until set from the server). 
+ """ + statistics = self._properties.get('statistics') + if statistics is not None: + millis = statistics.get('endTime') + if millis is not None: + return _datetime_from_microseconds(millis * 1000.0) + + @property + def error_result(self): + """Error information about the job as a whole. + + :rtype: mapping, or ``NoneType`` + :returns: the error information (None until set from the server). + """ + status = self._properties.get('status') + if status is not None: + return status.get('errorResult') + + @property + def errors(self): + """Information about individual errors generated by the job. + + :rtype: list of mappings, or ``NoneType`` + :returns: the error information (None until set from the server). + """ + status = self._properties.get('status') + if status is not None: + return status.get('errors') + + @property + def state(self): + """Status of the job. + + :rtype: string, or ``NoneType`` + :returns: the state (None until set from the server). + """ + status = self._properties.get('status') + if status is not None: + return status.get('state') + + def _scrub_local_properties(self, cleaned): + """Helper: handle subclass properties in cleaned.""" + pass + + def _set_properties(self, api_response): + """Update properties from resource in body of ``api_response`` + + :type api_response: httplib2.Response + :param api_response: response returned from an API call + """ + cleaned = api_response.copy() + self._scrub_local_properties(cleaned) + + statistics = cleaned.get('statistics', {}) + if 'creationTime' in statistics: + statistics['creationTime'] = float(statistics['creationTime']) + if 'startTime' in statistics: + statistics['startTime'] = float(statistics['startTime']) + if 'endTime' in statistics: + statistics['endTime'] = float(statistics['endTime']) + + self._properties.clear() + self._properties.update(cleaned) + + @classmethod + def _get_resource_config(cls, resource): + """Helper for :meth:`from_api_repr` + + :type resource: dict + :param resource: resource for the job + + :rtype: dict + :returns: tuple (string, dict), where the first element is the + job name and the second contains job-specific configuration. + :raises: :class:`KeyError` if the resource has no identifier, or + is missing the appropriate configuration. + """ + if ('jobReference' not in resource or + 'jobId' not in resource['jobReference']): + raise KeyError('Resource lacks required identity information: ' + '["jobReference"]["jobId"]') + name = resource['jobReference']['jobId'] + if ('configuration' not in resource or + cls._JOB_TYPE not in resource['configuration']): + raise KeyError('Resource lacks required configuration: ' + '["configuration"]["%s"]' % cls._JOB_TYPE) + config = resource['configuration'][cls._JOB_TYPE] + return name, config + + def begin(self, client=None): + """API call: begin the job via a POST request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/insert + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. 
+ """ + client = self._require_client(client) + path = '/projects/%s/jobs' % (self.project,) + api_response = client.connection.api_request( + method='POST', path=path, data=self._build_resource()) + self._set_properties(api_response) + + def exists(self, client=None): + """API call: test for the existence of the job via a GET request + + See + https://cloud.google.com/bigquery/docs/reference/v2/jobs/get + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: bool + :returns: Boolean indicating existence of the job. + """ + client = self._require_client(client) + + try: + client.connection.api_request(method='GET', path=self.path, + query_params={'fields': 'id'}) + except NotFound: + return False + else: + return True + + def reload(self, client=None): + """API call: refresh job properties via a GET request + + See + https://cloud.google.com/bigquery/docs/reference/v2/jobs/get + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + + api_response = client.connection.api_request( + method='GET', path=self.path) + self._set_properties(api_response) + + def cancel(self, client=None): + """API call: cancel job via a POST request + + See + https://cloud.google.com/bigquery/docs/reference/v2/jobs/cancel + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + + api_response = client.connection.api_request( + method='POST', path='%s/cancel' % (self.path,)) + self._set_properties(api_response) + + +class _LoadConfiguration(object): + """User-settable configuration options for load jobs. + + Values which are ``None`` -> server defaults. + """ + _allow_jagged_rows = None + _allow_quoted_newlines = None + _create_disposition = None + _encoding = None + _field_delimiter = None + _ignore_unknown_values = None + _max_bad_records = None + _quote_character = None + _skip_leading_rows = None + _source_format = None + _write_disposition = None + + +
[docs]class LoadTableFromStorageJob(_AsyncJob): + """Asynchronous job for loading data into a table from CloudStorage. + + :type name: string + :param name: the name of the job + + :type destination: :class:`gcloud.bigquery.table.Table` + :param destination: Table into which data is to be loaded. + + :type source_uris: sequence of string + :param source_uris: URIs of one or more data files to be loaded, in + format ``gs://<bucket_name>/<object_name_or_glob>``. + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + + :type schema: list of :class:`gcloud.bigquery.table.SchemaField` + :param schema: The job's schema + """ + + _schema = None + _JOB_TYPE = 'load' + + def __init__(self, name, destination, source_uris, client, schema=()): + super(LoadTableFromStorageJob, self).__init__(name, client) + self.destination = destination + self.source_uris = source_uris + # Let the @property do validation. + self.schema = schema + self._configuration = _LoadConfiguration() + + @property + def schema(self): + """Table's schema. + + :rtype: list of :class:`SchemaField` + :returns: fields describing the schema + """ + return list(self._schema) + + @schema.setter + def schema(self, value): + """Update table's schema + + :type value: list of :class:`SchemaField` + :param value: fields describing the schema + + :raises: TypeError if 'value' is not a sequence, or ValueError if + any item in the sequence is not a SchemaField + """ + if not all(isinstance(field, SchemaField) for field in value): + raise ValueError('Schema items must be fields') + self._schema = tuple(value) + + @property + def input_file_bytes(self): + """Count of bytes loaded from source files. + + :rtype: integer, or ``NoneType`` + :returns: the count (None until set from the server). + """ + statistics = self._properties.get('statistics') + if statistics is not None: + return int(statistics['load']['inputFileBytes']) + + @property + def input_files(self): + """Count of source files. + + :rtype: integer, or ``NoneType`` + :returns: the count (None until set from the server). + """ + statistics = self._properties.get('statistics') + if statistics is not None: + return int(statistics['load']['inputFiles']) + + @property + def output_bytes(self): + """Count of bytes saved to destination table. + + :rtype: integer, or ``NoneType`` + :returns: the count (None until set from the server). + """ + statistics = self._properties.get('statistics') + if statistics is not None: + return int(statistics['load']['outputBytes']) + + @property + def output_rows(self): + """Count of rows saved to destination table. + + :rtype: integer, or ``NoneType`` + :returns: the count (None until set from the server). 
+ """ + statistics = self._properties.get('statistics') + if statistics is not None: + return int(statistics['load']['outputRows']) + + allow_jagged_rows = _TypedProperty('allow_jagged_rows', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowJaggedRows + """ + + allow_quoted_newlines = _TypedProperty('allow_quoted_newlines', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.allowQuotedNewlines + """ + + create_disposition = CreateDisposition('create_disposition') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.createDisposition + """ + + encoding = Encoding('encoding') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding + """ + + field_delimiter = _TypedProperty('field_delimiter', six.string_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.fieldDelimiter + """ + + ignore_unknown_values = _TypedProperty('ignore_unknown_values', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.ignoreUnknownValues + """ + + max_bad_records = _TypedProperty('max_bad_records', six.integer_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.maxBadRecords + """ + + quote_character = _TypedProperty('quote_character', six.string_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.quote + """ + + skip_leading_rows = _TypedProperty('skip_leading_rows', six.integer_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.skipLeadingRows + """ + + source_format = SourceFormat('source_format') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.sourceFormat + """ + + write_disposition = WriteDisposition('write_disposition') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.writeDisposition + """ + + def _populate_config_resource(self, configuration): + """Helper for _build_resource: copy config properties to resource""" + if self.allow_jagged_rows is not None: + configuration['allowJaggedRows'] = self.allow_jagged_rows + if self.allow_quoted_newlines is not None: + configuration['allowQuotedNewlines'] = self.allow_quoted_newlines + if self.create_disposition is not None: + configuration['createDisposition'] = self.create_disposition + if self.encoding is not None: + configuration['encoding'] = self.encoding + if self.field_delimiter is not None: + configuration['fieldDelimiter'] = self.field_delimiter + if self.ignore_unknown_values is not None: + configuration['ignoreUnknownValues'] = self.ignore_unknown_values + if self.max_bad_records is not None: + configuration['maxBadRecords'] = self.max_bad_records + if self.quote_character is not None: + configuration['quote'] = self.quote_character + if self.skip_leading_rows is not None: + configuration['skipLeadingRows'] = self.skip_leading_rows + if self.source_format is not None: + configuration['sourceFormat'] = self.source_format + if self.write_disposition is not None: + configuration['writeDisposition'] = self.write_disposition + + def _build_resource(self): + """Generate a resource for :meth:`begin`.""" + resource = { + 'jobReference': { + 'projectId': self.project, + 'jobId': self.name, + }, + 'configuration': { + self._JOB_TYPE: { + 'sourceUris': self.source_uris, + 'destinationTable': { + 'projectId': 
self.destination.project, + 'datasetId': self.destination.dataset_name, + 'tableId': self.destination.name, + }, + }, + }, + } + configuration = resource['configuration'][self._JOB_TYPE] + self._populate_config_resource(configuration) + + if len(self.schema) > 0: + configuration['schema'] = { + 'fields': _build_schema_resource(self.schema)} + + return resource + + def _scrub_local_properties(self, cleaned): + """Helper: handle subclass properties in cleaned.""" + schema = cleaned.pop('schema', {'fields': ()}) + self.schema = _parse_schema_resource(schema) + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a job given its API representation + + .. note: + + This method assumes that the project found in the resource matches + the client's project. + + :type resource: dict + :param resource: dataset job representation returned from the API + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: Client which holds credentials and project + configuration for the dataset. + + :rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob` + :returns: Job parsed from ``resource``. + """ + name, config = cls._get_resource_config(resource) + dest_config = config['destinationTable'] + dataset = Dataset(dest_config['datasetId'], client) + destination = Table(dest_config['tableId'], dataset) + source_urls = config.get('sourceUris', ()) + job = cls(name, destination, source_urls, client=client) + job._set_properties(resource) + return job
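A usage sketch for the load job above, assuming ``client`` is a configured Client (bucket, table, and job names are illustrative):

    table = client.dataset('my_dataset').table('person_ages')
    job = client.load_table_from_storage(
        'load-2016-08-01', table, 'gs://my-bucket/data-*.csv')
    job.source_format = SourceFormat.CSV
    job.skip_leading_rows = 1
    job.write_disposition = WriteDisposition.WRITE_TRUNCATE
    job.begin()   # POST to the jobs.insert endpoint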
+ + +class _CopyConfiguration(object): + """User-settable configuration options for copy jobs. + + Values which are ``None`` -> server defaults. + """ + _create_disposition = None + _write_disposition = None + + +
[docs]class CopyJob(_AsyncJob): + """Asynchronous job: copy data into a table from other tables. + + :type name: string + :param name: the name of the job + + :type destination: :class:`gcloud.bigquery.table.Table` + :param destination: Table into which data is to be copied. + + :type sources: list of :class:`gcloud.bigquery.table.Table` + :param sources: Tables from which data is to be copied. + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + """ + + _JOB_TYPE = 'copy' + + def __init__(self, name, destination, sources, client): + super(CopyJob, self).__init__(name, client) + self.destination = destination + self.sources = sources + self._configuration = _CopyConfiguration() + + create_disposition = CreateDisposition('create_disposition') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.createDisposition + """ + + write_disposition = WriteDisposition('write_disposition') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy.writeDisposition + """ + + def _populate_config_resource(self, configuration): + """Helper for _build_resource: copy config properties to resource""" + if self.create_disposition is not None: + configuration['createDisposition'] = self.create_disposition + if self.write_disposition is not None: + configuration['writeDisposition'] = self.write_disposition + + def _build_resource(self): + """Generate a resource for :meth:`begin`.""" + + source_refs = [{ + 'projectId': table.project, + 'datasetId': table.dataset_name, + 'tableId': table.name, + } for table in self.sources] + + resource = { + 'jobReference': { + 'projectId': self.project, + 'jobId': self.name, + }, + 'configuration': { + self._JOB_TYPE: { + 'sourceTables': source_refs, + 'destinationTable': { + 'projectId': self.destination.project, + 'datasetId': self.destination.dataset_name, + 'tableId': self.destination.name, + }, + }, + }, + } + configuration = resource['configuration'][self._JOB_TYPE] + self._populate_config_resource(configuration) + + return resource + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a job given its API representation + + .. note:: + + This method assumes that the project found in the resource matches + the client's project. + + :type resource: dict + :param resource: job representation returned from the API + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: Client which holds credentials and project + configuration for the dataset. + + :rtype: :class:`gcloud.bigquery.job.CopyJob` + :returns: Job parsed from ``resource``. + """ + name, config = cls._get_resource_config(resource) + dest_config = config['destinationTable'] + dataset = Dataset(dest_config['datasetId'], client) + destination = Table(dest_config['tableId'], dataset) + sources = [] + for source_config in config['sourceTables']: + dataset = Dataset(source_config['datasetId'], client) + sources.append(Table(source_config['tableId'], dataset)) + job = cls(name, destination, sources, client=client) + job._set_properties(resource) + return job
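+# Usage sketch (illustrative only): a copy job built via the client
+# factory, assuming both tables already exist; all names are placeholders.
+#
+#     from gcloud import bigquery
+#
+#     client = bigquery.Client(project='my-project')
+#     dataset = client.dataset('my_dataset')
+#     job = client.copy_table(
+#         'copy-job-1', dataset.table('dest'), dataset.table('source'))
+#     job.write_disposition = 'WRITE_TRUNCATE'
+#     job.begin()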
+ + +class _ExtractConfiguration(object): + """User-settable configuration options for extract jobs. + + Values which are ``None`` -> server defaults. + """ + _compression = None + _destination_format = None + _field_delimiter = None + _print_header = None + + +
[docs]class ExtractTableToStorageJob(_AsyncJob): + """Asynchronous job: extract data from a table into Cloud Storage. + + :type name: string + :param name: the name of the job + + :type source: :class:`gcloud.bigquery.table.Table` + :param source: Table from which data is to be extracted. + + :type destination_uris: list of string + :param destination_uris: URIs describing Cloud Storage blobs into which + extracted data will be written, in format + ``gs://<bucket_name>/<object_name_or_glob>``. + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + """ + _JOB_TYPE = 'extract' + + def __init__(self, name, source, destination_uris, client): + super(ExtractTableToStorageJob, self).__init__(name, client) + self.source = source + self.destination_uris = destination_uris + self._configuration = _ExtractConfiguration() + + compression = Compression('compression') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract.compression + """ + + destination_format = DestinationFormat('destination_format') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract.destinationFormat + """ + + field_delimiter = _TypedProperty('field_delimiter', six.string_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract.fieldDelimiter + """ + + print_header = _TypedProperty('print_header', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract.printHeader + """ + + def _populate_config_resource(self, configuration): + """Helper for _build_resource: copy config properties to resource""" + if self.compression is not None: + configuration['compression'] = self.compression + if self.destination_format is not None: + configuration['destinationFormat'] = self.destination_format + if self.field_delimiter is not None: + configuration['fieldDelimiter'] = self.field_delimiter + if self.print_header is not None: + configuration['printHeader'] = self.print_header + + def _build_resource(self): + """Generate a resource for :meth:`begin`.""" + + source_ref = { + 'projectId': self.source.project, + 'datasetId': self.source.dataset_name, + 'tableId': self.source.name, + } + + resource = { + 'jobReference': { + 'projectId': self.project, + 'jobId': self.name, + }, + 'configuration': { + self._JOB_TYPE: { + 'sourceTable': source_ref, + 'destinationUris': self.destination_uris, + }, + }, + } + configuration = resource['configuration'][self._JOB_TYPE] + self._populate_config_resource(configuration) + + return resource + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a job given its API representation + + .. note:: + + This method assumes that the project found in the resource matches + the client's project. + + :type resource: dict + :param resource: job representation returned from the API + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: Client which holds credentials and project + configuration for the dataset. + + :rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob` + :returns: Job parsed from ``resource``. + """ + name, config = cls._get_resource_config(resource) + source_config = config['sourceTable'] + dataset = Dataset(source_config['datasetId'], client) + source = Table(source_config['tableId'], dataset) + destination_uris = config['destinationUris'] + job = cls(name, source, destination_uris, client=client) + job._set_properties(resource) + return job
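+# Usage sketch (illustrative only): exporting a table to Cloud Storage via
+# the client factory; the bucket and names are placeholders, and the ``*``
+# in the URI lets BigQuery shard large exports across several files.
+#
+#     from gcloud import bigquery
+#
+#     client = bigquery.Client(project='my-project')
+#     table = client.dataset('my_dataset').table('my_table')
+#     job = client.extract_table_to_storage(
+#         'extract-job-1', table, 'gs://my-bucket/export-*.csv')
+#     job.destination_format = 'CSV'
+#     job.print_header = True
+#     job.begin()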
+ + +class _AsyncQueryConfiguration(object): + """User-settable configuration options for asynchronous query jobs. + + Values which are ``None`` -> server defaults. + """ + _allow_large_results = None + _create_disposition = None + _default_dataset = None + _destination = None + _flatten_results = None + _priority = None + _use_query_cache = None + _use_legacy_sql = None + _write_disposition = None + + +
[docs]class QueryJob(_AsyncJob): + """Asynchronous job: query tables. + + :type name: string + :param name: the name of the job + + :type query: string + :param query: SQL query string + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + + :type udf_resources: tuple + :param udf_resources: An iterable of + :class:`gcloud.bigquery.job.UDFResource` + (empty by default) + """ + _JOB_TYPE = 'query' + _UDF_KEY = 'userDefinedFunctionResources' + + def __init__(self, name, query, client, udf_resources=()): + super(QueryJob, self).__init__(name, client) + self.query = query + self.udf_resources = udf_resources + self._configuration = _AsyncQueryConfiguration() + + allow_large_results = _TypedProperty('allow_large_results', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.allowLargeResults + """ + + create_disposition = CreateDisposition('create_disposition') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.createDisposition + """ + + default_dataset = _TypedProperty('default_dataset', Dataset) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.defaultDataset + """ + + destination = _TypedProperty('destination', Table) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.destinationTable + """ + + flatten_results = _TypedProperty('flatten_results', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.flattenResults + """ + + priority = QueryPriority('priority') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.priority + """ + + udf_resources = UDFResourcesProperty() + + use_query_cache = _TypedProperty('use_query_cache', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.useQueryCache + """ + + use_legacy_sql = _TypedProperty('use_legacy_sql', bool) + """See: + https://cloud.google.com/bigquery/docs/\ + reference/v2/jobs#configuration.query.useLegacySql + """ + + write_disposition = WriteDisposition('write_disposition') + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query.writeDisposition + """ + + def _destination_table_resource(self): + """Create a JSON resource for the destination table. 
+ + Helper for :meth:`_populate_config_resource` and + :meth:`_scrub_local_properties` + """ + if self.destination is not None: + return { + 'projectId': self.destination.project, + 'datasetId': self.destination.dataset_name, + 'tableId': self.destination.name, + } + + def _populate_config_resource(self, configuration): + """Helper for _build_resource: copy config properties to resource""" + if self.allow_large_results is not None: + configuration['allowLargeResults'] = self.allow_large_results + if self.create_disposition is not None: + configuration['createDisposition'] = self.create_disposition + if self.default_dataset is not None: + configuration['defaultDataset'] = { + 'projectId': self.default_dataset.project, + 'datasetId': self.default_dataset.name, + } + if self.destination is not None: + table_res = self._destination_table_resource() + configuration['destinationTable'] = table_res + if self.flatten_results is not None: + configuration['flattenResults'] = self.flatten_results + if self.priority is not None: + configuration['priority'] = self.priority + if self.use_query_cache is not None: + configuration['useQueryCache'] = self.use_query_cache + if self.use_legacy_sql is not None: + configuration['useLegacySql'] = self.use_legacy_sql + if self.write_disposition is not None: + configuration['writeDisposition'] = self.write_disposition + if len(self._udf_resources) > 0: + configuration[self._UDF_KEY] = _build_udf_resources( + self._udf_resources) + + def _build_resource(self): + """Generate a resource for :meth:`begin`.""" + + resource = { + 'jobReference': { + 'projectId': self.project, + 'jobId': self.name, + }, + 'configuration': { + self._JOB_TYPE: { + 'query': self.query, + }, + }, + } + configuration = resource['configuration'][self._JOB_TYPE] + self._populate_config_resource(configuration) + + return resource + + def _scrub_local_properties(self, cleaned): + """Helper: handle subclass properties in cleaned. + + .. note:: + + This method assumes that the project found in the resource matches + the client's project. + """ + configuration = cleaned['configuration']['query'] + dest_remote = configuration.get('destinationTable') + + if dest_remote is None: + if self.destination is not None: + del self.destination + else: + dest_local = self._destination_table_resource() + if dest_remote != dest_local: + dataset = self._client.dataset(dest_remote['datasetId']) + self.destination = dataset.table(dest_remote['tableId']) + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a job given its API representation + + :type resource: dict + :param resource: job representation returned from the API + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: Client which holds credentials and project + configuration for the dataset. + + :rtype: :class:`gcloud.bigquery.job.QueryJob` + :returns: Job parsed from ``resource``. + """ + name, config = cls._get_resource_config(resource) + query = config['query'] + job = cls(name, query, client=client) + job._set_properties(resource) + return job
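+# Usage sketch (illustrative only): running an asynchronous query and
+# polling until the job finishes; ``begin``, ``reload``, and ``state`` come
+# from the ``_AsyncJob`` machinery defined earlier in this module.
+#
+#     import time
+#     from gcloud import bigquery
+#
+#     client = bigquery.Client(project='my-project')
+#     job = client.run_async_query(
+#         'query-job-1', 'SELECT COUNT(*) FROM my_dataset.my_table')
+#     job.begin()
+#     while job.state != 'DONE':
+#         time.sleep(1)
+#         job.reload()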
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigquery/query.html b/0.18.1/_modules/gcloud/bigquery/query.html new file mode 100644 index 000000000000..28973cd07e54 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/query.html @@ -0,0 +1,610 @@ + + + + + + + + gcloud.bigquery.query — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery.query

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Queries."""
+
+import six
+
+from gcloud.bigquery._helpers import _TypedProperty
+from gcloud.bigquery._helpers import _rows_from_json
+from gcloud.bigquery.dataset import Dataset
+from gcloud.bigquery.job import QueryJob
+from gcloud.bigquery.job import UDFResourcesProperty
+from gcloud.bigquery.job import _build_udf_resources
+from gcloud.bigquery.table import _parse_schema_resource
+
+
+class _SyncQueryConfiguration(object):
+    """User-settable configuration options for synchronous query jobs.
+
+    Values which are ``None`` -> server defaults.
+    """
+    _default_dataset = None
+    _dry_run = None
+    _max_results = None
+    _timeout_ms = None
+    _preserve_nulls = None
+    _use_query_cache = None
+    _use_legacy_sql = None
+
+
+
[docs]class QueryResults(object): + """Synchronous job: query tables. + + :type query: string + :param query: SQL query string + + :type client: :class:`gcloud.bigquery.client.Client` + :param client: A client which holds credentials and project configuration + for the dataset (which requires a project). + + :type udf_resources: tuple + :param udf_resources: An iterable of + :class:`gcloud.bigquery.job.UDFResource` + (empty by default) + """ + + _UDF_KEY = 'userDefinedFunctionResources' + + def __init__(self, query, client, udf_resources=()): + self._client = client + self._properties = {} + self.query = query + self._configuration = _SyncQueryConfiguration() + self.udf_resources = udf_resources + self._job = None + + @property + def project(self): + """Project bound to the job. + + :rtype: string + :returns: the project (derived from the client). + """ + return self._client.project + + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: :class:`gcloud.bigquery.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + + @property + def cache_hit(self): + """Query results served from cache. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#cacheHit + + :rtype: boolean or ``NoneType`` + :returns: True if the query results were served from cache (None + until set by the server). + """ + return self._properties.get('cacheHit') + + @property + def complete(self): + """Server completed query. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobComplete + + :rtype: boolean or ``NoneType`` + :returns: True if the query completed on the server (None + until set by the server). + """ + return self._properties.get('jobComplete') + + @property + def errors(self): + """Errors generated by the query. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#errors + + :rtype: list of mapping, or ``NoneType`` + :returns: Mappings describing errors generated on the server (None + until set by the server). + """ + return self._properties.get('errors') + + @property + def name(self): + """Job name, generated by the back-end. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobReference + + :rtype: string, or ``NoneType`` + :returns: the job ID generated on the server (None + until set by the server). + """ + return self._properties.get('jobReference', {}).get('jobId') + + @property + def job(self): + """Job instance used to run the query. + + :rtype: :class:`gcloud.bigquery.job.QueryJob`, or ``NoneType`` + :returns: Job instance used to run the query (None until + ``jobReference`` property is set by the server). + """ + if self._job is None: + job_ref = self._properties.get('jobReference') + if job_ref is not None: + self._job = QueryJob(job_ref['jobId'], self.query, + self._client) + return self._job + + @property + def page_token(self): + """Token for fetching next batch of results. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#pageToken + + :rtype: string, or ``NoneType`` + :returns: Token generated on the server (None until set by the server). 
+ """ + return self._properties.get('pageToken') + + @property + def total_rows(self): + """Total number of rows returned by the query + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalRows + + :rtype: integer, or ``NoneType`` + :returns: Count generated on the server (None until set by the server). + """ + return self._properties.get('totalRows') + + @property + def total_bytes_processed(self): + """Total number of bytes processed by the query + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalBytesProcessed + + :rtype: integer, or ``NoneType`` + :returns: Count generated on the server (None until set by the server). + """ + return self._properties.get('totalBytesProcessed') + + @property + def rows(self): + """Query results. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#rows + + :rtype: list of tuples of row values, or ``NoneType`` + :returns: fields describing the schema (None until set by the server). + """ + return _rows_from_json(self._properties.get('rows', ()), self.schema) + + @property + def schema(self): + """Schema for query results. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#schema + + :rtype: list of :class:`SchemaField`, or ``NoneType`` + :returns: fields describing the schema (None until set by the server). + """ + return _parse_schema_resource(self._properties.get('schema', {})) + + default_dataset = _TypedProperty('default_dataset', Dataset) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#defaultDataset + """ + + dry_run = _TypedProperty('dry_run', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#dryRun + """ + + max_results = _TypedProperty('max_results', six.integer_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#maxResults + """ + + preserve_nulls = _TypedProperty('preserve_nulls', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#preserveNulls + """ + + timeout_ms = _TypedProperty('timeout_ms', six.integer_types) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#timeoutMs + """ + + udf_resources = UDFResourcesProperty() + + use_query_cache = _TypedProperty('use_query_cache', bool) + """See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#useQueryCache + """ + + use_legacy_sql = _TypedProperty('use_legacy_sql', bool) + """See: + https://cloud.google.com/bigquery/docs/\ + reference/v2/jobs/query#useLegacySql + """ + + def _set_properties(self, api_response): + """Update properties from resource in body of ``api_response`` + + :type api_response: httplib2.Response + :param api_response: response returned from an API call + """ + self._properties.clear() + self._properties.update(api_response) + + def _build_resource(self): + """Generate a resource for :meth:`begin`.""" + resource = {'query': self.query} + + if self.default_dataset is not None: + resource['defaultDataset'] = { + 'projectId': self.project, + 'datasetId': self.default_dataset.name, + } + + if self.max_results is not None: + resource['maxResults'] = self.max_results + + if self.preserve_nulls is not None: + resource['preserveNulls'] = self.preserve_nulls + + if self.timeout_ms is not None: + resource['timeoutMs'] = self.timeout_ms + + if self.use_query_cache is not None: + resource['useQueryCache'] = self.use_query_cache + + if self.use_legacy_sql is not None: + resource['useLegacySql'] = self.use_legacy_sql + + if self.dry_run is 
not None: + resource['dryRun'] = self.dry_run + + if len(self._udf_resources) > 0: + resource[self._UDF_KEY] = _build_udf_resources(self._udf_resources) + + return resource + +
[docs] def run(self, client=None): + """API call: run the query via a POST request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/query + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + path = '/projects/%s/queries' % (self.project,) + api_response = client.connection.api_request( + method='POST', path=path, data=self._build_resource()) + self._set_properties(api_response)
+ +
[docs] def fetch_data(self, max_results=None, page_token=None, start_index=None, + timeout_ms=None, client=None): + """API call: fetch a page of query result data via a GET request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults + + :type max_results: integer or ``NoneType`` + :param max_results: maximum number of rows to return. + + :type page_token: string or ``NoneType`` + :param page_token: token representing a cursor into the table's rows. + + :type start_index: integer or ``NoneType`` + :param start_index: zero-based index of starting row + + :type timeout_ms: integer or ``NoneType`` + :param timeout_ms: timeout, in milliseconds, to wait for query to + complete + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: tuple + :returns: ``(row_data, total_rows, page_token)``, where ``row_data`` + is a list of tuples, one per result row, containing only + the values; ``total_rows`` is a count of the total number + of rows in the table; and ``page_token`` is an opaque + string which can be used to fetch the next batch of rows + (``None`` if no further batches can be fetched). + :raises: ValueError if the query has not yet been executed. + """ + if self.name is None: + raise ValueError("Query not yet executed: call 'run()'") + + client = self._require_client(client) + params = {} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + if start_index is not None: + params['startIndex'] = start_index + + if timeout_ms is not None: + params['timeoutMs'] = timeout_ms + + path = '/projects/%s/queries/%s' % (self.project, self.name) + response = client.connection.api_request(method='GET', + path=path, + query_params=params) + self._set_properties(response) + + total_rows = response.get('totalRows') + if total_rows is not None: + total_rows = int(total_rows) + page_token = response.get('pageToken') + rows_data = _rows_from_json(response.get('rows', ()), self.schema) + + return rows_data, total_rows, page_token
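+# Usage sketch (illustrative only): a synchronous query via the client
+# factory, paging through the results with ``fetch_data``; the query text
+# and names are placeholders.
+#
+#     from gcloud import bigquery
+#
+#     client = bigquery.Client(project='my-project')
+#     query = client.run_sync_query('SELECT name, age FROM my_dataset.people')
+#     query.run()
+#     rows, total, token = query.fetch_data()
+#     while token is not None:
+#         more, total, token = query.fetch_data(page_token=token)
+#         rows.extend(more)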
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigquery/table.html b/0.18.1/_modules/gcloud/bigquery/table.html new file mode 100644 index 000000000000..9d4daacc3fb1 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigquery/table.html @@ -0,0 +1,1256 @@ + + + + + + + + gcloud.bigquery.table — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigquery.table

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Datasets."""
+
+import datetime
+import json
+import os
+
+import six
+
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud._helpers import _microseconds_from_datetime
+from gcloud._helpers import _millis_from_datetime
+from gcloud.exceptions import NotFound
+from gcloud.streaming.http_wrapper import Request
+from gcloud.streaming.http_wrapper import make_api_request
+from gcloud.streaming.transfer import RESUMABLE_UPLOAD
+from gcloud.streaming.transfer import Upload
+from gcloud.bigquery._helpers import _rows_from_json
+
+
+_MARKER = object()
+
+
+
[docs]class SchemaField(object): + """Describe a single field within a table schema. + + :type name: str + :param name: the name of the field + + :type field_type: str + :param field_type: the type of the field (one of 'STRING', 'INTEGER', + 'FLOAT', 'BOOLEAN', 'TIMESTAMP' or 'RECORD') + + :type mode: str + :param mode: the mode of the field (one of 'NULLABLE', 'REQUIRED', + or 'REPEATED') + + :type description: str + :param description: optional description for the field + + :type fields: list of :class:`SchemaField`, or None + :param fields: subfields (requires ``field_type`` of 'RECORD'). + """ + def __init__(self, name, field_type, mode='NULLABLE', description=None, + fields=None): + self.name = name + self.field_type = field_type + self.mode = mode + self.description = description + self.fields = fields + + def __eq__(self, other): + return ( + self.name == other.name and + self.field_type.lower() == other.field_type.lower() and + self.mode == other.mode and + self.description == other.description and + self.fields == other.fields)
+ + +
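+# Usage sketch (illustrative only): composing a schema, including a nested
+# REPEATED RECORD field, from ``SchemaField`` instances.
+#
+#     phone = SchemaField('phone', 'STRING')
+#     schema = [
+#         SchemaField('full_name', 'STRING', mode='REQUIRED'),
+#         SchemaField('age', 'INTEGER', mode='REQUIRED'),
+#         SchemaField('contact', 'RECORD', mode='REPEATED', fields=[phone]),
+#     ]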
[docs]class Table(object): + """Tables represent a set of rows whose values correspond to a schema. + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tables + + :type name: str + :param name: the name of the table + + :type dataset: :class:`gcloud.bigquery.dataset.Dataset` + :param dataset: The dataset which contains the table. + + :type schema: list of :class:`SchemaField` + :param schema: The table's schema + """ + + _schema = None + + def __init__(self, name, dataset, schema=()): + self.name = name + self._dataset = dataset + self._properties = {} + # Let the @property do validation. + self.schema = schema + + @property + def project(self): + """Project bound to the table. + + :rtype: str + :returns: the project (derived from the dataset). + """ + return self._dataset.project + + @property + def dataset_name(self): + """Name of dataset containing the table. + + :rtype: str + :returns: the name (derived from the dataset). + """ + return self._dataset.name + + @property + def path(self): + """URL path for the table's APIs. + + :rtype: str + :returns: the path based on project and dataset name. + """ + return '%s/tables/%s' % (self._dataset.path, self.name) + + @property + def schema(self): + """Table's schema. + + :rtype: list of :class:`SchemaField` + :returns: fields describing the schema + """ + return list(self._schema) + + @schema.setter + def schema(self, value): + """Update table's schema + + :type value: list of :class:`SchemaField` + :param value: fields describing the schema + + :raises: TypeError if 'value' is not a sequence, or ValueError if + any item in the sequence is not a SchemaField + """ + if not all(isinstance(field, SchemaField) for field in value): + raise ValueError('Schema items must be fields') + self._schema = tuple(value) + + @property + def created(self): + """Datetime at which the table was created. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the creation time (None until set from the server). + """ + creation_time = self._properties.get('creationTime') + if creation_time is not None: + # creation_time will be in milliseconds. + return _datetime_from_microseconds(1000.0 * creation_time) + + @property + def etag(self): + """ETag for the table resource. + + :rtype: str, or ``NoneType`` + :returns: the ETag (None until set from the server). + """ + return self._properties.get('etag') + + @property + def modified(self): + """Datetime at which the table was last modified. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the modification time (None until set from the server). + """ + modified_time = self._properties.get('lastModifiedTime') + if modified_time is not None: + # modified_time will be in milliseconds. + return _datetime_from_microseconds(1000.0 * modified_time) + + @property + def num_bytes(self): + """The size of the table in bytes. + + :rtype: integer, or ``NoneType`` + :returns: the byte count (None until set from the server). + """ + num_bytes_as_str = self._properties.get('numBytes') + if num_bytes_as_str is not None: + return int(num_bytes_as_str) + + @property + def num_rows(self): + """The number of rows in the table. + + :rtype: integer, or ``NoneType`` + :returns: the row count (None until set from the server). + """ + num_rows_as_str = self._properties.get('numRows') + if num_rows_as_str is not None: + return int(num_rows_as_str) + + @property + def self_link(self): + """URL for the table resource. + + :rtype: str, or ``NoneType`` + :returns: the URL (None until set from the server). 
+ """ + return self._properties.get('selfLink') + + @property + def table_id(self): + """ID for the table resource. + + :rtype: str, or ``NoneType`` + :returns: the ID (None until set from the server). + """ + return self._properties.get('id') + + @property + def table_type(self): + """The type of the table. + + Possible values are "TABLE" or "VIEW". + + :rtype: str, or ``NoneType`` + :returns: the URL (None until set from the server). + """ + return self._properties.get('type') + + @property + def description(self): + """Description of the table. + + :rtype: str, or ``NoneType`` + :returns: The description as set by the user, or None (the default). + """ + return self._properties.get('description') + + @description.setter + def description(self, value): + """Update description of the table. + + :type value: str, or ``NoneType`` + :param value: new description + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['description'] = value + + @property + def expires(self): + """Datetime at which the table will be removed. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the expiration time, or None + """ + expiration_time = self._properties.get('expirationTime') + if expiration_time is not None: + # expiration_time will be in milliseconds. + return _datetime_from_microseconds(1000.0 * expiration_time) + + @expires.setter + def expires(self, value): + """Update datetime at which the table will be removed. + + :type value: ``datetime.datetime``, or ``NoneType`` + :param value: the new expiration time, or None + """ + if not isinstance(value, datetime.datetime) and value is not None: + raise ValueError("Pass a datetime, or None") + self._properties['expirationTime'] = _millis_from_datetime(value) + + @property + def friendly_name(self): + """Title of the table. + + :rtype: str, or ``NoneType`` + :returns: The name as set by the user, or None (the default). + """ + return self._properties.get('friendlyName') + + @friendly_name.setter + def friendly_name(self, value): + """Update title of the table. + + :type value: str, or ``NoneType`` + :param value: new title + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['friendlyName'] = value + + @property + def location(self): + """Location in which the table is hosted. + + :rtype: str, or ``NoneType`` + :returns: The location as set by the user, or None (the default). + """ + return self._properties.get('location') + + @location.setter + def location(self, value): + """Update location in which the table is hosted. + + :type value: str, or ``NoneType`` + :param value: new location + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['location'] = value + + @property + def view_query(self): + """SQL query defining the table as a view. + + :rtype: str, or ``NoneType`` + :returns: The query as set by the user, or None (the default). + """ + view = self._properties.get('view') + if view is not None: + return view.get('query') + + @view_query.setter + def view_query(self, value): + """Update SQL query defining the table as a view. + + :type value: str + :param value: new query + + :raises: ValueError for invalid value types. 
+ """ + if not isinstance(value, six.string_types): + raise ValueError("Pass a string") + self._properties['view'] = {'query': value} + + @view_query.deleter + def view_query(self): + """Delete SQL query defining the table as a view.""" + self._properties.pop('view', None) + + @classmethod +
[docs] def from_api_repr(cls, resource, dataset): + """Factory: construct a table given its API representation + + :type resource: dict + :param resource: table resource representation returned from the API + + :type dataset: :class:`gcloud.bigquery.dataset.Dataset` + :param dataset: The dataset containing the table. + + :rtype: :class:`gcloud.bigquery.table.Table` + :returns: Table parsed from ``resource``. + """ + if ('tableReference' not in resource or + 'tableId' not in resource['tableReference']): + raise KeyError('Resource lacks required identity information:' + '["tableReference"]["tableId"]') + table_name = resource['tableReference']['tableId'] + table = cls(table_name, dataset=dataset) + table._set_properties(resource) + return table
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: :class:`gcloud.bigquery.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._dataset._client + return client + + def _set_properties(self, api_response): + """Update properties from resource in body of ``api_response`` + + :type api_response: httplib2.Response + :param api_response: response returned from an API call + """ + self._properties.clear() + cleaned = api_response.copy() + schema = cleaned.pop('schema', {'fields': ()}) + self.schema = _parse_schema_resource(schema) + if 'creationTime' in cleaned: + cleaned['creationTime'] = float(cleaned['creationTime']) + if 'lastModifiedTime' in cleaned: + cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime']) + if 'expirationTime' in cleaned: + cleaned['expirationTime'] = float(cleaned['expirationTime']) + self._properties.update(cleaned) + + def _build_resource(self): + """Generate a resource for ``create`` or ``update``.""" + resource = { + 'tableReference': { + 'projectId': self._dataset.project, + 'datasetId': self._dataset.name, + 'tableId': self.name}, + } + if self.description is not None: + resource['description'] = self.description + + if self.expires is not None: + value = _millis_from_datetime(self.expires) + resource['expirationTime'] = value + + if self.friendly_name is not None: + resource['friendlyName'] = self.friendly_name + + if self.location is not None: + resource['location'] = self.location + + if self.view_query is not None: + view = resource['view'] = {} + view['query'] = self.view_query + elif self._schema: + resource['schema'] = { + 'fields': _build_schema_resource(self._schema) + } + else: + raise ValueError("Set either 'view_query' or 'schema'.") + + return resource + +
[docs] def create(self, client=None): + """API call: create the table via a POST request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tables/insert + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + path = '/projects/%s/datasets/%s/tables' % ( + self._dataset.project, self._dataset.name) + api_response = client.connection.api_request( + method='POST', path=path, data=self._build_resource()) + self._set_properties(api_response)
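+# Usage sketch (illustrative only): creating the table only when it does
+# not already exist (see ``exists()`` below), using the ``schema`` list
+# sketched earlier and an existing :class:`gcloud.bigquery.dataset.Dataset`.
+#
+#     table = Table('people', dataset, schema=schema)
+#     if not table.exists():
+#         table.create()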
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the table via a GET request + + See + https://cloud.google.com/bigquery/docs/reference/v2/tables/get + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: bool + :returns: Boolean indicating existence of the table. + """ + client = self._require_client(client) + + try: + client.connection.api_request(method='GET', path=self.path, + query_params={'fields': 'id'}) + except NotFound: + return False + else: + return True
+ +
[docs] def reload(self, client=None): + """API call: refresh table properties via a GET request + + See + https://cloud.google.com/bigquery/docs/reference/v2/tables/get + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + + api_response = client.connection.api_request( + method='GET', path=self.path) + self._set_properties(api_response)
+ +
[docs] def patch(self, + client=None, + friendly_name=_MARKER, + description=_MARKER, + location=_MARKER, + expires=_MARKER, + view_query=_MARKER, + schema=_MARKER): + """API call: update individual table properties via a PATCH request + + See + https://cloud.google.com/bigquery/docs/reference/v2/tables/patch + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :type friendly_name: str or ``NoneType`` + :param friendly_name: new title for the table. + + :type description: str or ``NoneType`` + :param description: new description for the table. + + :type location: str or ``NoneType`` + :param location: new location for the table. + + :type expires: :class:`datetime.datetime` or ``NoneType`` + :param expires: point in time at which the table expires. + + :type view_query: str + :param view_query: SQL query defining the table as a view + + :type schema: list of :class:`SchemaField` + :param schema: fields describing the schema + + :raises: ValueError for invalid value types. + """ + client = self._require_client(client) + + partial = {} + + if expires is not _MARKER: + if (not isinstance(expires, datetime.datetime) and + expires is not None): + raise ValueError("Pass a datetime, or None") + partial['expirationTime'] = _millis_from_datetime(expires) + + if description is not _MARKER: + partial['description'] = description + + if friendly_name is not _MARKER: + partial['friendlyName'] = friendly_name + + if location is not _MARKER: + partial['location'] = location + + if view_query is not _MARKER: + if view_query is None: + partial['view'] = None + else: + partial['view'] = {'query': view_query} + + if schema is not _MARKER: + if schema is None: + partial['schema'] = None + else: + partial['schema'] = { + 'fields': _build_schema_resource(schema)} + + api_response = client.connection.api_request( + method='PATCH', path=self.path, data=partial) + self._set_properties(api_response)
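+# Usage sketch (illustrative only): patching a subset of properties in
+# place; arguments left at the ``_MARKER`` default are omitted from the
+# PATCH body, so only the named fields change on the server.
+#
+#     table.patch(description='Customer contact records',
+#                 friendly_name='People')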
+ +
[docs] def update(self, client=None): + """API call: update table properties via a PUT request + + See + https://cloud.google.com/bigquery/docs/reference/v2/tables/update + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + api_response = client.connection.api_request( + method='PUT', path=self.path, data=self._build_resource()) + self._set_properties(api_response)
+ +
[docs] def delete(self, client=None): + """API call: delete the table via a DELETE request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tables/delete + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + """ + client = self._require_client(client) + client.connection.api_request(method='DELETE', path=self.path)
+ +
[docs] def fetch_data(self, max_results=None, page_token=None, client=None): + """API call: fetch the table data via a GET request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list + + .. note:: + + This method assumes that its instance's ``schema`` attribute is + up-to-date with the schema as defined on the back-end: if the + two schemas are not identical, the values returned may be + incomplete. To ensure that the local copy of the schema is + up-to-date, call the table's ``reload`` method. + + :type max_results: integer or ``NoneType`` + :param max_results: maximum number of rows to return. + + :type page_token: str or ``NoneType`` + :param page_token: token representing a cursor into the table's rows. + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: tuple + :returns: ``(row_data, total_rows, page_token)``, where ``row_data`` + is a list of tuples, one per result row, containing only + the values; ``total_rows`` is a count of the total number + of rows in the table; and ``page_token`` is an opaque + string which can be used to fetch the next batch of rows + (``None`` if no further batches can be fetched). + """ + client = self._require_client(client) + params = {} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + response = client.connection.api_request(method='GET', + path='%s/data' % self.path, + query_params=params) + total_rows = response.get('totalRows') + if total_rows is not None: + total_rows = int(total_rows) + page_token = response.get('pageToken') + rows_data = _rows_from_json(response.get('rows', ()), self._schema) + + return rows_data, total_rows, page_token
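+# Usage sketch (illustrative only): paging through table rows; ``reload()``
+# is called first so the local copy of the schema matches the server's.
+#
+#     table.reload()
+#     rows, total, token = table.fetch_data(max_results=100)
+#     while token is not None:
+#         more, total, token = table.fetch_data(max_results=100,
+#                                               page_token=token)
+#         rows.extend(more)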
+ +
[docs] def insert_data(self, + rows, + row_ids=None, + skip_invalid_rows=None, + ignore_unknown_values=None, + template_suffix=None, + client=None): + """API call: insert table data via a POST request + + See: + https://cloud.google.com/bigquery/docs/reference/v2/tabledata/insertAll + + :type rows: list of tuples + :param rows: Row data to be inserted. Each tuple should contain data + for each schema field on the current table and in the + same order as the schema fields. + + :type row_ids: list of string + :param row_ids: Unique ids, one per row being inserted. If not + passed, no de-duplication occurs. + + :type skip_invalid_rows: boolean or ``NoneType`` + :param skip_invalid_rows: skip rows w/ invalid data? + + :type ignore_unknown_values: boolean or ``NoneType`` + :param ignore_unknown_values: ignore columns beyond schema? + + :type template_suffix: str or ``NoneType`` + :param template_suffix: treat ``name`` as a template table and provide + a suffix. BigQuery will create the table + ``<name> + <template_suffix>`` based on the + schema of the template table. See: + https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current dataset. + + :rtype: list of mappings + :returns: One mapping per row with insert errors: the "index" key + identifies the row, and the "errors" key contains a list + of the mappings describing one or more problems with the + row. + """ + client = self._require_client(client) + rows_info = [] + data = {'rows': rows_info} + + for index, row in enumerate(rows): + row_info = {} + + for field, value in zip(self._schema, row): + if field.field_type == 'TIMESTAMP' and value is not None: + # BigQuery stores TIMESTAMP data internally as a + # UNIX timestamp with microsecond precision. + # Specifies the number of seconds since the epoch. + value = _microseconds_from_datetime(value) * 1e-6 + row_info[field.name] = value + + info = {'json': row_info} + if row_ids is not None: + info['insertId'] = row_ids[index] + + rows_info.append(info) + + if skip_invalid_rows is not None: + data['skipInvalidRows'] = skip_invalid_rows + + if ignore_unknown_values is not None: + data['ignoreUnknownValues'] = ignore_unknown_values + + if template_suffix is not None: + data['templateSuffix'] = template_suffix + + response = client.connection.api_request( + method='POST', + path='%s/insertAll' % self.path, + data=data) + errors = [] + + for error in response.get('insertErrors', ()): + errors.append({'index': int(error['index']), + 'errors': error['errors']}) + + return errors
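+# Usage sketch (illustrative only): streaming rows into the table; each
+# tuple must match the table's schema order, and the returned mappings
+# describe any per-row insert errors.
+#
+#     errors = table.insert_data([
+#         ('Alice', 30, []),
+#         ('Bob', 25, []),
+#     ])
+#     if errors:
+#         print('some rows failed to insert: %s' % (errors,))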
+ + # pylint: disable=too-many-arguments,too-many-locals +
[docs] def upload_from_file(self, + file_obj, + source_format, + rewind=False, + size=None, + num_retries=6, + allow_jagged_rows=None, + allow_quoted_newlines=None, + create_disposition=None, + encoding=None, + field_delimiter=None, + ignore_unknown_values=None, + max_bad_records=None, + quote_character=None, + skip_leading_rows=None, + write_disposition=None, + client=None): + """Upload the contents of this table from a file-like object. + + The content type of the upload will be ``application/octet-stream``. + + :type file_obj: file + :param file_obj: A file handle opened in binary mode for reading. + + :type source_format: str + :param source_format: one of 'CSV' or 'NEWLINE_DELIMITED_JSON'. + job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type rewind: boolean + :param rewind: If True, seek to the beginning of the file handle before + uploading the file. + + :type size: int + :param size: The number of bytes to read from the file handle. + If not provided, we'll try to guess the size using + :func:`os.fstat`. (If the file handle is not from the + filesystem this won't be possible.) + + :type num_retries: integer + :param num_retries: Number of upload retries. Defaults to 6. + + :type allow_jagged_rows: boolean + :param allow_jagged_rows: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type allow_quoted_newlines: boolean + :param allow_quoted_newlines: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type create_disposition: str + :param create_disposition: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type encoding: str + :param encoding: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type field_delimiter: str + :param field_delimiter: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type ignore_unknown_values: boolean + :param ignore_unknown_values: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type max_bad_records: integer + :param max_bad_records: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type quote_character: str + :param quote_character: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type skip_leading_rows: integer + :param skip_leading_rows: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type write_disposition: str + :param write_disposition: job configuration option; see + :meth:`gcloud.bigquery.job.LoadTableFromStorageJob` + + :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current dataset. + + :rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob` + :returns: the job instance used to load the data (e.g., for + querying status) + :raises: :class:`ValueError` if ``size`` is not passed in and can not + be determined, or if the ``file_obj`` can be detected to be + a file opened in text mode. + """ + client = self._require_client(client) + connection = client.connection + content_type = 'application/octet-stream' + + # Rewind the file if desired. + if rewind: + file_obj.seek(0, os.SEEK_SET) + + mode = getattr(file_obj, 'mode', None) + if mode is not None and mode != 'rb': + raise ValueError( + "Cannot upload files opened in text mode: use " + "open(filename, mode='rb')") + + # Get the basic stats about the file. 
+ total_bytes = size + if total_bytes is None: + if hasattr(file_obj, 'fileno'): + total_bytes = os.fstat(file_obj.fileno()).st_size + else: + raise ValueError('total bytes could not be determined. Please ' + 'pass an explicit size.') + headers = { + 'Accept': 'application/json', + 'Accept-Encoding': 'gzip, deflate', + 'User-Agent': connection.USER_AGENT, + 'content-type': 'application/json', + } + + metadata = { + 'configuration': { + 'load': { + 'sourceFormat': source_format, + 'schema': { + 'fields': _build_schema_resource(self._schema), + }, + 'destinationTable': { + 'projectId': self._dataset.project, + 'datasetId': self._dataset.name, + 'tableId': self.name, + } + } + } + } + + _configure_job_metadata(metadata, allow_jagged_rows, + allow_quoted_newlines, create_disposition, + encoding, field_delimiter, + ignore_unknown_values, max_bad_records, + quote_character, skip_leading_rows, + write_disposition) + + upload = Upload(file_obj, content_type, total_bytes, + auto_transfer=False) + + url_builder = _UrlBuilder() + upload_config = _UploadConfig() + + # Base URL may change once we know simple vs. resumable. + base_url = connection.API_BASE_URL + '/upload' + path = '/projects/%s/jobs' % (self._dataset.project,) + upload_url = connection.build_api_url(api_base_url=base_url, path=path) + + # Use apitools 'Upload' facility. + request = Request(upload_url, 'POST', headers, + body=json.dumps(metadata)) + + upload.configure_request(upload_config, request, url_builder) + query_params = url_builder.query_params + base_url = connection.API_BASE_URL + '/upload' + request.url = connection.build_api_url(api_base_url=base_url, + path=path, + query_params=query_params) + upload.initialize_upload(request, connection.http) + + if upload.strategy == RESUMABLE_UPLOAD: + http_response = upload.stream_file(use_chunks=True) + else: + http_response = make_api_request(connection.http, request, + retries=num_retries) + response_content = http_response.content + if not isinstance(response_content, + six.string_types): # pragma: NO COVER Python3 + response_content = response_content.decode('utf-8') + return client.job_from_resource(json.loads(response_content))
+ # pylint: enable=too-many-arguments,too-many-locals + + +def _configure_job_metadata(metadata, # pylint: disable=too-many-arguments + allow_jagged_rows, + allow_quoted_newlines, + create_disposition, + encoding, + field_delimiter, + ignore_unknown_values, + max_bad_records, + quote_character, + skip_leading_rows, + write_disposition): + """Helper for :meth:`Table.upload_from_file`.""" + load_config = metadata['configuration']['load'] + + if allow_jagged_rows is not None: + load_config['allowJaggedRows'] = allow_jagged_rows + + if allow_quoted_newlines is not None: + load_config['allowQuotedNewlines'] = allow_quoted_newlines + + if create_disposition is not None: + load_config['createDisposition'] = create_disposition + + if encoding is not None: + load_config['encoding'] = encoding + + if field_delimiter is not None: + load_config['fieldDelimiter'] = field_delimiter + + if ignore_unknown_values is not None: + load_config['ignoreUnknownValues'] = ignore_unknown_values + + if max_bad_records is not None: + load_config['maxBadRecords'] = max_bad_records + + if quote_character is not None: + load_config['quote'] = quote_character + + if skip_leading_rows is not None: + load_config['skipLeadingRows'] = skip_leading_rows + + if write_disposition is not None: + load_config['writeDisposition'] = write_disposition + + +def _parse_schema_resource(info): + """Parse a resource fragment into a schema field. + + :type info: mapping + :param info: should contain a "fields" key to be parsed + + :rtype: list of :class:`SchemaField`, or ``NoneType`` + :returns: a list of parsed fields, or ``None`` if no "fields" key is + present in ``info``. + """ + if 'fields' not in info: + return None + + schema = [] + for r_field in info['fields']: + name = r_field['name'] + field_type = r_field['type'] + mode = r_field.get('mode', 'NULLABLE') + description = r_field.get('description') + sub_fields = _parse_schema_resource(r_field) + schema.append( + SchemaField(name, field_type, mode, description, sub_fields)) + return schema + + +def _build_schema_resource(fields): + """Generate a resource fragment for a schema. + + :type fields: sequence of :class:`SchemaField` + :param fields: schema to be dumped + + :rtype: mapping + :returns: a mapping describing the schema of the supplied fields. + """ + infos = [] + for field in fields: + info = {'name': field.name, + 'type': field.field_type, + 'mode': field.mode} + if field.description is not None: + info['description'] = field.description + if field.fields is not None: + info['fields'] = _build_schema_resource(field.fields) + infos.append(info) + return infos + + +class _UploadConfig(object): + """Faux message FBO apitools' 'configure_request'.""" + accept = ['*/*'] + max_size = None + resumable_multipart = True + resumable_path = u'/upload/bigquery/v2/projects/{project}/jobs' + simple_multipart = True + simple_path = u'/upload/bigquery/v2/projects/{project}/jobs' + + +class _UrlBuilder(object): + """Faux builder FBO apitools' 'configure_request'""" + def __init__(self): + self.query_params = {} + self._relative_path = '' +
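+# Usage sketch (illustrative only): loading a local CSV file with
+# ``upload_from_file``; the handle must be opened in binary mode, the path
+# is a placeholder, and the returned job can be polled like any other
+# load job.
+#
+#     with open('/tmp/people.csv', 'rb') as csv_file:
+#         job = table.upload_from_file(
+#             csv_file, 'CSV', rewind=True, skip_leading_rows=1)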
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/client.html b/0.18.1/_modules/gcloud/bigtable/client.html new file mode 100644 index 000000000000..1020b7f74d97 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/client.html @@ -0,0 +1,713 @@ + + + + + + + + gcloud.bigtable.client — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigtable.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Parent client for calling the Google Cloud Bigtable API.
+
+This is the base from which all interactions with the API occur.
+
+In the hierarchy of API concepts:
+
+* a :class:`Client` owns an :class:`.Instance`
+* an :class:`.Instance` owns a :class:`Table <gcloud.bigtable.table.Table>`
+* a :class:`Table <gcloud.bigtable.table.Table>` owns a
+  :class:`ColumnFamily <.column_family.ColumnFamily>`
+* a :class:`Table <gcloud.bigtable.table.Table>` owns a :class:`Row <.row.Row>`
+  (and all the cells in the row)
+"""
+
+
+from pkg_resources import get_distribution
+
+from grpc.beta import implementations
+
+from gcloud.bigtable._generated import (
+    bigtable_instance_admin_pb2 as instance_admin_v2_pb2)
+# V2 table admin service
+from gcloud.bigtable._generated import (
+    bigtable_table_admin_pb2 as table_admin_v2_pb2)
+# V2 data service
+from gcloud.bigtable._generated import (
+    bigtable_pb2 as data_v2_pb2)
+
+from gcloud.bigtable._generated import (
+    operations_grpc_pb2 as operations_grpc_v2_pb2)
+
+from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
+from gcloud.bigtable.instance import Instance
+from gcloud.bigtable.instance import _EXISTING_INSTANCE_LOCATION_ID
+from gcloud.client import _ClientFactoryMixin
+from gcloud.client import _ClientProjectMixin
+from gcloud.credentials import get_credentials
+
+
+TABLE_STUB_FACTORY_V2 = (
+    table_admin_v2_pb2.beta_create_BigtableTableAdmin_stub)
+TABLE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
+"""Table Admin API request host."""
+TABLE_ADMIN_PORT_V2 = 443
+"""Table Admin API request port."""
+
+INSTANCE_STUB_FACTORY_V2 = (
+    instance_admin_v2_pb2.beta_create_BigtableInstanceAdmin_stub)
+INSTANCE_ADMIN_HOST_V2 = 'bigtableadmin.googleapis.com'
+"""Cluster Admin API request host."""
+INSTANCE_ADMIN_PORT_V2 = 443
+"""Cluster Admin API request port."""
+
+DATA_STUB_FACTORY_V2 = data_v2_pb2.beta_create_Bigtable_stub
+DATA_API_HOST_V2 = 'bigtable.googleapis.com'
+"""Data API request host."""
+DATA_API_PORT_V2 = 443
+"""Data API request port."""
+
+OPERATIONS_STUB_FACTORY_V2 = operations_grpc_v2_pb2.beta_create_Operations_stub
+OPERATIONS_API_HOST_V2 = INSTANCE_ADMIN_HOST_V2
+OPERATIONS_API_PORT_V2 = INSTANCE_ADMIN_PORT_V2
+
+ADMIN_SCOPE = 'https://www.googleapis.com/auth/bigtable.admin'
+"""Scope for interacting with the Cluster Admin and Table Admin APIs."""
+DATA_SCOPE = 'https://www.googleapis.com/auth/bigtable.data'
+"""Scope for reading and writing table data."""
+READ_ONLY_SCOPE = 'https://www.googleapis.com/auth/bigtable.data.readonly'
+"""Scope for reading table data."""
+
+DEFAULT_TIMEOUT_SECONDS = 10
+"""The default timeout to use for API requests."""
+
+DEFAULT_USER_AGENT = 'gcloud-python/{0}'.format(
+    get_distribution('gcloud').version)
+"""The default user agent for API requests."""
+
+
+
[docs]class Client(_ClientFactoryMixin, _ClientProjectMixin):
+    """Client for interacting with Google Cloud Bigtable API.
+
+    .. note::
+
+        Since the Cloud Bigtable API requires the gRPC transport, no
+        ``http`` argument is accepted by this class.
+
+    :type project: :class:`str` or :func:`unicode <unicode>`
+    :param project: (Optional) The ID of the project which owns the
+                    instances, tables and data. If not provided, will
+                    attempt to determine from the environment.
+
+    :type credentials:
+        :class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` or
+        :data:`NoneType <types.NoneType>`
+    :param credentials: (Optional) The OAuth2 Credentials to use for this
+                        client. If not provided, defaults to the Google
+                        Application Default Credentials.
+
+    :type read_only: bool
+    :param read_only: (Optional) Boolean indicating if the data scope should
+                      be for reading only (or for writing as well). Defaults
+                      to :data:`False`.
+
+    :type admin: bool
+    :param admin: (Optional) Boolean indicating if the client will be used to
+                  interact with the Instance Admin or Table Admin APIs. This
+                  requires the :const:`ADMIN_SCOPE`. Defaults to :data:`False`.
+
+    :type user_agent: str
+    :param user_agent: (Optional) The user agent to be used with API requests.
+                       Defaults to :const:`DEFAULT_USER_AGENT`.
+
+    :type timeout_seconds: int
+    :param timeout_seconds: Number of seconds for request time-out. If not
+                            passed, defaults to
+                            :const:`DEFAULT_TIMEOUT_SECONDS`.
+
+    :raises: :class:`ValueError <exceptions.ValueError>` if both ``read_only``
+             and ``admin`` are :data:`True`
+    """
+
+    def __init__(self, project=None, credentials=None,
+                 read_only=False, admin=False, user_agent=DEFAULT_USER_AGENT,
+                 timeout_seconds=DEFAULT_TIMEOUT_SECONDS):
+        _ClientProjectMixin.__init__(self, project=project)
+        if credentials is None:
+            credentials = get_credentials()
+
+        if read_only and admin:
+            raise ValueError('A read-only client cannot also perform '
+                             'administrative actions.')
+
+        scopes = []
+        if read_only:
+            scopes.append(READ_ONLY_SCOPE)
+        else:
+            scopes.append(DATA_SCOPE)
+
+        if admin:
+            scopes.append(ADMIN_SCOPE)
+
+        self._admin = bool(admin)
+        try:
+            credentials = credentials.create_scoped(scopes)
+        except AttributeError:
+            pass
+        self._credentials = credentials
+        self.user_agent = user_agent
+        self.timeout_seconds = timeout_seconds
+
+        # These will be set in start().
+        self._data_stub_internal = None
+        self._instance_stub_internal = None
+        self._operations_stub_internal = None
+        self._table_stub_internal = None
+
[docs] def copy(self): + """Make a copy of this client. + + Copies the local data stored as simple types but does not copy the + current state of any open connections with the Cloud Bigtable API. + + :rtype: :class:`.Client` + :returns: A copy of the current client. + """ + credentials = self._credentials + copied_creds = credentials.create_scoped(credentials.scopes) + return self.__class__( + self.project, + copied_creds, + READ_ONLY_SCOPE in copied_creds.scopes, + self._admin, + self.user_agent, + self.timeout_seconds, + )
+ + @property + def credentials(self): + """Getter for client's credentials. + + :rtype: + :class:`OAuth2Credentials <oauth2client.client.OAuth2Credentials>` + :returns: The credentials stored on the client. + """ + return self._credentials + + @property + def project_name(self): + """Project name to be used with Instance Admin API. + + .. note:: + + This property will not change if ``project`` does not, but the + return value is not cached. + + The project name is of the form + + ``"projects/{project}"`` + + :rtype: str + :returns: The project name to be used with the Cloud Bigtable Admin + API RPC service. + """ + return 'projects/' + self.project + + @property + def _data_stub(self): + """Getter for the gRPC stub used for the Data API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + :raises: :class:`ValueError <exceptions.ValueError>` if the current + client has not been :meth:`start`-ed. + """ + if self._data_stub_internal is None: + raise ValueError('Client has not been started.') + return self._data_stub_internal + + @property + def _instance_stub(self): + """Getter for the gRPC stub used for the Instance Admin API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + :raises: :class:`ValueError <exceptions.ValueError>` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. + """ + if not self._admin: + raise ValueError('Client is not an admin client.') + if self._instance_stub_internal is None: + raise ValueError('Client has not been started.') + return self._instance_stub_internal + + @property + def _operations_stub(self): + """Getter for the gRPC stub used for the Operations API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + :raises: :class:`ValueError <exceptions.ValueError>` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. + """ + if not self._admin: + raise ValueError('Client is not an admin client.') + if self._operations_stub_internal is None: + raise ValueError('Client has not been started.') + return self._operations_stub_internal + + @property + def _table_stub(self): + """Getter for the gRPC stub used for the Table Admin API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + :raises: :class:`ValueError <exceptions.ValueError>` if the current + client is not an admin client or if it has not been + :meth:`start`-ed. + """ + if not self._admin: + raise ValueError('Client is not an admin client.') + if self._table_stub_internal is None: + raise ValueError('Client has not been started.') + return self._table_stub_internal + + def _make_data_stub(self): + """Creates gRPC stub to make requests to the Data API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + """ + return _make_stub(self, DATA_STUB_FACTORY_V2, + DATA_API_HOST_V2, DATA_API_PORT_V2) + + def _make_instance_stub(self): + """Creates gRPC stub to make requests to the Instance Admin API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + """ + return _make_stub(self, INSTANCE_STUB_FACTORY_V2, + INSTANCE_ADMIN_HOST_V2, INSTANCE_ADMIN_PORT_V2) + + def _make_operations_stub(self): + """Creates gRPC stub to make requests to the Operations API. + + These are for long-running operations of the Instance Admin API, + hence the host and port matching. 
+ + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + """ + return _make_stub(self, OPERATIONS_STUB_FACTORY_V2, + OPERATIONS_API_HOST_V2, OPERATIONS_API_PORT_V2) + + def _make_table_stub(self): + """Creates gRPC stub to make requests to the Table Admin API. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: A gRPC stub object. + """ + return _make_stub(self, TABLE_STUB_FACTORY_V2, + TABLE_ADMIN_HOST_V2, TABLE_ADMIN_PORT_V2) + +
[docs] def is_started(self): + """Check if the client has been started. + + :rtype: bool + :returns: Boolean indicating if the client has been started. + """ + return self._data_stub_internal is not None
+ +
[docs] def start(self): + """Prepare the client to make requests. + + Activates gRPC contexts for making requests to the Bigtable + Service(s). + """ + if self.is_started(): + return + + # NOTE: We __enter__ the stubs more-or-less permanently. This is + # because only after entering the context managers is the + # connection created. We don't want to immediately close + # those connections since the client will make many + # requests with it over HTTP/2. + self._data_stub_internal = self._make_data_stub() + self._data_stub_internal.__enter__() + if self._admin: + self._instance_stub_internal = self._make_instance_stub() + self._operations_stub_internal = self._make_operations_stub() + self._table_stub_internal = self._make_table_stub() + + self._instance_stub_internal.__enter__() + self._operations_stub_internal.__enter__() + self._table_stub_internal.__enter__()
+ + def __enter__(self): + """Starts the client as a context manager.""" + self.start() + return self + +
[docs] def stop(self): + """Closes all the open gRPC clients.""" + if not self.is_started(): + return + + # When exit-ing, we pass None as the exception type, value and + # traceback to __exit__. + self._data_stub_internal.__exit__(None, None, None) + if self._admin: + self._instance_stub_internal.__exit__(None, None, None) + self._operations_stub_internal.__exit__(None, None, None) + self._table_stub_internal.__exit__(None, None, None) + + self._data_stub_internal = None + self._instance_stub_internal = None + self._operations_stub_internal = None + self._table_stub_internal = None
+ + def __exit__(self, exc_type, exc_val, exc_t): + """Stops the client as a context manager.""" + self.stop() + +
[docs]    def instance(self, instance_id, location=_EXISTING_INSTANCE_LOCATION_ID,
+                 display_name=None, serve_nodes=DEFAULT_SERVE_NODES):
+        """Factory to create an instance associated with this client.
+
+        :type instance_id: str
+        :param instance_id: The ID of the instance.
+
+        :type location: str
+        :param location: location name, in form
+                         ``projects/<project>/locations/<location>``; used to
+                         set up the instance's cluster.
+
+        :type display_name: str
+        :param display_name: (Optional) The display name for the instance in
+                             the Cloud Console UI. (Must be between 4 and 30
+                             characters.) If this value is not set in the
+                             constructor, will fall back to the instance ID.
+
+        :type serve_nodes: int
+        :param serve_nodes: (Optional) The number of nodes in the instance's
+                            cluster; used to set up the instance's cluster.
+
+        :rtype: :class:`.Instance`
+        :returns: an instance owned by this client.
+        """
+        return Instance(instance_id, self, location,
+                        display_name=display_name, serve_nodes=serve_nodes)
+ +
[docs] def list_instances(self): + """List instances owned by the project. + + :rtype: tuple + :returns: A pair of results, the first is a list of + :class:`.Instance` objects returned and the second is a + list of strings (the failed locations in the request). + """ + request_pb = instance_admin_v2_pb2.ListInstancesRequest( + parent=self.project_name) + + response = self._instance_stub.ListInstances( + request_pb, self.timeout_seconds) + + instances = [Instance.from_pb(instance_pb, self) + for instance_pb in response.instances] + return instances, response.failed_locations
+ + +class _MetadataPlugin(object): + """Callable class to transform metadata for gRPC requests. + + :type client: :class:`.client.Client` + :param client: The client that owns the instance. + Provides authorization and user agent. + """ + + def __init__(self, client): + self._credentials = client.credentials + self._user_agent = client.user_agent + + def __call__(self, unused_context, callback): + """Adds authorization header to request metadata.""" + access_token = self._credentials.get_access_token().access_token + headers = [ + ('Authorization', 'Bearer ' + access_token), + ('User-agent', self._user_agent), + ] + callback(headers, None) + + +def _make_stub(client, stub_factory, host, port): + """Makes a stub for an RPC service. + + Uses / depends on the beta implementation of gRPC. + + :type client: :class:`.client.Client` + :param client: The client that owns the instance. + Provides authorization and user agent. + + :type stub_factory: callable + :param stub_factory: A factory which will create a gRPC stub for + a given service. + + :type host: str + :param host: The host for the service. + + :type port: int + :param port: The port for the service. + + :rtype: :class:`grpc.beta._stub._AutoIntermediary` + :returns: The stub object used to make gRPC requests to a given API. + """ + # Leaving the first argument to ssl_channel_credentials() as None + # loads root certificates from `grpc/_adapter/credentials/roots.pem`. + transport_creds = implementations.ssl_channel_credentials(None, None, None) + custom_metadata_plugin = _MetadataPlugin(client) + auth_creds = implementations.metadata_call_credentials( + custom_metadata_plugin, name='google_creds') + channel_creds = implementations.composite_channel_credentials( + transport_creds, auth_creds) + channel = implementations.secure_channel(host, port, channel_creds) + return stub_factory(channel) +
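+
+
+# A minimal usage sketch (the project ID is hypothetical): a Client must be
+# start()-ed before any stub-backed call such as list_instances() will work;
+# the context-manager form below calls start() and stop() automatically.
+def _example_client_usage():
+    """Illustrative only; assumes Application Default Credentials."""
+    client = Client(project='my-project', admin=True)
+    with client:
+        instances, failed_locations = client.list_instances()
+        assert not failed_locations
+        for instance in instances:
+            print(instance.display_name)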
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/cluster.html b/0.18.1/_modules/gcloud/bigtable/cluster.html new file mode 100644 index 000000000000..ccf40552f576 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/cluster.html @@ -0,0 +1,617 @@ + + + + + + + + gcloud.bigtable.cluster — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigtable.cluster

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Cluster."""
+
+
+import re
+
+from google.longrunning import operations_pb2
+
+from gcloud.bigtable._generated import (
+    instance_pb2 as data_v2_pb2)
+from gcloud.bigtable._generated import (
+    bigtable_instance_admin_pb2 as messages_v2_pb2)
+
+
+_CLUSTER_NAME_RE = re.compile(r'^projects/(?P<project>[^/]+)/'
+                              r'instances/(?P<instance>[^/]+)/clusters/'
+                              r'(?P<cluster_id>[a-z][-a-z0-9]*)$')
+_OPERATION_NAME_RE = re.compile(r'^operations/'
+                                r'projects/([^/]+)/'
+                                r'instances/([^/]+)/'
+                                r'clusters/([a-z][-a-z0-9]*)/'
+                                r'operations/(?P<operation_id>\d+)$')
+_TYPE_URL_MAP = {
+}
+
+DEFAULT_SERVE_NODES = 3
+"""Default number of nodes to use when creating a cluster."""
+
+
+def _prepare_create_request(cluster):
+    """Creates a protobuf request for a CreateCluster request.
+
+    :type cluster: :class:`Cluster`
+    :param cluster: The cluster to be created.
+
+    :rtype: :class:`.messages_v2_pb2.CreateClusterRequest`
+    :returns: The CreateCluster request object containing the cluster info.
+    """
+    return messages_v2_pb2.CreateClusterRequest(
+        parent=cluster._instance.name,
+        cluster_id=cluster.cluster_id,
+        cluster=data_v2_pb2.Cluster(
+            serve_nodes=cluster.serve_nodes,
+        ),
+    )
+
+
+def _parse_pb_any_to_native(any_val, expected_type=None):
+    """Convert a serialized "google.protobuf.Any" value to actual type.
+
+    :type any_val: :class:`google.protobuf.any_pb2.Any`
+    :param any_val: A serialized protobuf value container.
+
+    :type expected_type: str
+    :param expected_type: (Optional) The type URL we expect ``any_val``
+                          to have.
+
+    :rtype: object
+    :returns: The de-serialized object.
+    :raises: :class:`ValueError <exceptions.ValueError>` if the
+             ``expected_type`` does not match the ``type_url`` on the input.
+    """
+    if expected_type is not None and expected_type != any_val.type_url:
+        raise ValueError('Expected type: %s, Received: %s' % (
+            expected_type, any_val.type_url))
+    container_class = _TYPE_URL_MAP[any_val.type_url]
+    return container_class.FromString(any_val.value)
+
+
+def _process_operation(operation_pb):
+    """Processes a create protobuf response.
+
+    :type operation_pb: :class:`google.longrunning.operations_pb2.Operation`
+    :param operation_pb: The long-running operation response from a
+                         Create/Update/Undelete cluster request.
+
+    :rtype: tuple
+    :returns: integer ID of the operation (``operation_id``).
+    :raises: :class:`ValueError <exceptions.ValueError>` if the operation name
+             doesn't match the :data:`_OPERATION_NAME_RE` regex.
+    """
+    match = _OPERATION_NAME_RE.match(operation_pb.name)
+    if match is None:
+        raise ValueError('Operation name was not in the expected '
+                         'format after a cluster modification.',
+                         operation_pb.name)
+    operation_id = int(match.group('operation_id'))
+
+    return operation_id
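+
+
+# A short sketch (all IDs hypothetical) of the operation name format that
+# _process_operation() expects back from the Instance Admin API:
+def _example_operation_name():
+    """Illustrative only."""
+    name = ('operations/projects/my-project/instances/my-instance/'
+            'clusters/my-cluster/operations/1234')
+    match = _OPERATION_NAME_RE.match(name)
+    assert match is not None
+    assert int(match.group('operation_id')) == 1234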
+
+
+
[docs]class Operation(object): + """Representation of a Google API Long-Running Operation. + + In particular, these will be the result of operations on + clusters using the Cloud Bigtable API. + + :type op_type: str + :param op_type: The type of operation being performed. Expect + ``create``, ``update`` or ``undelete``. + + :type op_id: int + :param op_id: The ID of the operation. + + :type cluster: :class:`Cluster` + :param cluster: The cluster that created the operation. + """ + + def __init__(self, op_type, op_id, cluster=None): + self.op_type = op_type + self.op_id = op_id + self._cluster = cluster + self._complete = False + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.op_type == self.op_type and + other.op_id == self.op_id and + other._cluster == self._cluster and + other._complete == self._complete) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def finished(self): + """Check if the operation has finished. + + :rtype: bool + :returns: A boolean indicating if the current operation has completed. + :raises: :class:`ValueError <exceptions.ValueError>` if the operation + has already completed. + """ + if self._complete: + raise ValueError('The operation has completed.') + + operation_name = ('operations/' + self._cluster.name + + '/operations/%d' % (self.op_id,)) + request_pb = operations_pb2.GetOperationRequest(name=operation_name) + # We expect a `google.longrunning.operations_pb2.Operation`. + client = self._cluster._instance._client + operation_pb = client._operations_stub.GetOperation( + request_pb, client.timeout_seconds) + + if operation_pb.done: + self._complete = True + return True + else: + return False
+ + +
[docs]class Cluster(object):
+    """Representation of a Google Cloud Bigtable Cluster.
+
+    We can use a :class:`Cluster` to:
+
+    * :meth:`reload` itself
+    * :meth:`create` itself
+    * :meth:`update` itself
+    * :meth:`delete` itself
+
+    .. note::
+
+        For now, we leave out the ``default_storage_type`` (an enum)
+        which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`.
+
+    :type cluster_id: str
+    :param cluster_id: The ID of the cluster.
+
+    :type instance: :class:`.instance.Instance`
+    :param instance: The instance where the cluster resides.
+
+    :type serve_nodes: int
+    :param serve_nodes: (Optional) The number of nodes in the cluster.
+                        Defaults to :data:`DEFAULT_SERVE_NODES`.
+    """
+
+    def __init__(self, cluster_id, instance,
+                 serve_nodes=DEFAULT_SERVE_NODES):
+        self.cluster_id = cluster_id
+        self._instance = instance
+        self.serve_nodes = serve_nodes
+        self.location = None
+
+    def _update_from_pb(self, cluster_pb):
+        """Refresh self from the server-provided protobuf.
+
+        Helper for :meth:`from_pb` and :meth:`reload`.
+        """
+        if not cluster_pb.serve_nodes:  # Simple field (int32)
+            raise ValueError('Cluster protobuf does not contain serve_nodes')
+        self.serve_nodes = cluster_pb.serve_nodes
+        self.location = cluster_pb.location
+
+    @classmethod
[docs]    def from_pb(cls, cluster_pb, instance):
+        """Creates a cluster instance from a protobuf.
+
+        :type cluster_pb: :class:`instance_pb2.Cluster`
+        :param cluster_pb: A cluster protobuf object.
+
+        :type instance: :class:`.instance.Instance`
+        :param instance: The instance that owns the cluster.
+
+        :rtype: :class:`Cluster`
+        :returns: The cluster parsed from the protobuf response.
+        :raises:
+            :class:`ValueError <exceptions.ValueError>` if the cluster
+            name does not match
+            ``projects/{project}/instances/{instance}/clusters/{cluster_id}``
+            or if the parsed project ID or instance ID does not match the
+            corresponding value on the client.
+        """
+        match = _CLUSTER_NAME_RE.match(cluster_pb.name)
+        if match is None:
+            raise ValueError('Cluster protobuf name was not in the '
+                             'expected format.', cluster_pb.name)
+        if match.group('project') != instance._client.project:
+            raise ValueError('Project ID on cluster does not match the '
+                             'project ID on the client')
+        if match.group('instance') != instance.instance_id:
+            raise ValueError('Instance ID on cluster does not match the '
+                             'instance ID on the client')
+
+        result = cls(match.group('cluster_id'), instance)
+        result._update_from_pb(cluster_pb)
+        return result
+ +
[docs] def copy(self): + """Make a copy of this cluster. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Cluster` + :returns: A copy of the current cluster. + """ + new_instance = self._instance.copy() + return self.__class__(self.cluster_id, new_instance, + serve_nodes=self.serve_nodes)
+ + @property + def name(self): + """Cluster name used in requests. + + .. note:: + This property will not change if ``_instance`` and ``cluster_id`` + do not, but the return value is not cached. + + The cluster name is of the form + + ``"projects/{project}/instances/{instance}/clusters/{cluster_id}"`` + + :rtype: str + :returns: The cluster name. + """ + return self._instance.name + '/clusters/' + self.cluster_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the serve_nodes. Instead, it only compares + # identifying values instance, cluster ID and client. This is + # intentional, since the same cluster can be in different states + # if not synchronized. Clusters with similar instance/cluster + # settings but different clients can't be used in the same way. + return (other.cluster_id == self.cluster_id and + other._instance == self._instance) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def reload(self): + """Reload the metadata for this cluster.""" + request_pb = messages_v2_pb2.GetClusterRequest(name=self.name) + # We expect a `._generated.instance_pb2.Cluster`. + cluster_pb = self._instance._client._instance_stub.GetCluster( + request_pb, self._instance._client.timeout_seconds) + + # NOTE: _update_from_pb does not check that the project, instance and + # cluster ID on the response match the request. + self._update_from_pb(cluster_pb)
+ +
[docs] def create(self): + """Create this cluster. + + .. note:: + + Uses the ``project``, ``instance`` and ``cluster_id`` on the + current :class:`Cluster` in addition to the ``serve_nodes``. + To change them before creating, reset the values via + + .. code:: python + + cluster.serve_nodes = 8 + cluster.cluster_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._instance._client._instance_stub.CreateCluster( + request_pb, self._instance._client.timeout_seconds) + + op_id = _process_operation(operation_pb) + return Operation('create', op_id, cluster=self)
+ +
[docs]    def update(self):
+        """Update this cluster.
+
+        .. note::
+
+            Updates the ``serve_nodes``. If you'd like to
+            change them before updating, reset the values via
+
+            .. code:: python
+
+                cluster.serve_nodes = 8
+
+            before calling :meth:`update`.
+
+        :rtype: :class:`Operation`
+        :returns: The long-running operation corresponding to the
+                  update operation.
+        """
+        request_pb = data_v2_pb2.Cluster(
+            name=self.name,
+            serve_nodes=self.serve_nodes,
+        )
+        # We expect a `google.longrunning.operations_pb2.Operation`.
+        operation_pb = self._instance._client._instance_stub.UpdateCluster(
+            request_pb, self._instance._client.timeout_seconds)
+
+        op_id = _process_operation(operation_pb)
+        return Operation('update', op_id, cluster=self)
+ +
[docs] def delete(self): + """Delete this cluster. + + Marks a cluster and all of its tables for permanent deletion in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the cluster's reserved resources. + * The cluster's ``delete_time`` field will be set 7 days in the future. + + Soon afterward: + + * All tables within the cluster will become unavailable. + + Prior to the cluster's ``delete_time``: + + * The cluster can be recovered with a call to ``UndeleteCluster``. + * All other attempts to modify or delete the cluster will be rejected. + + At the cluster's ``delete_time``: + + * The cluster and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v2_pb2.DeleteClusterRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._instance._client._instance_stub.DeleteCluster( + request_pb, self._instance._client.timeout_seconds)
+
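+
+# A minimal usage sketch (hypothetical IDs): cluster creation is
+# asynchronous, returning an :class:`Operation` that can be polled
+# with finished().
+def _example_create_cluster(instance):
+    """Illustrative only; ``instance`` is an already-existing Instance."""
+    cluster = Cluster('my-cluster', instance, serve_nodes=DEFAULT_SERVE_NODES)
+    operation = cluster.create()
+    while not operation.finished():
+        pass  # Real code should sleep between polls.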
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/column_family.html b/0.18.1/_modules/gcloud/bigtable/column_family.html new file mode 100644 index 000000000000..5bc1e097bf63 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/column_family.html @@ -0,0 +1,575 @@ + + + + + + + + gcloud.bigtable.column_family — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigtable.column_family

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Column Family."""
+
+
+import datetime
+
+from google.protobuf import duration_pb2
+
+from gcloud._helpers import _total_seconds
+from gcloud.bigtable._generated import (
+    table_pb2 as table_v2_pb2)
+from gcloud.bigtable._generated import (
+    bigtable_table_admin_pb2 as table_admin_v2_pb2)
+
+
+def _timedelta_to_duration_pb(timedelta_val):
+    """Convert a Python timedelta object to a duration protobuf.
+
+    .. note::
+
+        The Python timedelta has a granularity of microseconds while
+        the protobuf duration type has a granularity of nanoseconds.
+
+    :type timedelta_val: :class:`datetime.timedelta`
+    :param timedelta_val: A timedelta object.
+
+    :rtype: :class:`google.protobuf.duration_pb2.Duration`
+    :returns: A duration object equivalent to the time delta.
+    """
+    seconds_decimal = _total_seconds(timedelta_val)
+    # Truncate the fractional part, keeping only whole seconds.
+    seconds = int(seconds_decimal)
+    if seconds_decimal < 0:
+        signed_micros = timedelta_val.microseconds - 10**6
+    else:
+        signed_micros = timedelta_val.microseconds
+    # Convert microseconds to nanoseconds.
+    nanos = 1000 * signed_micros
+    return duration_pb2.Duration(seconds=seconds, nanos=nanos)
+
+
+def _duration_pb_to_timedelta(duration_pb):
+    """Convert a duration protobuf to a Python timedelta object.
+
+    .. note::
+
+        The Python timedelta has a granularity of microseconds while
+        the protobuf duration type has a granularity of nanoseconds.
+
+    :type duration_pb: :class:`google.protobuf.duration_pb2.Duration`
+    :param duration_pb: A protobuf duration object.
+
+    :rtype: :class:`datetime.timedelta`
+    :returns: The converted timedelta object.
+    """
+    return datetime.timedelta(
+        seconds=duration_pb.seconds,
+        microseconds=(duration_pb.nanos / 1000.0),
+    )
+
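+
+# A worked sketch of the two conversions above: 1.5 seconds becomes
+# Duration(seconds=1, nanos=500000000) and converts back unchanged.
+def _example_duration_round_trip():
+    """Illustrative only."""
+    delta = datetime.timedelta(seconds=1, microseconds=500000)
+    duration_pb = _timedelta_to_duration_pb(delta)
+    assert duration_pb.seconds == 1
+    assert duration_pb.nanos == 500000000
+    assert _duration_pb_to_timedelta(duration_pb) == delta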
+
+
[docs]class GarbageCollectionRule(object):
+    """Garbage collection rule for column families within a table.
+
+    Cells in the column family (within a table) fitting the rule will be
+    deleted during garbage collection.
+
+    .. note::
+
+        This class is a do-nothing base class for all GC rules.
+
+    .. note::
+
+        A string ``gc_expression`` can also be used with API requests, but
+        that value would be superseded by a ``gc_rule``. As a result, we
+        don't support that feature and instead support it via native classes.
+    """
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+ + +
[docs]class MaxVersionsGCRule(GarbageCollectionRule): + """Garbage collection limiting the number of versions of a cell. + + :type max_num_versions: int + :param max_num_versions: The maximum number of versions + """ + + def __init__(self, max_num_versions): + self.max_num_versions = max_num_versions + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.max_num_versions == self.max_num_versions + +
[docs] def to_pb(self): + """Converts the garbage collection rule to a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + return table_v2_pb2.GcRule(max_num_versions=self.max_num_versions)
+ + +
[docs]class MaxAgeGCRule(GarbageCollectionRule): + """Garbage collection limiting the age of a cell. + + :type max_age: :class:`datetime.timedelta` + :param max_age: The maximum age allowed for a cell in the table. + """ + + def __init__(self, max_age): + self.max_age = max_age + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.max_age == self.max_age + +
[docs] def to_pb(self): + """Converts the garbage collection rule to a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + max_age = _timedelta_to_duration_pb(self.max_age) + return table_v2_pb2.GcRule(max_age=max_age)
+ + +
[docs]class GCRuleUnion(GarbageCollectionRule): + """Union of garbage collection rules. + + :type rules: list + :param rules: List of :class:`GarbageCollectionRule`. + """ + + def __init__(self, rules): + self.rules = rules + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.rules == self.rules + +
[docs] def to_pb(self): + """Converts the union into a single GC rule as a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + union = table_v2_pb2.GcRule.Union( + rules=[rule.to_pb() for rule in self.rules]) + return table_v2_pb2.GcRule(union=union)
+ + +
[docs]class GCRuleIntersection(GarbageCollectionRule): + """Intersection of garbage collection rules. + + :type rules: list + :param rules: List of :class:`GarbageCollectionRule`. + """ + + def __init__(self, rules): + self.rules = rules + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.rules == self.rules + +
[docs] def to_pb(self): + """Converts the intersection into a single GC rule as a protobuf. + + :rtype: :class:`.table_v2_pb2.GcRule` + :returns: The converted current object. + """ + intersection = table_v2_pb2.GcRule.Intersection( + rules=[rule.to_pb() for rule in self.rules]) + return table_v2_pb2.GcRule(intersection=intersection)
+ + +
[docs]class ColumnFamily(object):
+    """Representation of a Google Cloud Bigtable Column Family.
+
+    We can use a :class:`ColumnFamily` to:
+
+    * :meth:`create` itself
+    * :meth:`update` itself
+    * :meth:`delete` itself
+
+    :type column_family_id: str
+    :param column_family_id: The ID of the column family. Must be of the
+                             form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+    :type table: :class:`Table <gcloud.bigtable.table.Table>`
+    :param table: The table that owns the column family.
+
+    :type gc_rule: :class:`GarbageCollectionRule`
+    :param gc_rule: (Optional) The garbage collection settings for this
+                    column family.
+    """
+
+    def __init__(self, column_family_id, table, gc_rule=None):
+        self.column_family_id = column_family_id
+        self._table = table
+        self.gc_rule = gc_rule
+
+    @property
+    def name(self):
+        """Column family name used in requests.
+
+        .. note::
+
+            This property will not change if ``column_family_id`` does not,
+            but the return value is not cached.
+
+        The column family name is of the form
+
+        ``"projects/../instances/../tables/../columnFamilies/.."``
+
+        :rtype: str
+        :returns: The column family name.
+        """
+        return self._table.name + '/columnFamilies/' + self.column_family_id
+
+    def __eq__(self, other):
+        if not isinstance(other, self.__class__):
+            return False
+        return (other.column_family_id == self.column_family_id and
+                other._table == self._table and
+                other.gc_rule == self.gc_rule)
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
[docs] def to_pb(self): + """Converts the column family to a protobuf. + + :rtype: :class:`.table_v2_pb2.ColumnFamily` + :returns: The converted current object. + """ + if self.gc_rule is None: + return table_v2_pb2.ColumnFamily() + else: + return table_v2_pb2.ColumnFamily(gc_rule=self.gc_rule.to_pb())
+ +
[docs] def create(self): + """Create this column family.""" + column_family = self.to_pb() + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + create=column_family, + ) + client = self._table._instance._client + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only + # data it contains are the GC rule and the column family ID already + # stored on this instance. + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds)
+ +
[docs] def update(self): + """Update this column family. + + .. note:: + + Only the GC rule can be updated. By changing the column family ID, + you will simply be referring to a different column family. + """ + column_family = self.to_pb() + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + update=column_family) + client = self._table._instance._client + # We expect a `.table_v2_pb2.ColumnFamily`. We ignore it since the only + # data it contains are the GC rule and the column family ID already + # stored on this instance. + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds)
+ +
[docs] def delete(self): + """Delete this column family.""" + request_pb = table_admin_v2_pb2.ModifyColumnFamiliesRequest( + name=self._table.name) + request_pb.modifications.add( + id=self.column_family_id, + drop=True) + client = self._table._instance._client + # We expect a `google.protobuf.empty_pb2.Empty` + client._table_stub.ModifyColumnFamilies(request_pb, + client.timeout_seconds)
+
+
+def _gc_rule_from_pb(gc_rule_pb):
+    """Convert a protobuf GC rule to a native object.
+
+    :type gc_rule_pb: :class:`.table_v2_pb2.GcRule`
+    :param gc_rule_pb: The GC rule to convert.
+
+    :rtype: :class:`GarbageCollectionRule` or :data:`NoneType <types.NoneType>`
+    :returns: An instance of one of the native rules defined
+              in :mod:`column_family` or :data:`None` if no values were
+              set on the protobuf passed in.
+    :raises: :class:`ValueError <exceptions.ValueError>` if the rule name
+             is unexpected.
+    """
+    rule_name = gc_rule_pb.WhichOneof('rule')
+    if rule_name is None:
+        return None
+
+    if rule_name == 'max_num_versions':
+        return MaxVersionsGCRule(gc_rule_pb.max_num_versions)
+    elif rule_name == 'max_age':
+        max_age = _duration_pb_to_timedelta(gc_rule_pb.max_age)
+        return MaxAgeGCRule(max_age)
+    elif rule_name == 'union':
+        return GCRuleUnion([_gc_rule_from_pb(rule)
+                            for rule in gc_rule_pb.union.rules])
+    elif rule_name == 'intersection':
+        rules = [_gc_rule_from_pb(rule)
+                 for rule in gc_rule_pb.intersection.rules]
+        return GCRuleIntersection(rules)
+    else:
+        raise ValueError('Unexpected rule name', rule_name)
+
+ +
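+
+# A minimal sketch of composing the GC rules above: keep at most two
+# versions of a cell, and only cells younger than five days.
+def _example_gc_rules():
+    """Illustrative only."""
+    combined = GCRuleIntersection(rules=[
+        MaxVersionsGCRule(2),
+        MaxAgeGCRule(datetime.timedelta(days=5)),
+    ])
+    # to_pb() yields the single GcRule protobuf sent in API requests;
+    # _gc_rule_from_pb() is the inverse used when parsing responses.
+    assert _gc_rule_from_pb(combined.to_pb()) == combined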
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/happybase/batch.html b/0.18.1/_modules/gcloud/bigtable/happybase/batch.html new file mode 100644 index 000000000000..0c76b84f3c12 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/happybase/batch.html @@ -0,0 +1,559 @@ + + + + + + + + gcloud.bigtable.happybase.batch — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigtable.happybase.batch

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Bigtable HappyBase batch module."""
+
+
+import datetime
+import warnings
+
+import six
+
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud.bigtable.row_filters import TimestampRange
+
+
+_WAL_SENTINEL = object()
+# Assumed granularity of timestamps in Cloud Bigtable.
+_ONE_MILLISECOND = datetime.timedelta(microseconds=1000)
+_WARN = warnings.warn
+_WAL_WARNING = ('The wal argument (Write-Ahead-Log) is not '
+                'supported by Cloud Bigtable.')
+
+
+
[docs]class Batch(object): + """Batch class for accumulating mutations. + + .. note:: + + When using a batch with ``transaction=False`` as a context manager + (i.e. in a ``with`` statement), mutations will still be sent as + row mutations even if the context manager exits with an error. + This behavior is in place to match the behavior in the HappyBase + HBase / Thrift implementation. + + :type table: :class:`Table <gcloud.bigtable.happybase.table.Table>` + :param table: The table where mutations will be applied. + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the epoch) + that all mutations will be applied at. + + :type batch_size: int + :param batch_size: (Optional) The maximum number of mutations to allow + to accumulate before committing them. + + :type transaction: bool + :param transaction: Flag indicating if the mutations should be sent + transactionally or not. If ``transaction=True`` and + an error occurs while a :class:`Batch` is active, + then none of the accumulated mutations will be + committed. If ``batch_size`` is set, the mutation + can't be transactional. + + :type wal: object + :param wal: Unused parameter (Boolean for using the HBase Write Ahead Log). + Provided for compatibility with HappyBase, but irrelevant for + Cloud Bigtable since it does not have a Write Ahead Log. + + :raises: :class:`TypeError <exceptions.TypeError>` if ``batch_size`` + is set and ``transaction=True``. + :class:`ValueError <exceptions.ValueError>` if ``batch_size`` + is not positive. + """ + + def __init__(self, table, timestamp=None, batch_size=None, + transaction=False, wal=_WAL_SENTINEL): + if wal is not _WAL_SENTINEL: + _WARN(_WAL_WARNING) + + if batch_size is not None: + if transaction: + raise TypeError('When batch_size is set, a Batch cannot be ' + 'transactional') + if batch_size <= 0: + raise ValueError('batch_size must be positive') + + self._table = table + self._batch_size = batch_size + self._timestamp = self._delete_range = None + + # Timestamp is in milliseconds, convert to microseconds. + if timestamp is not None: + self._timestamp = _datetime_from_microseconds(1000 * timestamp) + # For deletes, we get the very next timestamp (assuming timestamp + # granularity is milliseconds). This is because HappyBase users + # expect HBase deletes to go **up to** and **including** the + # timestamp while Cloud Bigtable Time Ranges **exclude** the + # final timestamp. + next_timestamp = self._timestamp + _ONE_MILLISECOND + self._delete_range = TimestampRange(end=next_timestamp) + + self._transaction = transaction + + # Internal state for tracking mutations. + self._row_map = {} + self._mutation_count = 0 + +
[docs] def send(self): + """Send / commit the batch of mutations to the server.""" + for row in self._row_map.values(): + # commit() does nothing if row hasn't accumulated any mutations. + row.commit() + + self._row_map.clear() + self._mutation_count = 0
+ + def _try_send(self): + """Send / commit the batch if mutations have exceeded batch size.""" + if self._batch_size and self._mutation_count >= self._batch_size: + self.send() + + def _get_row(self, row_key): + """Gets a row that will hold mutations. + + If the row is not already cached on the current batch, a new row will + be created. + + :type row_key: str + :param row_key: The row key for a row stored in the map. + + :rtype: :class:`Row <gcloud.bigtable.row.Row>` + :returns: The newly created or stored row that will hold mutations. + """ + if row_key not in self._row_map: + table = self._table._low_level_table + self._row_map[row_key] = table.row(row_key) + + return self._row_map[row_key] + +
[docs] def put(self, row, data, wal=_WAL_SENTINEL): + """Insert data into a row in the table owned by this batch. + + :type row: str + :param row: The row key where the mutation will be "put". + + :type data: dict + :param data: Dictionary containing the data to be inserted. The keys + are columns names (of the form ``fam:col``) and the values + are strings (bytes) to be stored in those columns. + + :type wal: object + :param wal: Unused parameter (to over-ride the default on the + instance). Provided for compatibility with HappyBase, but + irrelevant for Cloud Bigtable since it does not have a + Write Ahead Log. + """ + if wal is not _WAL_SENTINEL: + _WARN(_WAL_WARNING) + + row_object = self._get_row(row) + # Make sure all the keys are valid before beginning + # to add mutations. + column_pairs = _get_column_pairs(six.iterkeys(data), + require_qualifier=True) + for column_family_id, column_qualifier in column_pairs: + value = data[column_family_id + ':' + column_qualifier] + row_object.set_cell(column_family_id, column_qualifier, + value, timestamp=self._timestamp) + + self._mutation_count += len(data) + self._try_send()
+
+    def _delete_columns(self, columns, row_object):
+        """Adds delete mutations for a list of columns and column families.
+
+        :type columns: list
+        :param columns: Iterable containing column names (as
+                        strings). Each column name can be either
+
+                        * an entire column family: ``fam`` or ``fam:``
+                        * a single column: ``fam:col``
+
+        :type row_object: :class:`Row <gcloud.bigtable.row.Row>`
+        :param row_object: The row which will hold the delete mutations.
+
+        :raises: :class:`ValueError <exceptions.ValueError>` if the delete
+                 timestamp range is set on the current batch, but a
+                 column family delete is attempted.
+        """
+        column_pairs = _get_column_pairs(columns)
+        for column_family_id, column_qualifier in column_pairs:
+            if column_qualifier is None:
+                if self._delete_range is not None:
+                    raise ValueError('The Cloud Bigtable API does not support '
+                                     'adding a timestamp to '
+                                     '"DeleteFromFamily" mutations')
+                row_object.delete_cells(column_family_id,
+                                        columns=row_object.ALL_COLUMNS)
+            else:
+                row_object.delete_cell(column_family_id,
+                                       column_qualifier,
+                                       time_range=self._delete_range)
+
[docs]    def delete(self, row, columns=None, wal=_WAL_SENTINEL):
+        """Delete data from a row in the table owned by this batch.
+
+        :type row: str
+        :param row: The row key where the delete will occur.
+
+        :type columns: list
+        :param columns: (Optional) Iterable containing column names (as
+                        strings). Each column name can be either
+
+                        * an entire column family: ``fam`` or ``fam:``
+                        * a single column: ``fam:col``
+
+                        If not used, will delete the entire row.
+
+        :type wal: object
+        :param wal: Unused parameter (to over-ride the default on the
+                    instance). Provided for compatibility with HappyBase, but
+                    irrelevant for Cloud Bigtable since it does not have a
+                    Write Ahead Log.
+
+        :raises: :class:`ValueError <exceptions.ValueError>` if the delete
+                 timestamp range is set on the current batch, but a full row
+                 delete is attempted.
+        """
+        if wal is not _WAL_SENTINEL:
+            _WARN(_WAL_WARNING)
+
+        row_object = self._get_row(row)
+
+        if columns is None:
+            # Delete entire row.
+            if self._delete_range is not None:
+                raise ValueError('The Cloud Bigtable API does not support '
+                                 'adding a timestamp to "DeleteFromRow" '
+                                 'mutations')
+            row_object.delete()
+            self._mutation_count += 1
+        else:
+            self._delete_columns(columns, row_object)
+            self._mutation_count += len(columns)
+
+        self._try_send()
+
+    def __enter__(self):
+        """Enter context manager, no set-up required."""
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        """Exit context manager, sending the batch if appropriate.
+
+        :type exc_type: type
+        :param exc_type: The type of the exception if one occurred while the
+                         context manager was active. Otherwise, :data:`None`.
+
+        :type exc_value: :class:`Exception <exceptions.Exception>`
+        :param exc_value: An instance of ``exc_type`` if an exception occurred
+                          while the context was active.
+                          Otherwise, :data:`None`.
+
+        :type traceback: ``traceback`` type
+        :param traceback: The traceback where the exception occurred (if one
+                          did occur). Otherwise, :data:`None`.
+        """
+        # If the context manager encountered an exception and the batch is
+        # transactional, we don't commit the mutations.
+        if self._transaction and exc_type is not None:
+            return
+
+        # NOTE: For non-transactional batches, this will even commit mutations
+        #       if an error occurred during the context manager.
+        self.send()
+
+
+def _get_column_pairs(columns, require_qualifier=False):
+    """Turns a list of columns or column families into parsed pairs.
+
+    Turns a column family (``fam`` or ``fam:``) into a pair such
+    as ``['fam', None]`` and turns a column (``fam:col``) into
+    ``['fam', 'col']``.
+
+    :type columns: list
+    :param columns: Iterable containing column names (as
+                    strings). Each column name can be either
+
+                    * an entire column family: ``fam`` or ``fam:``
+                    * a single column: ``fam:col``
+
+    :type require_qualifier: bool
+    :param require_qualifier: Boolean indicating if the columns should
+                              all have a qualifier or not.
+
+    :rtype: list
+    :returns: List of pairs, where the first element in each pair is the
+              column family and the second is the column qualifier
+              (or :data:`None`).
+    :raises: :class:`ValueError <exceptions.ValueError>` if any of the columns
+             are not of the expected format.
+             :class:`ValueError <exceptions.ValueError>` if
+             ``require_qualifier`` is :data:`True` and one of the values is
+             for an entire column family
+    """
+    column_pairs = []
+    for column in columns:
+        if isinstance(column, six.binary_type):
+            column = column.decode('utf-8')
+        # Remove trailing colons (i.e. for standalone column family).
+        if column.endswith(u':'):
+            column = column[:-1]
+        num_colons = column.count(u':')
+        if num_colons == 0:
+            # column is a column family.
+            if require_qualifier:
+                raise ValueError('column does not contain a qualifier',
+                                 column)
+            else:
+                column_pairs.append([column, None])
+        elif num_colons == 1:
+            column_pairs.append(column.split(u':'))
+        else:
+            raise ValueError('Column contains the : separator more than once')
+
+    return column_pairs
+
+ +
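+
+# A minimal usage sketch (hypothetical keys and columns): as a context
+# manager, a Batch sends its accumulated mutations on exit; ``table`` is
+# a happybase-style Table owning the rows.
+def _example_batch_usage(table):
+    """Illustrative only."""
+    with Batch(table, transaction=True) as batch:
+        batch.put('row-key-1', {'cf1:col1': 'value1', 'cf1:col2': 'value2'})
+        # 'cf2' (no qualifier) deletes the whole column family.
+        batch.delete('row-key-2', columns=['cf1:col1', 'cf2'])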
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/happybase/connection.html b/0.18.1/_modules/gcloud/bigtable/happybase/connection.html new file mode 100644 index 000000000000..d4f094b75e44 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/happybase/connection.html @@ -0,0 +1,724 @@ + + + + + + + + gcloud.bigtable.happybase.connection — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.bigtable.happybase.connection

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Bigtable HappyBase connection module."""
+
+
+import datetime
+import warnings
+
+import six
+
+from grpc.beta import interfaces
+from grpc.framework.interfaces.face import face
+
+try:
+    from happybase.hbase.ttypes import AlreadyExists
+except ImportError:
+    from gcloud.exceptions import Conflict as AlreadyExists
+
+from gcloud.bigtable.client import Client
+from gcloud.bigtable.column_family import GCRuleIntersection
+from gcloud.bigtable.column_family import MaxAgeGCRule
+from gcloud.bigtable.column_family import MaxVersionsGCRule
+from gcloud.bigtable.happybase.table import Table
+from gcloud.bigtable.table import Table as _LowLevelTable
+
+
+# Constants reproduced here for HappyBase compatibility, though values
+# are all None.
+COMPAT_MODES = None
+THRIFT_TRANSPORTS = None
+THRIFT_PROTOCOLS = None
+DEFAULT_HOST = None
+DEFAULT_PORT = None
+DEFAULT_TRANSPORT = None
+DEFAULT_COMPAT = None
+DEFAULT_PROTOCOL = None
+
+_LEGACY_ARGS = frozenset(('host', 'port', 'compat', 'transport', 'protocol'))
+_WARN = warnings.warn
+_BASE_DISABLE = 'Cloud Bigtable has no concept of enabled / disabled tables.'
+_DISABLE_DELETE_MSG = ('The disable argument should not be used in '
+                       'delete_table(). ') + _BASE_DISABLE
+_ENABLE_TMPL = 'Connection.enable_table(%r) was called, but ' + _BASE_DISABLE
+_DISABLE_TMPL = 'Connection.disable_table(%r) was called, but ' + _BASE_DISABLE
+_IS_ENABLED_TMPL = ('Connection.is_table_enabled(%r) was called, but ' +
+                    _BASE_DISABLE)
+_COMPACT_TMPL = ('Connection.compact_table(%r, major=%r) was called, but the '
+                 'Cloud Bigtable API handles table compactions automatically '
+                 'and does not expose an API for it.')
+
+
+def _get_instance(timeout=None):
+    """Gets instance for the default project.
+
+    Creates a client with the inferred credentials and project ID from
+    the local environment. Then uses
+    :meth:`.bigtable.client.Client.list_instances` to
+    get the unique instance owned by the project.
+
+    If the request fails for any reason, or if there isn't exactly one instance
+    owned by the project, then this function will fail.
+
+    :type timeout: int
+    :param timeout: (Optional) The socket timeout in milliseconds.
+
+    :rtype: :class:`gcloud.bigtable.instance.Instance`
+    :returns: The unique instance owned by the project inferred from
+              the environment.
+    :raises ValueError: if there is a failed location or any number of
+                        instances other than one.
+    """
+    client_kwargs = {'admin': True}
+    if timeout is not None:
+        client_kwargs['timeout_seconds'] = timeout / 1000.0
+    client = Client(**client_kwargs)
+    try:
+        client.start()
+        instances, failed_locations = client.list_instances()
+    finally:
+        client.stop()
+
+    if len(failed_locations) != 0:
+        raise ValueError('Determining instance via ListInstances encountered '
+                         'failed locations.')
+    if len(instances) == 0:
+        raise ValueError('This client doesn\'t have access to any instances.')
+    if len(instances) > 1:
+        raise ValueError('This client has access to more than one instance. '
+                         'Please directly pass the instance you\'d '
+                         'like to use.')
+    return instances[0]
+
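+
+# A minimal usage sketch: constructed with no ``instance`` argument,
+# Connection (defined below) calls _get_instance() and therefore requires
+# exactly one instance in the project inferred from the environment.
+def _example_default_connection():
+    """Illustrative only."""
+    connection = Connection()  # admin client, implicit credentials
+    try:
+        print(connection.tables())
+    finally:
+        connection.close()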
+
+
[docs]class Connection(object):
+    """Connection to Cloud Bigtable backend.
+
+    .. note::
+
+        If you pass an ``instance``, it will be :meth:`.Instance.copy`-ed
+        before being stored on the new connection. This also copies the
+        :class:`Client <gcloud.bigtable.client.Client>` that created the
+        :class:`Instance <gcloud.bigtable.instance.Instance>` instance and
+        the :class:`Credentials <oauth2client.client.Credentials>` stored on
+        the client.
+
+    The arguments ``host``, ``port``, ``compat``, ``transport`` and
+    ``protocol`` are allowed (as keyword arguments) for compatibility with
+    HappyBase. However, they will not be used in any way, and will cause a
+    warning if passed.
+
+    :type timeout: int
+    :param timeout: (Optional) The socket timeout in milliseconds.
+
+    :type autoconnect: bool
+    :param autoconnect: (Optional) Whether the connection should be
+                        :meth:`open`-ed during construction.
+
+    :type table_prefix: str
+    :param table_prefix: (Optional) Prefix used to construct table names.
+
+    :type table_prefix_separator: str
+    :param table_prefix_separator: (Optional) Separator used with
+                                   ``table_prefix``. Defaults to ``_``.
+
+    :type instance: :class:`Instance <gcloud.bigtable.instance.Instance>`
+    :param instance: (Optional) A Cloud Bigtable instance. The instance also
+                     owns a client for making gRPC requests to the Cloud
+                     Bigtable API. If not passed in, defaults to creating a
+                     client with ``admin=True`` and using the ``timeout``
+                     here for the ``timeout_seconds`` argument to the
+                     :class:`Client <gcloud.bigtable.client.Client>`
+                     constructor. The credentials for the client
+                     will be the implicit ones loaded from the environment.
+                     Then that client is used to retrieve all the instances
+                     owned by the client's project.
+
+    :type kwargs: dict
+    :param kwargs: Remaining keyword arguments. Provided for HappyBase
+                   compatibility.
+    """
+
+    _instance = None
+
+    def __init__(self, timeout=None, autoconnect=True, table_prefix=None,
+                 table_prefix_separator='_', instance=None, **kwargs):
+        self._handle_legacy_args(kwargs)
+        if table_prefix is not None:
+            if not isinstance(table_prefix, six.string_types):
+                raise TypeError('table_prefix must be a string', 'received',
+                                table_prefix, type(table_prefix))
+
+        if not isinstance(table_prefix_separator, six.string_types):
+            raise TypeError('table_prefix_separator must be a string',
+                            'received', table_prefix_separator,
+                            type(table_prefix_separator))
+
+        self.table_prefix = table_prefix
+        self.table_prefix_separator = table_prefix_separator
+
+        if instance is None:
+            self._instance = _get_instance(timeout=timeout)
+        else:
+            if timeout is not None:
+                raise ValueError('Timeout cannot be used when an existing '
+                                 'instance is passed')
+            self._instance = instance.copy()
+
+        if autoconnect:
+            self.open()
+
+        self._initialized = True
+
+    @staticmethod
+    def _handle_legacy_args(arguments_dict):
+        """Check legacy HappyBase arguments and warn if set.
+
+        :type arguments_dict: dict
+        :param arguments_dict: Unused keyword arguments.
+
+        :raises TypeError: if a keyword other than ``host``, ``port``,
+                           ``compat``, ``transport`` or ``protocol`` is used.
+        """
+        common_args = _LEGACY_ARGS.intersection(six.iterkeys(arguments_dict))
+        if common_args:
+            all_args = ', '.join(common_args)
+            message = ('The HappyBase legacy arguments %s were used. These '
+                       'arguments are unused by gcloud.' % (all_args,))
+            _WARN(message)
+            for arg_name in common_args:
+                arguments_dict.pop(arg_name)
+        if arguments_dict:
+            unexpected_names = arguments_dict.keys()
+            raise TypeError('Received unexpected arguments', unexpected_names)
+
+
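A short sketch of the legacy-argument handling above (``my_instance`` is a
placeholder for an existing
:class:`Instance <gcloud.bigtable.instance.Instance>`)::

    # ``host`` and ``port`` are recognized HappyBase arguments: they are
    # warned about, then silently dropped.
    connection = Connection(host='localhost', port=9090,
                            instance=my_instance, autoconnect=False)

    # Any keyword outside the legacy set raises TypeError.
    Connection(bogus_arg=42, instance=my_instance)  # raises TypeError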
[docs] def open(self): + """Open the underlying transport to Cloud Bigtable. + + This method opens the underlying HTTP/2 gRPC connection using a + :class:`Client <gcloud.bigtable.client.Client>` bound to the + :class:`Instance <gcloud.bigtable.instance.Instance>` owned by + this connection. + """ + self._instance._client.start()
+ +
[docs] def close(self): + """Close the underlying transport to Cloud Bigtable. + + This method closes the underlying HTTP/2 gRPC connection using a + :class:`Client <gcloud.bigtable.client.Client>` bound to the + :class:`Instance <gcloud.bigtable.instance.Instance>` owned by + this connection. + """ + self._instance._client.stop()
+ + def __del__(self): + if self._instance is not None: + self.close() + + def _table_name(self, name): + """Construct a table name by optionally adding a table name prefix. + + :type name: str + :param name: The name to have a prefix added to it. + + :rtype: str + :returns: The prefixed name, if the current connection has a table + prefix set. + """ + if self.table_prefix is None: + return name + + return self.table_prefix + self.table_prefix_separator + name + +
[docs] def table(self, name, use_prefix=True): + """Table factory. + + :type name: str + :param name: The name of the table to be created. + + :type use_prefix: bool + :param use_prefix: Whether to use the table prefix (if any). + + :rtype: :class:`Table <gcloud.bigtable.happybase.table.Table>` + :returns: Table instance owned by this connection. + """ + if use_prefix: + name = self._table_name(name) + return Table(name, self)
+ +
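For example, combining the prefix machinery with the factory above
(``my_instance`` is again a placeholder)::

    connection = Connection(table_prefix='app', autoconnect=False,
                            instance=my_instance)
    connection.table('users').name                     # 'app_users'
    connection.table('users', use_prefix=False).name   # 'users'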
[docs] def tables(self): + """Return a list of table names available to this connection. + + .. note:: + + This lists every table in the instance owned by this connection, + **not** every table that a given user may have access to. + + .. note:: + + If ``table_prefix`` is set on this connection, only returns the + table names which match that prefix. + + :rtype: list + :returns: List of string table names. + """ + low_level_table_instances = self._instance.list_tables() + table_names = [table_instance.table_id + for table_instance in low_level_table_instances] + + # Filter using prefix, and strip prefix from names + if self.table_prefix is not None: + prefix = self._table_name('') + offset = len(prefix) + table_names = [name[offset:] for name in table_names + if name.startswith(prefix)] + + return table_names
+ +
[docs] def create_table(self, name, families): + """Create a table. + + .. warning:: + + The only column family options from HappyBase that are able to be + used with Cloud Bigtable are ``max_versions`` and ``time_to_live``. + + Values in ``families`` represent column family options. In HappyBase, + these are dictionaries, corresponding to the ``ColumnDescriptor`` + structure in the Thrift API. The accepted keys are: + + * ``max_versions`` (``int``) + * ``compression`` (``str``) + * ``in_memory`` (``bool``) + * ``bloom_filter_type`` (``str``) + * ``bloom_filter_vector_size`` (``int``) + * ``bloom_filter_nb_hashes`` (``int``) + * ``block_cache_enabled`` (``bool``) + * ``time_to_live`` (``int``) + + :type name: str + :param name: The name of the table to be created. + + :type families: dict + :param families: Dictionary with column family names as keys and column + family options as the values. The options can be among + + * :class:`dict` + * :class:`.GarbageCollectionRule` + + :raises TypeError: If ``families`` is not a dictionary. + :raises ValueError: If ``families`` has no entries. + :raises AlreadyExists: If creation fails due to an already + existing table. + :raises NetworkError: If creation fails for a reason other than + table exists. + """ + if not isinstance(families, dict): + raise TypeError('families arg must be a dictionary') + + if not families: + raise ValueError('Cannot create table %r (no column ' + 'families specified)' % (name,)) + + # Parse all keys before making any API requests. + gc_rule_dict = {} + for column_family_name, option in families.items(): + if isinstance(column_family_name, six.binary_type): + column_family_name = column_family_name.decode('utf-8') + if column_family_name.endswith(':'): + column_family_name = column_family_name[:-1] + gc_rule_dict[column_family_name] = _parse_family_option(option) + + # Create table instance and then make API calls. + name = self._table_name(name) + low_level_table = _LowLevelTable(name, self._instance) + column_families = ( + low_level_table.column_family(column_family_name, gc_rule=gc_rule) + for column_family_name, gc_rule in six.iteritems(gc_rule_dict) + ) + try: + low_level_table.create(column_families=column_families) + except face.NetworkError as network_err: + if network_err.code == interfaces.StatusCode.ALREADY_EXISTS: + raise AlreadyExists(name) + else: + raise
+ +
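For example, a sketch of creating a table with the two supported
garbage-collection options (assumes an open ``connection``)::

    families = {
        'cf1': {'max_versions': 10},
        'cf2': {'time_to_live': 60 * 60 * 24},  # seconds (one day)
        'cf3': {},                              # no GC rule
    }
    connection.create_table('mytable', families)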
[docs] def delete_table(self, name, disable=False): + """Delete the specified table. + + :type name: str + :param name: The name of the table to be deleted. If ``table_prefix`` + is set, a prefix will be added to the ``name``. + + :type disable: bool + :param disable: Whether to first disable the table if needed. This + is provided for compatibility with HappyBase, but is + not relevant for Cloud Bigtable since it has no concept + of enabled / disabled tables. + """ + if disable: + _WARN(_DISABLE_DELETE_MSG) + + name = self._table_name(name) + _LowLevelTable(name, self._instance).delete()
+ + @staticmethod +
[docs] def enable_table(name): + """Enable the specified table. + + .. warning:: + + Cloud Bigtable has no concept of enabled / disabled tables so this + method does nothing. It is provided simply for compatibility. + + :type name: str + :param name: The name of the table to be enabled. + """ + _WARN(_ENABLE_TMPL % (name,))
+ + @staticmethod +
[docs] def disable_table(name): + """Disable the specified table. + + .. warning:: + + Cloud Bigtable has no concept of enabled / disabled tables so this + method does nothing. It is provided simply for compatibility. + + :type name: str + :param name: The name of the table to be disabled. + """ + _WARN(_DISABLE_TMPL % (name,))
+ + @staticmethod +
[docs] def is_table_enabled(name): + """Return whether the specified table is enabled. + + .. warning:: + + Cloud Bigtable has no concept of enabled / disabled tables so this + method always returns :data:`True`. It is provided simply for + compatibility. + + :type name: str + :param name: The name of the table to check enabled / disabled status. + + :rtype: bool + :returns: The value :data:`True` always. + """ + _WARN(_IS_ENABLED_TMPL % (name,)) + return True
+ + @staticmethod +
[docs] def compact_table(name, major=False): + """Compact the specified table. + + .. warning:: + + Cloud Bigtable supports table compactions, it just doesn't expose + an API for that feature, so this method does nothing. It is + provided simply for compatibility. + + :type name: str + :param name: The name of the table to compact. + + :type major: bool + :param major: Whether to perform a major compaction. + """ + _WARN(_COMPACT_TMPL % (name, major))
+ + +def _parse_family_option(option): + """Parses a column family option into a garbage collection rule. + + .. note:: + + If ``option`` is not a dictionary, the type is not checked. + If ``option`` is :data:`None`, there is nothing to do, since this + is the correct output. + + :type option: :class:`dict`, + :data:`NoneType <types.NoneType>`, + :class:`.GarbageCollectionRule` + :param option: A column family option passes as a dictionary value in + :meth:`Connection.create_table`. + + :rtype: :class:`.GarbageCollectionRule` + :returns: A garbage collection rule parsed from the input. + """ + result = option + if isinstance(result, dict): + if not set(result.keys()) <= set(['max_versions', 'time_to_live']): + all_keys = ', '.join(repr(key) for key in result.keys()) + warning_msg = ('Cloud Bigtable only supports max_versions and ' + 'time_to_live column family settings. ' + 'Received: %s' % (all_keys,)) + _WARN(warning_msg) + + max_num_versions = result.get('max_versions') + max_age = None + if 'time_to_live' in result: + max_age = datetime.timedelta(seconds=result['time_to_live']) + + versions_rule = age_rule = None + if max_num_versions is not None: + versions_rule = MaxVersionsGCRule(max_num_versions) + if max_age is not None: + age_rule = MaxAgeGCRule(max_age) + + if versions_rule is None: + result = age_rule + else: + if age_rule is None: + result = versions_rule + else: + result = GCRuleIntersection(rules=[age_rule, versions_rule]) + + return result +
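Following the branching above, for example::

    _parse_family_option({'max_versions': 5})
    # -> MaxVersionsGCRule(5)
    _parse_family_option({'max_versions': 5, 'time_to_live': 3600})
    # -> GCRuleIntersection of a MaxAgeGCRule (1 hour) and a
    #    MaxVersionsGCRule (5 versions)
    _parse_family_option(None)
    # -> None (no garbage collection rule)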
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/happybase/pool.html b/0.18.1/_modules/gcloud/bigtable/happybase/pool.html new file mode 100644 index 000000000000..0bdf5a103f19 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/happybase/pool.html @@ -0,0 +1,387 @@ + + + + + + + + gcloud.bigtable.happybase.pool — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +

Source code for gcloud.bigtable.happybase.pool

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Bigtable HappyBase pool module."""
+
+
+import contextlib
+import threading
+
+import six
+
+from gcloud.bigtable.happybase.connection import Connection
+from gcloud.bigtable.happybase.connection import _get_instance
+
+
+_MIN_POOL_SIZE = 1
+"""Minimum allowable size of a connection pool."""
+
+
+
[docs]class NoConnectionsAvailable(RuntimeError): + """Exception raised when no connections are available. + + This happens if a timeout was specified when obtaining a connection, + and no connection became available within the specified timeout. + """
+ + +
[docs]class ConnectionPool(object):
+    """Thread-safe connection pool.
+
+    .. note::
+
+        All keyword arguments are passed unmodified to the
+        :class:`Connection <.happybase.connection.Connection>` constructor
+        **except** for ``autoconnect``. This is because the ``open`` /
+        ``closed`` status of a connection is managed by the pool. In
+        addition, if ``instance`` is not passed, the default / inferred
+        instance is determined by the pool and then passed to each
+        :class:`Connection <.happybase.connection.Connection>` that is
+        created.
+
+    :type size: int
+    :param size: The maximum number of concurrently open connections.
+
+    :type kwargs: dict
+    :param kwargs: Keyword arguments passed to the
+                   :class:`Connection <.happybase.Connection>`
+                   constructor.
+
+    :raises: :class:`TypeError <exceptions.TypeError>` if ``size``
+             is not an integer.
+             :class:`ValueError <exceptions.ValueError>` if ``size``
+             is not positive.
+    """
+    def __init__(self, size, **kwargs):
+        if not isinstance(size, six.integer_types):
+            raise TypeError('Pool size arg must be an integer')
+
+        if size < _MIN_POOL_SIZE:
+            raise ValueError('Pool size must be positive')
+
+        self._lock = threading.Lock()
+        self._queue = six.moves.queue.LifoQueue(maxsize=size)
+        self._thread_connections = threading.local()
+
+        connection_kwargs = kwargs
+        connection_kwargs['autoconnect'] = False
+        if 'instance' not in connection_kwargs:
+            connection_kwargs['instance'] = _get_instance(
+                timeout=kwargs.get('timeout'))
+
+        for _ in six.moves.range(size):
+            connection = Connection(**connection_kwargs)
+            self._queue.put(connection)
+
+    def _acquire_connection(self, timeout=None):
+        """Acquire a connection from the pool.
+
+        :type timeout: int
+        :param timeout: (Optional) Time (in seconds) to wait for a connection
+                        to open.
+
+        :rtype: :class:`Connection <.happybase.Connection>`
+        :returns: An active connection from the queue stored on the pool.
+        :raises: :class:`NoConnectionsAvailable` if ``Queue.get`` fails
+                 before the ``timeout`` (only if a timeout is specified).
+        """
+        try:
+            return self._queue.get(block=True, timeout=timeout)
+        except six.moves.queue.Empty:
+            raise NoConnectionsAvailable('No connection available from pool '
+                                         'within specified timeout')
+
+    @contextlib.contextmanager
+
[docs] def connection(self, timeout=None): + """Obtain a connection from the pool. + + Must be used as a context manager, for example:: + + with pool.connection() as connection: + pass # do something with the connection + + If ``timeout`` is omitted, this method waits forever for a connection + to become available from the local queue. + + Yields an active :class:`Connection <.happybase.connection.Connection>` + from the pool. + + :type timeout: int + :param timeout: (Optional) Time (in seconds) to wait for a connection + to open. + + :raises: :class:`NoConnectionsAvailable` if no connection can be + retrieved from the pool before the ``timeout`` (only if + a timeout is specified). + """ + connection = getattr(self._thread_connections, 'current', None) + + retrieved_new_cnxn = False + if connection is None: + # In this case we need to actually grab a connection from the + # pool. After retrieval, the connection is stored on a thread + # local so that nested connection requests from the same + # thread can re-use the same connection instance. + # + # NOTE: This code acquires a lock before assigning to the + # thread local; see + # ('https://emptysqua.re/blog/' + # 'another-thing-about-pythons-threadlocals/') + retrieved_new_cnxn = True + connection = self._acquire_connection(timeout) + with self._lock: + self._thread_connections.current = connection + + # This is a no-op for connections that have already been opened + # since they just call Client.start(). + connection.open() + yield connection + + # Remove thread local reference after the outermost 'with' block + # ends. Afterwards the thread no longer owns the connection. + if retrieved_new_cnxn: + del self._thread_connections.current + self._queue.put(connection)
+
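A usage sketch (hedged: assumes implicit credentials and that
``ConnectionPool`` is re-exported from ``gcloud.bigtable.happybase``)::

    from gcloud.bigtable.happybase import ConnectionPool

    pool = ConnectionPool(size=3, table_prefix='app')
    with pool.connection(timeout=5) as connection:
        # Nested pool.connection() calls in this thread re-use the
        # same underlying connection.
        table = connection.table('users')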
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/happybase/table.html b/0.18.1/_modules/gcloud/bigtable/happybase/table.html new file mode 100644 index 000000000000..ee2724fe0d10 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/happybase/table.html @@ -0,0 +1,1211 @@ + + + + + + + + gcloud.bigtable.happybase.table — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +

Source code for gcloud.bigtable.happybase.table

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Google Cloud Bigtable HappyBase table module."""
+
+
+import struct
+import warnings
+
+import six
+
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud._helpers import _microseconds_from_datetime
+from gcloud._helpers import _to_bytes
+from gcloud._helpers import _total_seconds
+from gcloud.bigtable.column_family import GCRuleIntersection
+from gcloud.bigtable.column_family import MaxAgeGCRule
+from gcloud.bigtable.column_family import MaxVersionsGCRule
+from gcloud.bigtable.happybase.batch import _get_column_pairs
+from gcloud.bigtable.happybase.batch import _WAL_SENTINEL
+from gcloud.bigtable.happybase.batch import Batch
+from gcloud.bigtable.row_filters import CellsColumnLimitFilter
+from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter
+from gcloud.bigtable.row_filters import FamilyNameRegexFilter
+from gcloud.bigtable.row_filters import RowFilterChain
+from gcloud.bigtable.row_filters import RowFilterUnion
+from gcloud.bigtable.row_filters import RowKeyRegexFilter
+from gcloud.bigtable.row_filters import TimestampRange
+from gcloud.bigtable.row_filters import TimestampRangeFilter
+from gcloud.bigtable.table import Table as _LowLevelTable
+
+
+_WARN = warnings.warn
+_PACK_I64 = struct.Struct('>q').pack
+_UNPACK_I64 = struct.Struct('>q').unpack
+_SIMPLE_GC_RULES = (MaxAgeGCRule, MaxVersionsGCRule)
+
+
+
[docs]def make_row(cell_map, include_timestamp): + """Make a row dict for a Thrift cell mapping. + + .. warning:: + + This method is only provided for HappyBase compatibility, but does not + actually work. + + :type cell_map: dict + :param cell_map: Dictionary with ``fam:col`` strings as keys and ``TCell`` + instances as values. + + :type include_timestamp: bool + :param include_timestamp: Flag to indicate if cell timestamps should be + included with the output. + + :raises: :class:`NotImplementedError <exceptions.NotImplementedError>` + always + """ + raise NotImplementedError('The Cloud Bigtable API output is not the same ' + 'as the output from the Thrift server, so this ' + 'helper can not be implemented.', 'Called with', + cell_map, include_timestamp)
+ + +
[docs]def make_ordered_row(sorted_columns, include_timestamp): + """Make a row dict for sorted Thrift column results from scans. + + .. warning:: + + This method is only provided for HappyBase compatibility, but does not + actually work. + + :type sorted_columns: list + :param sorted_columns: List of ``TColumn`` instances from Thrift. + + :type include_timestamp: bool + :param include_timestamp: Flag to indicate if cell timestamps should be + included with the output. + + :raises: :class:`NotImplementedError <exceptions.NotImplementedError>` + always + """ + raise NotImplementedError('The Cloud Bigtable API output is not the same ' + 'as the output from the Thrift server, so this ' + 'helper can not be implemented.', 'Called with', + sorted_columns, include_timestamp)
+ + +
[docs]class Table(object):
+    """Representation of a Cloud Bigtable table.
+
+    Used for adding data to and retrieving data from a Cloud Bigtable table.
+
+    :type name: str
+    :param name: The name of the table.
+
+    :type connection: :class:`Connection <.happybase.connection.Connection>`
+    :param connection: The connection which has access to the table.
+    """
+
+    def __init__(self, name, connection):
+        self.name = name
+        # This remains as legacy for HappyBase, but only the instance
+        # from the connection is needed.
+        self.connection = connection
+        self._low_level_table = None
+        if self.connection is not None:
+            self._low_level_table = _LowLevelTable(self.name,
+                                                   self.connection._instance)
+
+    def __repr__(self):
+        return '<table.Table name=%r>' % (self.name,)
+
[docs] def families(self): + """Retrieve the column families for this table. + + :rtype: dict + :returns: Mapping from column family name to garbage collection rule + for a column family. + """ + column_family_map = self._low_level_table.list_column_families() + result = {} + for col_fam, col_fam_obj in six.iteritems(column_family_map): + result[col_fam] = _gc_rule_to_dict(col_fam_obj.gc_rule) + return result
+ +
[docs] def regions(self): + """Retrieve the regions for this table. + + .. warning:: + + Cloud Bigtable does not give information about how a table is laid + out in memory, so this method does not work. It is + provided simply for compatibility. + + :raises: :class:`NotImplementedError <exceptions.NotImplementedError>` + always + """ + raise NotImplementedError('The Cloud Bigtable API does not have a ' + 'concept of splitting a table into regions.')
+ +
[docs]    def row(self, row, columns=None, timestamp=None,
+            include_timestamp=False):
+        """Retrieve a single row of data.
+
+        Returns the latest cells in each column (or all columns if
+        ``columns`` is not specified). If a ``timestamp`` is set, then
+        **latest** becomes **latest** up until ``timestamp``.
+
+        :type row: str
+        :param row: Row key for the row we are reading from.
+
+        :type columns: list
+        :param columns: (Optional) Iterable containing column names (as
+                        strings). Each column name can be either
+
+                        * an entire column family: ``fam`` or ``fam:``
+                        * a single column: ``fam:col``
+
+        :type timestamp: int
+        :param timestamp: (Optional) Timestamp (in milliseconds since the
+                          epoch). If specified, only cells returned before
+                          the timestamp will be returned.
+
+        :type include_timestamp: bool
+        :param include_timestamp: Flag to indicate if cell timestamps should
+                                  be included with the output.
+
+        :rtype: dict
+        :returns: Dictionary containing all the latest column values in
+                  the row.
+        """
+        filters = []
+        if columns is not None:
+            filters.append(_columns_filter_helper(columns))
+        # versions == 1 since we only want the latest.
+        filter_ = _filter_chain_helper(versions=1, timestamp=timestamp,
+                                       filters=filters)
+
+        partial_row_data = self._low_level_table.read_row(
+            row, filter_=filter_)
+        if partial_row_data is None:
+            return {}
+
+        return _partial_row_to_dict(partial_row_data,
+                                    include_timestamp=include_timestamp)
+ +
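For example (the returned values are illustrative)::

    table.row(b'row-key', columns=['cf1:col1', 'cf2'])
    # {b'cf1:col1': b'value1', b'cf2:col2': b'value2'}
    table.row(b'row-key', include_timestamp=True)
    # {b'cf1:col1': (b'value1', 1456361486255), ...}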
[docs] def rows(self, rows, columns=None, timestamp=None, + include_timestamp=False): + """Retrieve multiple rows of data. + + All optional arguments behave the same in this method as they do in + :meth:`row`. + + :type rows: list + :param rows: Iterable of the row keys for the rows we are reading from. + + :type columns: list + :param columns: (Optional) Iterable containing column names (as + strings). Each column name can be either + + * an entire column family: ``fam`` or ``fam:`` + * a single column: ``fam:col`` + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch). If specified, only cells returned before (or + at) the timestamp will be returned. + + :type include_timestamp: bool + :param include_timestamp: Flag to indicate if cell timestamps should be + included with the output. + + :rtype: list + :returns: A list of pairs, where the first is the row key and the + second is a dictionary with the filtered values returned. + """ + if not rows: + # Avoid round-trip if the result is empty anyway + return [] + + filters = [] + if columns is not None: + filters.append(_columns_filter_helper(columns)) + filters.append(_row_keys_filter_helper(rows)) + # versions == 1 since we only want the latest. + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) + + partial_rows_data = self._low_level_table.read_rows(filter_=filter_) + # NOTE: We could use max_loops = 1000 or some similar value to ensure + # that the stream isn't open too long. + partial_rows_data.consume_all() + + result = [] + for row_key in rows: + if row_key not in partial_rows_data.rows: + continue + curr_row_data = partial_rows_data.rows[row_key] + curr_row_dict = _partial_row_to_dict( + curr_row_data, include_timestamp=include_timestamp) + result.append((row_key, curr_row_dict)) + + return result
+ +
[docs] def cells(self, row, column, versions=None, timestamp=None, + include_timestamp=False): + """Retrieve multiple versions of a single cell from the table. + + :type row: str + :param row: Row key for the row we are reading from. + + :type column: str + :param column: Column we are reading from; of the form ``fam:col``. + + :type versions: int + :param versions: (Optional) The maximum number of cells to return. If + not set, returns all cells found. + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch). If specified, only cells returned before (or + at) the timestamp will be returned. + + :type include_timestamp: bool + :param include_timestamp: Flag to indicate if cell timestamps should be + included with the output. + + :rtype: list + :returns: List of values in the cell (with timestamps if + ``include_timestamp`` is :data:`True`). + """ + filter_ = _filter_chain_helper(column=column, versions=versions, + timestamp=timestamp) + partial_row_data = self._low_level_table.read_row(row, filter_=filter_) + if partial_row_data is None: + return [] + else: + cells = partial_row_data._cells + # We know that `_filter_chain_helper` has already verified that + # column will split as such. + column_family_id, column_qualifier = column.split(':') + # NOTE: We expect the only key in `cells` is `column_family_id` + # and the only key `cells[column_family_id]` is + # `column_qualifier`. But we don't check that this is true. + curr_cells = cells[column_family_id][column_qualifier] + return _cells_to_pairs( + curr_cells, include_timestamp=include_timestamp)
+ +
[docs]    def scan(self, row_start=None, row_stop=None, row_prefix=None,
+             columns=None, timestamp=None,
+             include_timestamp=False, limit=None, **kwargs):
+        """Create a scanner for data in this table.
+
+        This method returns a generator that can be used for looping over
+        the matching rows.
+
+        If ``row_prefix`` is specified, only rows with row keys matching the
+        prefix will be returned. If given, ``row_start`` and ``row_stop``
+        cannot be used.
+
+        .. note::
+
+            Both ``row_start`` and ``row_stop`` can be :data:`None` to
+            specify the start and the end of the table respectively. If both
+            are omitted, a full table scan is done. Note that this usually
+            results in severe performance problems.
+
+        The keyword argument ``filter`` is also supported (beyond column and
+        row range filters supported here). HappyBase / HBase users will have
+        used this as an HBase filter string. (See the `Thrift docs`_ for
+        more details on those filters.) However, Google Cloud Bigtable
+        doesn't support those filter strings so a
+        :class:`~gcloud.bigtable.row.RowFilter` should be used instead.
+
+        .. _Thrift docs: http://hbase.apache.org/0.94/book/thrift.html
+
+        The arguments ``batch_size``, ``scan_batching`` and
+        ``sorted_columns`` are allowed (as keyword arguments) for
+        compatibility with HappyBase. However, they will not be used in any
+        way, and will cause a warning if passed. (The ``batch_size``
+        determines the number of results to retrieve per request. The HBase
+        scanner defaults to reading one record at a time, so this argument
+        allows HappyBase to increase that number. However, the Cloud
+        Bigtable API uses HTTP/2 streaming so there is no concept of a
+        batched scan. The ``sorted_columns`` flag tells HBase to return
+        columns in order, but Cloud Bigtable doesn't have this feature.)
+
+        :type row_start: str
+        :param row_start: (Optional) Row key where the scanner should start
+                          (includes ``row_start``). If not specified, reads
+                          from the first key. If the table does not contain
+                          ``row_start``, it will start from the next key
+                          after it that **is** contained in the table.
+
+        :type row_stop: str
+        :param row_stop: (Optional) Row key where the scanner should stop
+                         (excludes ``row_stop``). If not specified, reads
+                         until the last key. The table does not have to
+                         contain ``row_stop``.
+
+        :type row_prefix: str
+        :param row_prefix: (Optional) Prefix to match row keys.
+
+        :type columns: list
+        :param columns: (Optional) Iterable containing column names (as
+                        strings). Each column name can be either
+
+                        * an entire column family: ``fam`` or ``fam:``
+                        * a single column: ``fam:col``
+
+        :type timestamp: int
+        :param timestamp: (Optional) Timestamp (in milliseconds since the
+                          epoch). If specified, only cells returned before
+                          (or at) the timestamp will be returned.
+
+        :type include_timestamp: bool
+        :param include_timestamp: Flag to indicate if cell timestamps should
+                                  be included with the output.
+
+        :type limit: int
+        :param limit: (Optional) Maximum number of rows to return.
+
+        :type kwargs: dict
+        :param kwargs: Remaining keyword arguments. Provided for HappyBase
+                       compatibility.
+
+        :raises: :class:`ValueError <exceptions.ValueError>` if ``limit`` is
+                 set but non-positive, or if ``row_prefix`` is used together
+                 with ``row_start`` or ``row_stop``;
+                 :class:`TypeError <exceptions.TypeError>` if a string
+                 ``filter`` is used.
+        """
+        row_start, row_stop, filter_chain = _scan_filter_helper(
+            row_start, row_stop, row_prefix, columns, timestamp, limit,
+            kwargs)
+
+        partial_rows_data = self._low_level_table.read_rows(
+            start_key=row_start, end_key=row_stop,
+            limit=limit, filter_=filter_chain)
+
+        # Mutable copy of data.
+        rows_dict = partial_rows_data.rows
+        while True:
+            try:
+                partial_rows_data.consume_next()
+                for row_key in sorted(rows_dict):
+                    curr_row_data = rows_dict.pop(row_key)
+                    # NOTE: We expect len(rows_dict) == 0, but don't check it.
+                    curr_row_dict = _partial_row_to_dict(
+                        curr_row_data, include_timestamp=include_timestamp)
+                    yield (row_key, curr_row_dict)
+            except StopIteration:
+                break
+ +
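For example, a bounded prefix scan (assumes a populated ``table``)::

    for row_key, row_dict in table.scan(row_prefix=b'user-', limit=10):
        print(row_key, row_dict)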
[docs] def put(self, row, data, timestamp=None, wal=_WAL_SENTINEL): + """Insert data into a row in this table. + + .. note:: + + This method will send a request with a single "put" mutation. + In many situations, :meth:`batch` is a more appropriate + method to manipulate data since it helps combine many mutations + into a single request. + + :type row: str + :param row: The row key where the mutation will be "put". + + :type data: dict + :param data: Dictionary containing the data to be inserted. The keys + are columns names (of the form ``fam:col``) and the values + are strings (bytes) to be stored in those columns. + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch) that the mutation will be applied at. + + :type wal: object + :param wal: Unused parameter (to be passed to a created batch). + Provided for compatibility with HappyBase, but irrelevant + for Cloud Bigtable since it does not have a Write Ahead + Log. + """ + with self.batch(timestamp=timestamp, wal=wal) as batch: + batch.put(row, data)
+ +
[docs] def delete(self, row, columns=None, timestamp=None, wal=_WAL_SENTINEL): + """Delete data from a row in this table. + + This method deletes the entire ``row`` if ``columns`` is not + specified. + + .. note:: + + This method will send a request with a single delete mutation. + In many situations, :meth:`batch` is a more appropriate + method to manipulate data since it helps combine many mutations + into a single request. + + :type row: str + :param row: The row key where the delete will occur. + + :type columns: list + :param columns: (Optional) Iterable containing column names (as + strings). Each column name can be either + + * an entire column family: ``fam`` or ``fam:`` + * a single column: ``fam:col`` + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch) that the mutation will be applied at. + + :type wal: object + :param wal: Unused parameter (to be passed to a created batch). + Provided for compatibility with HappyBase, but irrelevant + for Cloud Bigtable since it does not have a Write Ahead + Log. + """ + with self.batch(timestamp=timestamp, wal=wal) as batch: + batch.delete(row, columns)
+ +
[docs] def batch(self, timestamp=None, batch_size=None, transaction=False, + wal=_WAL_SENTINEL): + """Create a new batch operation for this table. + + This method returns a new + :class:`Batch <.happybase.batch.Batch>` instance that can be + used for mass data manipulation. + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch) that all mutations will be applied at. + + :type batch_size: int + :param batch_size: (Optional) The maximum number of mutations to allow + to accumulate before committing them. + + :type transaction: bool + :param transaction: Flag indicating if the mutations should be sent + transactionally or not. If ``transaction=True`` and + an error occurs while a + :class:`Batch <.happybase.batch.Batch>` is + active, then none of the accumulated mutations will + be committed. If ``batch_size`` is set, the + mutation can't be transactional. + + :type wal: object + :param wal: Unused parameter (to be passed to the created batch). + Provided for compatibility with HappyBase, but irrelevant + for Cloud Bigtable since it does not have a Write Ahead + Log. + + :rtype: :class:`Batch <gcloud.bigtable.happybase.batch.Batch>` + :returns: A batch bound to this table. + """ + return Batch(self, timestamp=timestamp, batch_size=batch_size, + transaction=transaction, wal=wal)
+ +
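For example, mirroring the internal usage in :meth:`put` and :meth:`delete`
above, several mutations can be combined into one request::

    with table.batch(transaction=True) as batch:
        batch.put(b'row-key1', {b'cf:col1': b'value1'})
        batch.put(b'row-key2', {b'cf:col1': b'value2'})
        batch.delete(b'row-key3')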
[docs]    def counter_get(self, row, column):
+        """Retrieve the current value of a counter column.
+
+        This method retrieves the current value of a counter column. If the
+        counter column does not exist, this function initializes it to
+        ``0``.
+
+        .. note::
+
+            Application code should **never** store a counter value
+            directly; use the atomic :meth:`counter_inc` and
+            :meth:`counter_dec` methods for that.
+
+        :type row: str
+        :param row: Row key for the row we are getting a counter from.
+
+        :type column: str
+        :param column: Column we are ``get``-ing from; of the form
+                       ``fam:col``.
+
+        :rtype: int
+        :returns: Counter value (after initializing / incrementing by 0).
+        """
+        # Don't query directly, but increment with value=0 so that the
+        # counter is correctly initialized if it didn't exist yet.
+        return self.counter_inc(row, column, value=0)
+ +
[docs] def counter_set(self, row, column, value=0): + """Set a counter column to a specific value. + + .. note:: + + Be careful using this method. It can be useful for setting the + initial value of a counter, but it defeats the purpose of using + atomic increment and decrement. + + :type row: str + :param row: Row key for the row we are setting a counter in. + + :type column: str + :param column: Column we are setting a value in; of + the form ``fam:col``. + + :type value: int + :param value: Value to set the counter to. + """ + self.put(row, {column: _PACK_I64(value)})
+ +
[docs]    def counter_inc(self, row, column, value=1):
+        """Atomically increment a counter column.
+
+        This method atomically increments a counter column in ``row``.
+        If the counter column does not exist, it is automatically
+        initialized to ``0`` before being incremented.
+
+        :type row: str
+        :param row: Row key for the row we are incrementing a counter in.
+
+        :type column: str
+        :param column: Column we are incrementing a value in; of the
+                       form ``fam:col``.
+
+        :type value: int
+        :param value: Amount to increment the counter by. (If negative,
+                      this is equivalent to decrement.)
+
+        :rtype: int
+        :returns: Counter value after incrementing.
+        """
+        row = self._low_level_table.row(row, append=True)
+        if isinstance(column, six.binary_type):
+            column = column.decode('utf-8')
+        column_family_id, column_qualifier = column.split(':')
+        row.increment_cell_value(column_family_id, column_qualifier, value)
+        # AppendRow.commit() returns a dictionary of the form:
+        # {
+        #     u'col-fam-id': {
+        #         b'col-name1': [
+        #             (b'cell-val', datetime.datetime(...)),
+        #             ...
+        #         ],
+        #         ...
+        #     },
+        # }
+        modified_cells = row.commit()
+        # Get the cells in the modified column.
+        column_cells = modified_cells[column_family_id][column_qualifier]
+        # Make sure there is exactly one cell in the column.
+        if len(column_cells) != 1:
+            raise ValueError('Expected server to return one modified cell.')
+        column_cell = column_cells[0]
+        # Get the bytes value from the column and convert it to an integer.
+        bytes_value = column_cell[0]
+        int_value, = _UNPACK_I64(bytes_value)
+        return int_value
+ +
[docs] def counter_dec(self, row, column, value=1): + """Atomically decrement a counter column. + + This method atomically decrements a counter column in ``row``. + If the counter column does not exist, it is automatically initialized + to ``0`` before being decremented. + + :type row: str + :param row: Row key for the row we are decrementing a counter in. + + :type column: str + :param column: Column we are decrementing a value in; of the + form ``fam:col``. + + :type value: int + :param value: Amount to decrement the counter by. (If negative, + this is equivalent to increment.) + + :rtype: int + :returns: Counter value after decrementing. + """ + return self.counter_inc(row, column, -value)
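For example (counter values are stored as big-endian 64-bit integers via
``_PACK_I64`` above; the results assume a fresh row)::

    table.counter_set(b'row-key', 'stats:visits', 10)
    table.counter_inc(b'row-key', 'stats:visits', value=5)   # returns 15
    table.counter_dec(b'row-key', 'stats:visits')            # returns 14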
+ + +def _gc_rule_to_dict(gc_rule): + """Converts garbage collection rule to dictionary if possible. + + This is in place to support dictionary values as was done + in HappyBase, which has somewhat different garbage collection rule + settings for column families. + + Only does this if the garbage collection rule is: + + * :class:`gcloud.bigtable.column_family.MaxAgeGCRule` + * :class:`gcloud.bigtable.column_family.MaxVersionsGCRule` + * Composite :class:`gcloud.bigtable.column_family.GCRuleIntersection` + with two rules, one each of type + :class:`gcloud.bigtable.column_family.MaxAgeGCRule` and + :class:`gcloud.bigtable.column_family.MaxVersionsGCRule` + + Otherwise, just returns the input without change. + + :type gc_rule: :data:`NoneType <types.NoneType>`, + :class:`.GarbageCollectionRule` + :param gc_rule: A garbage collection rule to convert to a dictionary + (if possible). + + :rtype: dict or + :class:`gcloud.bigtable.column_family.GarbageCollectionRule` + :returns: The converted garbage collection rule. + """ + result = gc_rule + if gc_rule is None: + result = {} + elif isinstance(gc_rule, MaxAgeGCRule): + result = {'time_to_live': _total_seconds(gc_rule.max_age)} + elif isinstance(gc_rule, MaxVersionsGCRule): + result = {'max_versions': gc_rule.max_num_versions} + elif isinstance(gc_rule, GCRuleIntersection): + if len(gc_rule.rules) == 2: + rule1, rule2 = gc_rule.rules + if (isinstance(rule1, _SIMPLE_GC_RULES) and + isinstance(rule2, _SIMPLE_GC_RULES)): + rule1 = _gc_rule_to_dict(rule1) + rule2 = _gc_rule_to_dict(rule2) + key1, = rule1.keys() + key2, = rule2.keys() + if key1 != key2: + result = {key1: rule1[key1], key2: rule2[key2]} + return result + + +def _next_char(str_val, index): + """Gets the next character based on a position in a string. + + :type str_val: str + :param str_val: A string containing the character to update. + + :type index: int + :param index: An integer index in ``str_val``. + + :rtype: str + :returns: The next character after the character at ``index`` + in ``str_val``. + """ + ord_val = six.indexbytes(str_val, index) + return _to_bytes(chr(ord_val + 1), encoding='latin-1') + + +def _string_successor(str_val): + """Increment and truncate a byte string. + + Determines shortest string that sorts after the given string when + compared using regular string comparison semantics. + + Modeled after implementation in ``gcloud-golang``. + + Increments the last byte that is smaller than ``0xFF``, and + drops everything after it. If the string only contains ``0xFF`` bytes, + ``''`` is returned. + + :type str_val: str + :param str_val: String to increment. + + :rtype: str + :returns: The next string in lexical order after ``str_val``. + """ + str_val = _to_bytes(str_val, encoding='latin-1') + if str_val == b'': + return str_val + + index = len(str_val) - 1 + while index >= 0: + if six.indexbytes(str_val, index) != 0xff: + break + index -= 1 + + if index == -1: + return b'' + + return str_val[:index] + _next_char(str_val, index) + + +def _convert_to_time_range(timestamp=None): + """Create a timestamp range from an HBase / HappyBase timestamp. + + HBase uses timestamp as an argument to specify an exclusive end + deadline. Cloud Bigtable also uses exclusive end times, so + the behavior matches. + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch). Intended to be used as the end of an HBase + time range, which is exclusive. 
+ + :rtype: :class:`gcloud.bigtable.row.TimestampRange`, + :data:`NoneType <types.NoneType>` + :returns: The timestamp range corresponding to the passed in + ``timestamp``. + """ + if timestamp is None: + return None + + next_timestamp = _datetime_from_microseconds(1000 * timestamp) + return TimestampRange(end=next_timestamp) + + +def _cells_to_pairs(cells, include_timestamp=False): + """Converts list of cells to HappyBase format. + + For example:: + + >>> import datetime + >>> from gcloud.bigtable.row_data import Cell + >>> cell1 = Cell(b'val1', datetime.datetime.utcnow()) + >>> cell2 = Cell(b'val2', datetime.datetime.utcnow()) + >>> _cells_to_pairs([cell1, cell2]) + [b'val1', b'val2'] + >>> _cells_to_pairs([cell1, cell2], include_timestamp=True) + [(b'val1', 1456361486255), (b'val2', 1456361491927)] + + :type cells: list + :param cells: List of :class:`gcloud.bigtable.row_data.Cell` returned + from a read request. + + :type include_timestamp: bool + :param include_timestamp: Flag to indicate if cell timestamps should be + included with the output. + + :rtype: list + :returns: List of values in the cell. If ``include_timestamp=True``, each + value will be a pair, with the first part the bytes value in + the cell and the second part the number of milliseconds in the + timestamp on the cell. + """ + result = [] + for cell in cells: + if include_timestamp: + ts_millis = _microseconds_from_datetime(cell.timestamp) // 1000 + result.append((cell.value, ts_millis)) + else: + result.append(cell.value) + return result + + +def _partial_row_to_dict(partial_row_data, include_timestamp=False): + """Convert a low-level row data object to a dictionary. + + Assumes only the latest value in each row is needed. This assumption + is due to the fact that this method is used by callers which use + a ``CellsColumnLimitFilter(1)`` filter. + + For example:: + + >>> import datetime + >>> from gcloud.bigtable.row_data import Cell, PartialRowData + >>> cell1 = Cell(b'val1', datetime.datetime.utcnow()) + >>> cell2 = Cell(b'val2', datetime.datetime.utcnow()) + >>> row_data = PartialRowData(b'row-key') + >>> _partial_row_to_dict(row_data) + {} + >>> row_data._cells[u'fam1'] = {b'col1': [cell1], b'col2': [cell2]} + >>> _partial_row_to_dict(row_data) + {b'fam1:col2': b'val2', b'fam1:col1': b'val1'} + >>> _partial_row_to_dict(row_data, include_timestamp=True) + {b'fam1:col2': (b'val2', 1456361724480), + b'fam1:col1': (b'val1', 1456361721135)} + + :type partial_row_data: :class:`.row_data.PartialRowData` + :param partial_row_data: Row data consumed from a stream. + + :type include_timestamp: bool + :param include_timestamp: Flag to indicate if cell timestamps should be + included with the output. + + :rtype: dict + :returns: The row data converted to a dictionary. + """ + result = {} + for column, cells in six.iteritems(partial_row_data.to_dict()): + cell_vals = _cells_to_pairs(cells, + include_timestamp=include_timestamp) + # NOTE: We assume there is exactly 1 version since we used that in + # our filter, but we don't check this. + result[column] = cell_vals[0] + return result + + +def _filter_chain_helper(column=None, versions=None, timestamp=None, + filters=None): + """Create filter chain to limit a results set. + + :type column: str + :param column: (Optional) The column (``fam:col``) to be selected + with the filter. + + :type versions: int + :param versions: (Optional) The maximum number of cells to return. + + :type timestamp: int + :param timestamp: (Optional) Timestamp (in milliseconds since the + epoch). 
If specified, only cells returned before (or + at) the timestamp will be matched. + + :type filters: list + :param filters: (Optional) List of existing filters to be extended. + + :rtype: :class:`RowFilter <gcloud.bigtable.row.RowFilter>` + :returns: The chained filter created, or just a single filter if only + one was needed. + :raises: :class:`ValueError <exceptions.ValueError>` if there are no + filters to chain. + """ + if filters is None: + filters = [] + + if column is not None: + if isinstance(column, six.binary_type): + column = column.decode('utf-8') + column_family_id, column_qualifier = column.split(':') + fam_filter = FamilyNameRegexFilter(column_family_id) + qual_filter = ColumnQualifierRegexFilter(column_qualifier) + filters.extend([fam_filter, qual_filter]) + if versions is not None: + filters.append(CellsColumnLimitFilter(versions)) + time_range = _convert_to_time_range(timestamp=timestamp) + if time_range is not None: + filters.append(TimestampRangeFilter(time_range)) + + num_filters = len(filters) + if num_filters == 0: + raise ValueError('Must have at least one filter.') + elif num_filters == 1: + return filters[0] + else: + return RowFilterChain(filters=filters) + + +def _scan_filter_helper(row_start, row_stop, row_prefix, columns, + timestamp, limit, kwargs): + """Helper for :meth:`scan`: build up a filter chain.""" + filter_ = kwargs.pop('filter', None) + legacy_args = [] + for kw_name in ('batch_size', 'scan_batching', 'sorted_columns'): + if kw_name in kwargs: + legacy_args.append(kw_name) + kwargs.pop(kw_name) + if legacy_args: + legacy_args = ', '.join(legacy_args) + message = ('The HappyBase legacy arguments %s were used. These ' + 'arguments are unused by gcloud.' % (legacy_args,)) + _WARN(message) + if kwargs: + raise TypeError('Received unexpected arguments', kwargs.keys()) + + if limit is not None and limit < 1: + raise ValueError('limit must be positive') + if row_prefix is not None: + if row_start is not None or row_stop is not None: + raise ValueError('row_prefix cannot be combined with ' + 'row_start or row_stop') + row_start = row_prefix + row_stop = _string_successor(row_prefix) + + filters = [] + if isinstance(filter_, six.string_types): + raise TypeError('Specifying filters as a string is not supported ' + 'by Cloud Bigtable. Use a ' + 'gcloud.bigtable.row.RowFilter instead.') + elif filter_ is not None: + filters.append(filter_) + + if columns is not None: + filters.append(_columns_filter_helper(columns)) + + # versions == 1 since we only want the latest. + filter_ = _filter_chain_helper(versions=1, timestamp=timestamp, + filters=filters) + return row_start, row_stop, filter_ + + +def _columns_filter_helper(columns): + """Creates a union filter for a list of columns. + + :type columns: list + :param columns: Iterable containing column names (as strings). Each column + name can be either + + * an entire column family: ``fam`` or ``fam:`` + * a single column: ``fam:col`` + + :rtype: :class:`RowFilter <gcloud.bigtable.row.RowFilter>` + :returns: The union filter created containing all of the matched columns. + :raises: :class:`ValueError <exceptions.ValueError>` if there are no + filters to union. 
+ """ + filters = [] + for column_family_id, column_qualifier in _get_column_pairs(columns): + fam_filter = FamilyNameRegexFilter(column_family_id) + if column_qualifier is not None: + qual_filter = ColumnQualifierRegexFilter(column_qualifier) + combined_filter = RowFilterChain( + filters=[fam_filter, qual_filter]) + filters.append(combined_filter) + else: + filters.append(fam_filter) + + num_filters = len(filters) + if num_filters == 0: + raise ValueError('Must have at least one filter.') + elif num_filters == 1: + return filters[0] + else: + return RowFilterUnion(filters=filters) + + +def _row_keys_filter_helper(row_keys): + """Creates a union filter for a list of rows. + + :type row_keys: list + :param row_keys: Iterable containing row keys (as strings). + + :rtype: :class:`RowFilter <gcloud.bigtable.row.RowFilter>` + :returns: The union filter created containing all of the row keys. + :raises: :class:`ValueError <exceptions.ValueError>` if there are no + filters to union. + """ + filters = [] + for row_key in row_keys: + filters.append(RowKeyRegexFilter(row_key)) + + num_filters = len(filters) + if num_filters == 0: + raise ValueError('Must have at least one filter.') + elif num_filters == 1: + return filters[0] + else: + return RowFilterUnion(filters=filters) +
+ +
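For example, the prefix handling in :meth:`Table.scan` rests on
``_string_successor``::

    >>> _string_successor(b'user-')
    b'user.'
    >>> _string_successor(b'ab\xff')
    b'ac'
    >>> _string_successor(b'\xff\xff')
    b''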
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/instance.html b/0.18.1/_modules/gcloud/bigtable/instance.html new file mode 100644 index 000000000000..7bc8eb6d165d --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/instance.html @@ -0,0 +1,721 @@ + + + + + + + + gcloud.bigtable.instance — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +

Source code for gcloud.bigtable.instance

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Instance."""
+
+
+import re
+
+from google.longrunning import operations_pb2
+
+from gcloud._helpers import _pb_timestamp_to_datetime
+from gcloud.bigtable._generated import (
+    instance_pb2 as data_v2_pb2)
+from gcloud.bigtable._generated import (
+    bigtable_instance_admin_pb2 as messages_v2_pb2)
+from gcloud.bigtable._generated import (
+    bigtable_table_admin_pb2 as table_messages_v2_pb2)
+from gcloud.bigtable.cluster import Cluster
+from gcloud.bigtable.cluster import DEFAULT_SERVE_NODES
+from gcloud.bigtable.table import Table
+
+
+_EXISTING_INSTANCE_LOCATION_ID = 'see-existing-cluster'
+_INSTANCE_NAME_RE = re.compile(r'^projects/(?P<project>[^/]+)/'
+                               r'instances/(?P<instance_id>[a-z][-a-z0-9]*)$')
+_OPERATION_NAME_RE = re.compile(r'^operations/projects/([^/]+)/'
+                                r'instances/([a-z][-a-z0-9]*)/'
+                                r'locations/(?P<location_id>[a-z][-a-z0-9]*)/'
+                                r'operations/(?P<operation_id>\d+)$')
+_TYPE_URL_BASE = 'type.googleapis.com/google.bigtable.'
+_ADMIN_TYPE_URL_BASE = _TYPE_URL_BASE + 'admin.v2.'
+_INSTANCE_CREATE_METADATA = _ADMIN_TYPE_URL_BASE + 'CreateInstanceMetadata'
+_TYPE_URL_MAP = {
+    _INSTANCE_CREATE_METADATA: messages_v2_pb2.CreateInstanceMetadata,
+}
+
+
+def _prepare_create_request(instance):
+    """Creates a protobuf request for a CreateInstance request.
+
+    :type instance: :class:`Instance`
+    :param instance: The instance to be created.
+
+    :rtype: :class:`.messages_v2_pb2.CreateInstanceRequest`
+    :returns: The CreateInstance request object containing the instance info.
+    """
+    parent_name = ('projects/' + instance._client.project)
+    message = messages_v2_pb2.CreateInstanceRequest(
+        parent=parent_name,
+        instance_id=instance.instance_id,
+        instance=data_v2_pb2.Instance(
+            display_name=instance.display_name,
+        ),
+    )
+    cluster = message.clusters[instance.instance_id]
+    cluster.name = instance.name + '/clusters/' + instance.instance_id
+    cluster.location = (
+        parent_name + '/locations/' + instance._cluster_location_id)
+    cluster.serve_nodes = instance._cluster_serve_nodes
+    return message
+
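For example, for a hypothetical instance with ID ``'my-instance'`` in
project ``'my-project'``::

    message = _prepare_create_request(instance)
    message.parent                               # 'projects/my-project'
    message.instance_id                          # 'my-instance'
    message.clusters['my-instance'].serve_nodes  # DEFAULT_SERVE_NODES
                                                 # unless set explicitly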
+
+def _parse_pb_any_to_native(any_val, expected_type=None):
+    """Convert a serialized "google.protobuf.Any" value to actual type.
+
+    :type any_val: :class:`google.protobuf.any_pb2.Any`
+    :param any_val: A serialized protobuf value container.
+
+    :type expected_type: str
+    :param expected_type: (Optional) The type URL we expect ``any_val``
+                          to have.
+
+    :rtype: object
+    :returns: The de-serialized object.
+    :raises: :class:`ValueError <exceptions.ValueError>` if the
+             ``expected_type`` does not match the ``type_url`` on the input.
+    """
+    if expected_type is not None and expected_type != any_val.type_url:
+        raise ValueError('Expected type: %s, Received: %s' % (
+            expected_type, any_val.type_url))
+    container_class = _TYPE_URL_MAP[any_val.type_url]
+    return container_class.FromString(any_val.value)
+
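A sketch of the round trip (hedged: the ``Any`` wrapper is constructed by
hand here, whereas in practice it arrives on an operation response)::

    from google.protobuf import any_pb2

    metadata = messages_v2_pb2.CreateInstanceMetadata()
    any_val = any_pb2.Any(type_url=_INSTANCE_CREATE_METADATA,
                          value=metadata.SerializeToString())
    _parse_pb_any_to_native(any_val)  # -> a CreateInstanceMetadata message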
+
+def _process_operation(operation_pb):
+    """Processes a create protobuf response.
+
+    :type operation_pb: :class:`google.longrunning.operations_pb2.Operation`
+    :param operation_pb: The long-running operation response from a
+                         Create/Update/Undelete instance request.
+
+    :rtype: (int, str, datetime)
+    :returns: (operation_id, location_id, operation_begin).
+    :raises: :class:`ValueError <exceptions.ValueError>` if the operation name
+             doesn't match the :data:`_OPERATION_NAME_RE` regex.
+    """
+    match = _OPERATION_NAME_RE.match(operation_pb.name)
+    if match is None:
+        raise ValueError('Operation name was not in the expected '
+                         'format after instance creation.',
+                         operation_pb.name)
+    location_id = match.group('location_id')
+    operation_id = int(match.group('operation_id'))
+
+    request_metadata = _parse_pb_any_to_native(operation_pb.metadata)
+    operation_begin = _pb_timestamp_to_datetime(
+        request_metadata.request_time)
+
+    return operation_id, location_id, operation_begin
+
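For example, an operation name in the expected format parses as::

    name = ('operations/projects/my-project/instances/my-instance/'
            'locations/us-central1-c/operations/1234')
    match = _OPERATION_NAME_RE.match(name)
    match.group('location_id')   # 'us-central1-c'
    match.group('operation_id')  # '1234'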
+
+
[docs]class Operation(object): + """Representation of a Google API Long-Running Operation. + + In particular, these will be the result of operations on + instances using the Cloud Bigtable API. + + :type op_type: str + :param op_type: The type of operation being performed. Expect + ``create``, ``update`` or ``undelete``. + + :type op_id: int + :param op_id: The ID of the operation. + + :type begin: :class:`datetime.datetime` + :param begin: The time when the operation was started. + + :type location_id: str + :param location_id: ID of the location in which the operation is running + + :type instance: :class:`Instance` + :param instance: The instance that created the operation. + """ + + def __init__(self, op_type, op_id, begin, location_id, instance=None): + self.op_type = op_type + self.op_id = op_id + self.begin = begin + self.location_id = location_id + self._instance = instance + self._complete = False + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.op_type == self.op_type and + other.op_id == self.op_id and + other.begin == self.begin and + other.location_id == self.location_id and + other._instance == self._instance and + other._complete == self._complete) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def finished(self): + """Check if the operation has finished. + + :rtype: bool + :returns: A boolean indicating if the current operation has completed. + :raises: :class:`ValueError <exceptions.ValueError>` if the operation + has already completed. + """ + if self._complete: + raise ValueError('The operation has completed.') + + operation_name = ( + 'operations/%s/locations/%s/operations/%d' % + (self._instance.name, self.location_id, self.op_id)) + request_pb = operations_pb2.GetOperationRequest(name=operation_name) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._instance._client._operations_stub.GetOperation( + request_pb, self._instance._client.timeout_seconds) + + if operation_pb.done: + self._complete = True + return True + else: + return False
+ + +
[docs]class Instance(object):
+    """Representation of a Google Cloud Bigtable Instance.
+
+    We can use an :class:`Instance` to:
+
+    * :meth:`reload` itself
+    * :meth:`create` itself
+    * :meth:`update` itself
+    * :meth:`delete` itself
+    * :meth:`undelete` itself
+
+    .. note::
+
+        For now, we leave out the ``default_storage_type`` (an enum)
+        which if not sent will end up as :data:`.data_v2_pb2.STORAGE_SSD`.
+
+    :type instance_id: str
+    :param instance_id: The ID of the instance.
+
+    :type client: :class:`Client <gcloud.bigtable.client.Client>`
+    :param client: The client that owns the instance. Provides
+                   authorization and a project ID.
+
+    :type location_id: str
+    :param location_id: ID of the location in which the instance will be
+                        created. Required for instances which do not yet
+                        exist.
+
+    :type display_name: str
+    :param display_name: (Optional) The display name for the instance in the
+                         Cloud Console UI. (Must be between 4 and 30
+                         characters.) If this value is not set in the
+                         constructor, will fall back to the instance ID.
+
+    :type serve_nodes: int
+    :param serve_nodes: (Optional) The number of nodes in the instance's
+                        cluster; used to set up the instance's cluster.
+    """
+
+    def __init__(self, instance_id, client,
+                 location_id=_EXISTING_INSTANCE_LOCATION_ID,
+                 display_name=None,
+                 serve_nodes=DEFAULT_SERVE_NODES):
+        self.instance_id = instance_id
+        self.display_name = display_name or instance_id
+        self._cluster_location_id = location_id
+        self._cluster_serve_nodes = serve_nodes
+        self._client = client
+
+    def _update_from_pb(self, instance_pb):
+        """Refresh self from the server-provided protobuf.
+
+        Helper for :meth:`from_pb` and :meth:`reload`.
+        """
+        if not instance_pb.display_name:  # Simple field (string)
+            raise ValueError('Instance protobuf does not contain display_name')
+        self.display_name = instance_pb.display_name
+
+    @classmethod
+
[docs] def from_pb(cls, instance_pb, client): + """Creates an instance from a protobuf. + + :type instance_pb: :class:`instance_pb2.Instance` + :param instance_pb: An instance protobuf object. + + :type client: :class:`Client <gcloud.bigtable.client.Client>` + :param client: The client that owns the instance. + + :rtype: :class:`Instance` + :returns: The instance parsed from the protobuf response. + :raises: :class:`ValueError <exceptions.ValueError>` if the instance + name does not match + ``projects/{project}/instances/{instance_id}`` + or if the parsed project ID does not match the project ID + on the client. + """ + match = _INSTANCE_NAME_RE.match(instance_pb.name) + if match is None: + raise ValueError('Instance protobuf name was not in the ' + 'expected format.', instance_pb.name) + if match.group('project') != client.project: + raise ValueError('Project ID on instance does not match the ' + 'project ID on the client') + instance_id = match.group('instance_id') + + result = cls(instance_id, client, _EXISTING_INSTANCE_LOCATION_ID) + result._update_from_pb(instance_pb) + return result
+ +
[docs] def copy(self): + """Make a copy of this instance. + + Copies the local data stored as simple types and copies the client + attached to this instance. + + :rtype: :class:`.Instance` + :returns: A copy of the current instance. + """ + new_client = self._client.copy() + return self.__class__(self.instance_id, new_client, + self._cluster_location_id, + display_name=self.display_name)
+ + @property + def name(self): + """Instance name used in requests. + + .. note:: + This property will not change if ``instance_id`` does not, + but the return value is not cached. + + The instance name is of the form + + ``"projects/{project}/instances/{instance_id}"`` + + :rtype: str + :returns: The instance name. + """ + return self._client.project_name + '/instances/' + self.instance_id + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + # NOTE: This does not compare the configuration values, such as + # the display_name. Instead, it only compares + # identifying values instance ID and client. This is + # intentional, since the same instance can be in different states + # if not synchronized. Instances with similar instance + # settings but different clients can't be used in the same way. + return (other.instance_id == self.instance_id and + other._client == self._client) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def reload(self): + """Reload the metadata for this instance.""" + request_pb = messages_v2_pb2.GetInstanceRequest(name=self.name) + # We expect `data_v2_pb2.Instance`. + instance_pb = self._client._instance_stub.GetInstance( + request_pb, self._client.timeout_seconds) + + # NOTE: _update_from_pb does not check that the project and + # instance ID on the response match the request. + self._update_from_pb(instance_pb)
+ +
[docs] def create(self): + """Create this instance. + + .. note:: + + Uses the ``project`` and ``instance_id`` on the current + :class:`Instance` in addition to the ``display_name``. + To change them before creating, reset the values via + + .. code:: python + + instance.display_name = 'New display name' + instance.instance_id = 'i-changed-my-mind' + + before calling :meth:`create`. + + :rtype: :class:`Operation` + :returns: The long-running operation corresponding to the + create operation. + """ + request_pb = _prepare_create_request(self) + # We expect a `google.longrunning.operations_pb2.Operation`. + operation_pb = self._client._instance_stub.CreateInstance( + request_pb, self._client.timeout_seconds) + + op_id, loc_id, op_begin = _process_operation(operation_pb) + return Operation('create', op_id, op_begin, loc_id, instance=self)
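+# A creation sketch (illustrative); assumes ``client`` is an authenticated
+# :class:`Client <gcloud.bigtable.client.Client>` and the IDs are
+# placeholders:
+#
+#     instance = Instance('my-instance-id', client,
+#                         location_id='us-central1-c',
+#                         display_name='My instance')
+#     operation = instance.create()
+#     # Poll operation.finished() (see Operation above) before using the
+#     # instance.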
+ +
[docs] def update(self): + """Update this instance. + + .. note:: + + Updates the ``display_name``. To change that value before + updating, reset its value via + + .. code:: python + + instance.display_name = 'New display name' + + before calling :meth:`update`. + """ + request_pb = data_v2_pb2.Instance( + name=self.name, + display_name=self.display_name, + ) + # Ignore the expected `data_v2_pb2.Instance`. + self._client._instance_stub.UpdateInstance( + request_pb, self._client.timeout_seconds)
+ +
[docs] def delete(self): + """Delete this instance. + + Marks an instance and all of its tables for permanent deletion + in 7 days. + + Immediately upon completion of the request: + + * Billing will cease for all of the instance's reserved resources. + * The instance's ``delete_time`` field will be set 7 days in + the future. + + Soon afterward: + + * All tables within the instance will become unavailable. + + Prior to the instance's ``delete_time``: + + * The instance can be recovered with a call to ``UndeleteInstance``. + * All other attempts to modify or delete the instance will be rejected. + + At the instance's ``delete_time``: + + * The instance and **all of its tables** will immediately and + irrevocably disappear from the API, and their data will be + permanently deleted. + """ + request_pb = messages_v2_pb2.DeleteInstanceRequest(name=self.name) + # We expect a `google.protobuf.empty_pb2.Empty` + self._client._instance_stub.DeleteInstance( + request_pb, self._client.timeout_seconds)
+ +
[docs] def cluster(self, cluster_id, serve_nodes=3): + """Factory to create a cluster associated with this instance. + + :type cluster_id: str + :param cluster_id: The ID of the cluster. + + :type serve_nodes: int + :param serve_nodes: (Optional) The number of nodes in the cluster. + Defaults to 3. + + :rtype: :class:`.Cluster` + :returns: The cluster owned by this instance. + """ + return Cluster(cluster_id, self, serve_nodes=serve_nodes)
+ +
[docs] def list_clusters(self): + """Lists clusters in this instance. + + :rtype: tuple + :returns: A pair of results, the first is a list of :class:`.Cluster` s + returned and the second is a list of strings (the failed + locations in the request). + """ + request_pb = messages_v2_pb2.ListClustersRequest(parent=self.name) + # We expect a `.cluster_messages_v1_pb2.ListClustersResponse` + list_clusters_response = self._client._instance_stub.ListClusters( + request_pb, self._client.timeout_seconds) + + failed_locations = [ + location for location in list_clusters_response.failed_locations] + clusters = [Cluster.from_pb(cluster_pb, self) + for cluster_pb in list_clusters_response.clusters] + return clusters, failed_locations
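+# A usage sketch (illustrative names):
+#
+#     clusters, failed_locations = instance.list_clusters()
+#     if failed_locations:
+#         # Some locations were unreachable; the cluster list is partial.
+#         ...
+#     for cluster in clusters:
+#         ...  # each entry is a Cluster owned by this instance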
+ +
[docs] def table(self, table_id): + """Factory to create a table associated with this instance. + + :type table_id: str + :param table_id: The ID of the table. + + :rtype: :class:`Table <gcloud.bigtable.table.Table>` + :returns: The table owned by this instance. + """ + return Table(table_id, self)
+ +
[docs] def list_tables(self): + """List the tables in this instance. + + :rtype: list of :class:`Table <gcloud.bigtable.table.Table>` + :returns: The list of tables owned by the instance. + :raises: :class:`ValueError <exceptions.ValueError>` if one of the + returned tables has a name that is not of the expected format. + """ + request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name) + # We expect a `table_messages_v2_pb2.ListTablesResponse` + table_list_pb = self._client._table_stub.ListTables( + request_pb, self._client.timeout_seconds) + + result = [] + for table_pb in table_list_pb.tables: + table_prefix = self.name + '/tables/' + if not table_pb.name.startswith(table_prefix): + raise ValueError('Table name %s not of expected format' % ( + table_pb.name,)) + table_id = table_pb.name[len(table_prefix):] + result.append(self.table(table_id)) + + return result
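+# A usage sketch (illustrative):
+#
+#     tables = instance.list_tables()      # existing tables, as Table objects
+#     table = instance.table('my-table')   # or reference one directly by ID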
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/row.html b/0.18.1/_modules/gcloud/bigtable/row.html new file mode 100644 index 000000000000..0a3a7b2950fc --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/row.html @@ -0,0 +1,1122 @@ + + + + + + + + gcloud.bigtable.row — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +

Source code for gcloud.bigtable.row

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Row."""
+
+
+import struct
+
+import six
+
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud._helpers import _microseconds_from_datetime
+from gcloud._helpers import _to_bytes
+from gcloud.bigtable._generated import (
+    data_pb2 as data_v2_pb2)
+from gcloud.bigtable._generated import (
+    bigtable_pb2 as messages_v2_pb2)
+
+
+_PACK_I64 = struct.Struct('>q').pack
+
+MAX_MUTATIONS = 100000
+"""The maximum number of mutations that a row can accumulate."""
+
+
+
[docs]class Row(object): + """Base representation of a Google Cloud Bigtable Row. + + This class has three subclasses corresponding to the three + RPC methods for sending row mutations: + + * :class:`DirectRow` for ``MutateRow`` + * :class:`ConditionalRow` for ``CheckAndMutateRow`` + * :class:`AppendRow` for ``ReadModifyWriteRow`` + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <gcloud.bigtable.table.Table>` + :param table: The table that owns the row. + """ + + def __init__(self, row_key, table): + self._row_key = _to_bytes(row_key) + self._table = table
+ + +class _SetDeleteRow(Row): + """Row helper for setting or deleting cell values. + + Implements helper methods to add mutations to set or delete cell contents: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <gcloud.bigtable.table.Table>` + :param table: The table that owns the row. + """ + + ALL_COLUMNS = object() + """Sentinel value used to indicate all columns in a column family.""" + + def _get_mutations(self, state): + """Gets the list of mutations for a given state. + + This method is intended to be implemented by subclasses. + + ``state`` may not need to be used by all subclasses. + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :raises: :class:`NotImplementedError <exceptions.NotImplementedError>` + always. + """ + raise NotImplementedError + + def _set_cell(self, column_family_id, column, value, timestamp=None, + state=None): + """Helper for :meth:`set_cell` + + Adds a mutation to set the value in a specific cell. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. + """ + column = _to_bytes(column) + if isinstance(value, six.integer_types): + value = _PACK_I64(value) + value = _to_bytes(value) + if timestamp is None: + # Use -1 for current Bigtable server time. + timestamp_micros = -1 + else: + timestamp_micros = _microseconds_from_datetime(timestamp) + # Truncate to millisecond granularity. + timestamp_micros -= (timestamp_micros % 1000) + + mutation_val = data_v2_pb2.Mutation.SetCell( + family_name=column_family_id, + column_qualifier=column, + timestamp_micros=timestamp_micros, + value=value, + ) + mutation_pb = data_v2_pb2.Mutation(set_cell=mutation_val) + self._get_mutations(state).append(mutation_pb) + + def _delete(self, state=None): + """Helper for :meth:`delete` + + Adds a delete mutation (for the entire row) to the accumulated + mutations. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. + """ + mutation_val = data_v2_pb2.Mutation.DeleteFromRow() + mutation_pb = data_v2_pb2.Mutation(delete_from_row=mutation_val) + self._get_mutations(state).append(mutation_pb) + + def _delete_cells(self, column_family_id, columns, time_range=None, + state=None): + """Helper for :meth:`delete_cell` and :meth:`delete_cells`. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. 
+ + :type columns: :class:`list` of :class:`str` / + :func:`unicode <unicode>`, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then + the entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that is passed along to + :meth:`_get_mutations`. + """ + mutations_list = self._get_mutations(state) + if columns is self.ALL_COLUMNS: + mutation_val = data_v2_pb2.Mutation.DeleteFromFamily( + family_name=column_family_id, + ) + mutation_pb = data_v2_pb2.Mutation(delete_from_family=mutation_val) + mutations_list.append(mutation_pb) + else: + delete_kwargs = {} + if time_range is not None: + delete_kwargs['time_range'] = time_range.to_pb() + + to_append = [] + for column in columns: + column = _to_bytes(column) + # time_range will never change if present, but the rest of + # delete_kwargs will + delete_kwargs.update( + family_name=column_family_id, + column_qualifier=column, + ) + mutation_val = data_v2_pb2.Mutation.DeleteFromColumn( + **delete_kwargs) + mutation_pb = data_v2_pb2.Mutation( + delete_from_column=mutation_val) + to_append.append(mutation_pb) + + # We don't add the mutations until all columns have been + # processed without error. + mutations_list.extend(to_append) + + +
[docs]class DirectRow(_SetDeleteRow): + """Google Cloud Bigtable Row for sending "direct" mutations. + + These mutations directly set or delete cell contents: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + These methods can be used directly:: + + >>> row = table.row(b'row-key1') + >>> row.set_cell(u'fam', b'col1', b'cell-val') + >>> row.delete_cell(u'fam', b'col2') + + .. note:: + + A :class:`DirectRow` accumulates mutations locally via the + :meth:`set_cell`, :meth:`delete`, :meth:`delete_cell` and + :meth:`delete_cells` methods. To actually send these mutations to the + Google Cloud Bigtable API, you must call :meth:`commit`. + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <gcloud.bigtable.table.Table>` + :param table: The table that owns the row. + """ + + def __init__(self, row_key, table): + super(DirectRow, self).__init__(row_key, table) + self._pb_mutations = [] + + def _get_mutations(self, state): # pylint: disable=unused-argument + """Gets the list of mutations for a given state. + + ``state`` is unused by :class:`DirectRow` but is used by + subclasses. + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :rtype: list + :returns: The list to add new mutations to (for the current state). + """ + return self._pb_mutations + +
[docs] def set_cell(self, column_family_id, column, value, timestamp=None): + """Sets a value in this row. + + The cell is determined by the ``row_key`` of this :class:`DirectRow` + and the ``column``. The ``column`` must be in an existing + :class:`.ColumnFamily` (as determined by ``column_family_id``). + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + """ + self._set_cell(column_family_id, column, value, timestamp=timestamp, + state=None)
+ +
[docs] def delete(self): + """Deletes this row from the table. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + """ + self._delete(state=None)
+ +
[docs] def delete_cell(self, column_family_id, column, time_range=None): + """Deletes cell in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family that will have a + cell deleted. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + """ + self._delete_cells(column_family_id, [column], time_range=time_range, + state=None)
+ +
[docs] def delete_cells(self, column_family_id, columns, time_range=None): + """Deletes cells in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode <unicode>`, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then + the entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + """ + self._delete_cells(column_family_id, columns, time_range=time_range, + state=None)
+ +
[docs] def commit(self): + """Makes a ``MutateRow`` API request. + + If no mutations have been created in the row, no request is made. + + Mutations are applied atomically and in order, meaning that earlier + mutations can be masked / negated by later ones. Cells already present + in the row are left unchanged unless explicitly changed by a mutation. + + After committing the accumulated mutations, resets the local + mutations to an empty list. + + :raises: :class:`ValueError <exceptions.ValueError>` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + mutations_list = self._get_mutations(None) + num_mutations = len(mutations_list) + if num_mutations == 0: + return + if num_mutations > MAX_MUTATIONS: + raise ValueError('%d total mutations exceed the maximum allowable ' + '%d.' % (num_mutations, MAX_MUTATIONS)) + request_pb = messages_v2_pb2.MutateRowRequest( + table_name=self._table.name, + row_key=self._row_key, + mutations=mutations_list, + ) + # We expect a `google.protobuf.empty_pb2.Empty` + client = self._table._instance._client + client._data_stub.MutateRow(request_pb, client.timeout_seconds) + self.clear()
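+# A sketch of the accumulate-then-commit flow; assumes ``table`` is a
+# Table whose row() factory returns a DirectRow (as in the class
+# docstring above):
+#
+#     row = table.row(b'row-key1')
+#     row.set_cell(u'fam', b'col1', b'cell-val')
+#     row.delete_cell(u'fam', b'col2')
+#     row.commit()    # one atomic MutateRow request; mutations then reset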
+ +
[docs] def clear(self): + """Removes all currently accumulated mutations on the current row.""" + del self._pb_mutations[:]
+ + +
[docs]class ConditionalRow(_SetDeleteRow): + """Google Cloud Bigtable Row for sending mutations conditionally. + + Each mutation has an associated state: :data:`True` or :data:`False`. + When :meth:`commit`-ed, the mutations for the :data:`True` + state will be applied if the filter matches any cells in + the row, otherwise the :data:`False` state will be applied. + + A :class:`ConditionalRow` accumulates mutations in the same way a + :class:`DirectRow` does: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + with the only change being the extra ``state`` parameter:: + + >>> row_cond = table.row(b'row-key2', filter_=row_filter) + >>> row_cond.set_cell(u'fam', b'col', b'cell-val', state=True) + >>> row_cond.delete_cell(u'fam', b'col', state=False) + + .. note:: + + As with :class:`DirectRow`, to actually send these mutations to the + Google Cloud Bigtable API, you must call :meth:`commit`. + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <gcloud.bigtable.table.Table>` + :param table: The table that owns the row. + + :type filter_: :class:`.RowFilter` + :param filter_: Filter to be used for conditional mutations. + """ + def __init__(self, row_key, table, filter_): + super(ConditionalRow, self).__init__(row_key, table) + self._filter = filter_ + self._true_pb_mutations = [] + self._false_pb_mutations = [] + + def _get_mutations(self, state): + """Gets the list of mutations for a given state. + + Overridden so that the state can be used in: + + * :meth:`set_cell` + * :meth:`delete` + * :meth:`delete_cell` + * :meth:`delete_cells` + + :type state: bool + :param state: The state that the mutation should be + applied in. + + :rtype: list + :returns: The list to add new mutations to (for the current state). + """ + if state: + return self._true_pb_mutations + else: + return self._false_pb_mutations + +
[docs] def commit(self): + """Makes a ``CheckAndMutateRow`` API request. + + If no mutations have been created in the row, no request is made. + + The mutations will be applied conditionally, based on whether the + filter matches any cells in the :class:`ConditionalRow` or not. (Each + method which adds a mutation has a ``state`` parameter for this + purpose.) + + Mutations are applied atomically and in order, meaning that earlier + mutations can be masked / negated by later ones. Cells already present + in the row are left unchanged unless explicitly changed by a mutation. + + After committing the accumulated mutations, resets the local + mutations. + + :rtype: bool + :returns: Flag indicating if the filter was matched (which also + indicates which set of mutations were applied by the server). + :raises: :class:`ValueError <exceptions.ValueError>` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + true_mutations = self._get_mutations(state=True) + false_mutations = self._get_mutations(state=False) + num_true_mutations = len(true_mutations) + num_false_mutations = len(false_mutations) + if num_true_mutations == 0 and num_false_mutations == 0: + return + if (num_true_mutations > MAX_MUTATIONS or + num_false_mutations > MAX_MUTATIONS): + raise ValueError( + 'Exceeded the maximum allowable mutations (%d). Had %d true ' + 'mutations and %d false mutations.' % ( + MAX_MUTATIONS, num_true_mutations, num_false_mutations)) + + request_pb = messages_v2_pb2.CheckAndMutateRowRequest( + table_name=self._table.name, + row_key=self._row_key, + predicate_filter=self._filter.to_pb(), + true_mutations=true_mutations, + false_mutations=false_mutations, + ) + # We expect a `.messages_v2_pb2.CheckAndMutateRowResponse` + client = self._table._instance._client + resp = client._data_stub.CheckAndMutateRow( + request_pb, client.timeout_seconds) + self.clear() + return resp.predicate_matched
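+# A conditional-mutation sketch; assumes ``row_filter`` is a RowFilter and
+# ``table.row`` accepts a ``filter_`` argument (as in the class docstring
+# above):
+#
+#     row_cond = table.row(b'row-key2', filter_=row_filter)
+#     row_cond.set_cell(u'fam', b'col', b'matched', state=True)
+#     row_cond.set_cell(u'fam', b'col', b'unmatched', state=False)
+#     matched = row_cond.commit()    # True if the filter matched any cells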
+ + # pylint: disable=arguments-differ +
[docs] def set_cell(self, column_family_id, column, value, timestamp=None, + state=True): + """Sets a value in this row. + + The cell is determined by the ``row_key`` of this + :class:`ConditionalRow` and the ``column``. The ``column`` must be in + an existing :class:`.ColumnFamily` (as determined by + ``column_family_id``). + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes or :class:`int` + :param value: The value to set in the cell. If an integer is used, + will be interpreted as a 64-bit big-endian signed + integer (8 bytes). + + :type timestamp: :class:`datetime.datetime` + :param timestamp: (Optional) The timestamp of the operation. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._set_cell(column_family_id, column, value, timestamp=timestamp, + state=state)
+ +
[docs] def delete(self, state=True): + """Deletes this row from the table. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._delete(state=state)
+ +
[docs] def delete_cell(self, column_family_id, column, time_range=None, + state=True): + """Deletes cell in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family that will have a + cell deleted. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._delete_cells(column_family_id, [column], time_range=time_range, + state=state)
+ +
[docs] def delete_cells(self, column_family_id, columns, time_range=None, + state=True): + """Deletes cells in this row. + + .. note:: + + This method adds a mutation to the accumulated mutations on this + row, but does not make an API request. To actually + send an API request (with the mutations) to the Google Cloud + Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column + or columns with cells being deleted. Must be + of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type columns: :class:`list` of :class:`str` / + :func:`unicode <unicode>`, or :class:`object` + :param columns: The columns within the column family that will have + cells deleted. If :attr:`ALL_COLUMNS` is used then the + entire column family will be deleted from the row. + + :type time_range: :class:`TimestampRange` + :param time_range: (Optional) The range of time within which cells + should be deleted. + + :type state: bool + :param state: (Optional) The state that the mutation should be + applied in. Defaults to :data:`True`. + """ + self._delete_cells(column_family_id, columns, time_range=time_range, + state=state)
+ # pylint: enable=arguments-differ + +
[docs] def clear(self): + """Removes all currently accumulated mutations on the current row.""" + del self._true_pb_mutations[:] + del self._false_pb_mutations[:]
+ + +
[docs]class AppendRow(Row): + """Google Cloud Bigtable Row for sending append mutations. + + These mutations are intended to augment the value of an existing cell + and use the methods: + + * :meth:`append_cell_value` + * :meth:`increment_cell_value` + + The first works by appending bytes and the second by incrementing an + integer (stored in the cell as 8 bytes). In either case, if the + cell is empty, assumes the default empty value (empty string for + bytes or 0 for an integer). + + :type row_key: bytes + :param row_key: The key for the current row. + + :type table: :class:`Table <gcloud.bigtable.table.Table>` + :param table: The table that owns the row. + """ + + def __init__(self, row_key, table): + super(AppendRow, self).__init__(row_key, table) + self._rule_pb_list = [] + +
[docs] def clear(self): + """Removes all currently accumulated modifications on current row.""" + del self._rule_pb_list[:]
+ +
[docs] def append_cell_value(self, column_family_id, column, value): + """Appends a value to an existing cell. + + .. note:: + + This method adds a read-modify rule protobuf to the accumulated + read-modify rules on this row, but does not make an API + request. To actually send an API request (with the rules) to the + Google Cloud Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type value: bytes + :param value: The value to append to the existing value in the cell. If + the targeted cell is unset, it will be treated as + containing the empty string. + """ + column = _to_bytes(column) + value = _to_bytes(value) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + append_value=value) + self._rule_pb_list.append(rule_pb)
+ +
[docs] def increment_cell_value(self, column_family_id, column, int_value): + """Increments a value in an existing cell. + + Assumes the value in the cell is stored as a 64 bit integer + serialized to bytes. + + .. note:: + + This method adds a read-modify rule protobuf to the accumulated + read-modify rules on this row, but does not make an API + request. To actually send an API request (with the rules) to the + Google Cloud Bigtable API, call :meth:`commit`. + + :type column_family_id: str + :param column_family_id: The column family that contains the column. + Must be of the form + ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type column: bytes + :param column: The column within the column family where the cell + is located. + + :type int_value: int + :param int_value: The value to increment the existing value in the cell + by. If the targeted cell is unset, it will be treated + as containing a zero. Otherwise, the targeted cell + must contain an 8-byte value (interpreted as a 64-bit + big-endian signed integer), or the entire request + will fail. + """ + column = _to_bytes(column) + rule_pb = data_v2_pb2.ReadModifyWriteRule( + family_name=column_family_id, + column_qualifier=column, + increment_amount=int_value) + self._rule_pb_list.append(rule_pb)
+ +
[docs] def commit(self): + """Makes a ``ReadModifyWriteRow`` API request. + + This commits modifications made by :meth:`append_cell_value` and + :meth:`increment_cell_value`. If no modifications were made, makes + no API request and just returns ``{}``. + + Modifies a row atomically, reading the latest existing + timestamp / value from the specified columns and writing a new value by + appending / incrementing. The new cell created uses either the current + server time or the highest timestamp of a cell in that column (if it + exceeds the server time). + + After committing the accumulated mutations, resets the local mutations. + + .. code:: python + + >>> append_row.commit() + { + u'col-fam-id': { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + }, + u'col-fam-id2': { + b'col-name3-but-other-fam': [ + (b'foo', datetime.datetime(...)), + ], + }, + } + + :rtype: dict + :returns: The new contents of all modified cells. Returned as a + dictionary of column families, each of which holds a + dictionary of columns. Each column contains a list of cells + modified. Each cell is represented with a two-tuple with the + value (in bytes) and the timestamp for the cell. + :raises: :class:`ValueError <exceptions.ValueError>` if the number of + mutations exceeds the :data:`MAX_MUTATIONS`. + """ + num_mutations = len(self._rule_pb_list) + if num_mutations == 0: + return {} + if num_mutations > MAX_MUTATIONS: + raise ValueError('%d total append mutations exceed the maximum ' + 'allowable %d.' % (num_mutations, MAX_MUTATIONS)) + request_pb = messages_v2_pb2.ReadModifyWriteRowRequest( + table_name=self._table.name, + row_key=self._row_key, + rules=self._rule_pb_list, + ) + # We expect a `.data_v2_pb2.Row` + client = self._table._instance._client + row_response = client._data_stub.ReadModifyWriteRow( + request_pb, client.timeout_seconds) + + # Reset modifications after commit-ing request. + self.clear() + + # NOTE: We expect row_response.key == self._row_key but don't check. + return _parse_rmw_row_response(row_response)
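+# A read-modify-write sketch; AppendRow is constructed directly here,
+# since only its constructor is shown in this module:
+#
+#     row = AppendRow(b'row-key3', table)
+#     row.increment_cell_value(u'fam', b'counter', 1)
+#     row.append_cell_value(u'fam', b'log', b'entry;')
+#     new_cells = row.commit()    # {family: {column: [(value, timestamp)]}}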
+ + +def _parse_rmw_row_response(row_response): + """Parses the response to a ``ReadModifyWriteRow`` request. + + :type row_response: :class:`.data_v2_pb2.Row` + :param row_response: The response row (with only modified cells) from a + ``ReadModifyWriteRow`` request. + + :rtype: dict + :returns: The new contents of all modified cells. Returned as a + dictionary of column families, each of which holds a + dictionary of columns. Each column contains a list of cells + modified. Each cell is represented with a two-tuple with the + value (in bytes) and the timestamp for the cell. For example: + + .. code:: python + + { + u'col-fam-id': { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + }, + u'col-fam-id2': { + b'col-name3-but-other-fam': [ + (b'foo', datetime.datetime(...)), + ], + }, + } + """ + result = {} + for column_family in row_response.row.families: + column_family_id, curr_family = _parse_family_pb(column_family) + result[column_family_id] = curr_family + return result + + +def _parse_family_pb(family_pb): + """Parses a Family protobuf into a dictionary. + + :type family_pb: :class:`._generated.data_pb2.Family` + :param family_pb: A protobuf + + :rtype: tuple + :returns: A string and dictionary. The string is the name of the + column family and the dictionary has column names (within the + family) as keys and cell lists as values. Each cell is + represented with a two-tuple with the value (in bytes) and the + timestamp for the cell. For example: + + .. code:: python + + { + b'col-name1': [ + (b'cell-val', datetime.datetime(...)), + (b'cell-val-newer', datetime.datetime(...)), + ], + b'col-name2': [ + (b'altcol-cell-val', datetime.datetime(...)), + ], + } + """ + result = {} + for column in family_pb.columns: + result[column.qualifier] = cells = [] + for cell in column.cells: + val_pair = ( + cell.value, + _datetime_from_microseconds(cell.timestamp_micros), + ) + cells.append(val_pair) + + return family_pb.name, result +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/row_data.html b/0.18.1/_modules/gcloud/bigtable/row_data.html new file mode 100644 index 000000000000..d25f8ecdcd14 --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/row_data.html @@ -0,0 +1,675 @@ + + + + + + + + gcloud.bigtable.row_data — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +

Source code for gcloud.bigtable.row_data

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Container for Google Cloud Bigtable Cells and Streaming Row Contents."""
+
+
+import copy
+import six
+
+from gcloud._helpers import _datetime_from_microseconds
+from gcloud._helpers import _to_bytes
+
+
+
[docs]class Cell(object): + """Representation of a Google Cloud Bigtable Cell. + + :type value: bytes + :param value: The value stored in the cell. + + :type timestamp: :class:`datetime.datetime` + :param timestamp: The timestamp when the cell was stored. + + :type labels: list + :param labels: (Optional) List of strings. Labels applied to the cell. + """ + + def __init__(self, value, timestamp, labels=()): + self.value = value + self.timestamp = timestamp + self.labels = list(labels) + + @classmethod +
[docs] def from_pb(cls, cell_pb): + """Create a new cell from a Cell protobuf. + + :type cell_pb: :class:`._generated.data_pb2.Cell` + :param cell_pb: The protobuf to convert. + + :rtype: :class:`Cell` + :returns: The cell corresponding to the protobuf. + """ + timestamp = _datetime_from_microseconds(cell_pb.timestamp_micros) + if cell_pb.labels: + return cls(cell_pb.value, timestamp, labels=cell_pb.labels) + else: + return cls(cell_pb.value, timestamp)
+ + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.value == self.value and + other.timestamp == self.timestamp and + other.labels == self.labels) + + def __ne__(self, other): + return not self.__eq__(other)
+ + +
[docs]class PartialCellData(object): + """Representation of a partial cell in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) cell. + + :type family_name: str + :param family_name: The family name of the (partial) cell. + + :type qualifier: bytes + :param qualifier: The column qualifier of the (partial) cell. + + :type timestamp_micros: int + :param timestamp_micros: The timestamp (in microseconds) of the + (partial) cell. + + :type labels: list of str + :param labels: labels assigned to the (partial) cell + + :type value: bytes + :param value: The (accumulated) value of the (partial) cell. + """ + def __init__(self, row_key, family_name, qualifier, timestamp_micros, + labels=(), value=b''): + self.row_key = row_key + self.family_name = family_name + self.qualifier = qualifier + self.timestamp_micros = timestamp_micros + self.labels = labels + self.value = value + +
[docs] def append_value(self, value): + """Append bytes from a new chunk to value. + + :type value: bytes + :param value: bytes to append + """ + self.value += value
+ + +
[docs]class PartialRowData(object): + """Representation of a partial row in a Google Cloud Bigtable Table. + + These are expected to be updated directly from a + :class:`._generated.bigtable_service_messages_pb2.ReadRowsResponse` + + :type row_key: bytes + :param row_key: The key for the row holding the (partial) data. + """ + + def __init__(self, row_key): + self._row_key = row_key + self._cells = {} + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other._row_key == self._row_key and + other._cells == self._cells) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def to_dict(self): + """Convert the cells to a dictionary. + + This is intended to be used with HappyBase, so the column family and + column qualifiers are combined (with ``:``). + + :rtype: dict + :returns: Dictionary containing all the data in the cells of this row. + """ + result = {} + for column_family_id, columns in six.iteritems(self._cells): + for column_qual, cells in six.iteritems(columns): + key = (_to_bytes(column_family_id) + b':' + + _to_bytes(column_qual)) + result[key] = cells + return result
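+# The flattening to_dict() performs, sketched on hypothetical contents:
+#
+#     row.cells     -> {u'fam': {b'col': [<Cell>, ...]}}
+#     row.to_dict() -> {b'fam:col': [<Cell>, ...]}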
+ + @property + def cells(self): + """Property returning all the cells accumulated on this partial row. + + :rtype: dict + :returns: Dictionary of the :class:`Cell` objects accumulated. This + dictionary has two-levels of keys (first for column families + and second for column names/qualifiers within a family). For + a given column, a list of :class:`Cell` objects is stored. + """ + return copy.deepcopy(self._cells) + + @property + def row_key(self): + """Getter for the current (partial) row's key. + + :rtype: bytes + :returns: The current (partial) row's key. + """ + return self._row_key
+ + +
[docs]class InvalidReadRowsResponse(RuntimeError): + """Exception raised due to invalid response data from the back-end."""
+ + +
[docs]class InvalidChunk(RuntimeError): + """Exception raised due to invalid chunk data from the back-end."""
+ + +
[docs]class PartialRowsData(object): + """Convenience wrapper for consuming a ``ReadRows`` streaming response. + + :type response_iterator: + :class:`grpc.framework.alpha._reexport._CancellableIterator` + :param response_iterator: A streaming iterator returned from a + ``ReadRows`` request. + """ + START = "Start" # No responses yet processed. + NEW_ROW = "New row" # No cells yet complete for row + ROW_IN_PROGRESS = "Row in progress" # Some cells complete for row + CELL_IN_PROGRESS = "Cell in progress" # Incomplete cell for row + + def __init__(self, response_iterator): + self._response_iterator = response_iterator + # Fully-processed rows, keyed by `row_key` + self._rows = {} + # Counter for responses pulled from iterator + self._counter = 0 + # Maybe cached from previous response + self._last_scanned_row_key = None + # In-progress row, unset until first response, after commit/reset + self._row = None + # Last complete row, unset until first commit + self._previous_row = None + # In-progress cell, unset until first response, after completion + self._cell = None + # Last complete cell, unset until first completion, after new row + self._previous_cell = None + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other._response_iterator == self._response_iterator + + def __ne__(self, other): + return not self.__eq__(other) + + @property + def state(self): + """State machine state. + + :rtype: str + :returns: name of state corresponding to current row / chunk + processing. + """ + if self._last_scanned_row_key is None: + return self.START + if self._row is None: + assert self._cell is None + assert self._previous_cell is None + return self.NEW_ROW + if self._cell is not None: + return self.CELL_IN_PROGRESS + if self._previous_cell is not None: + return self.ROW_IN_PROGRESS + return self.NEW_ROW # row added, no chunk yet processed + + @property + def rows(self): + """Property returning all rows accumulated from the stream. + + :rtype: dict + :returns: row_key -> :class:`PartialRowData`. + """ + # NOTE: To avoid duplicating large objects, this is just the + # mutable private data. + return self._rows +
[docs] def cancel(self): + """Cancels the iterator, closing the stream.""" + self._response_iterator.cancel()
+ +
[docs] def consume_next(self): + """Consume the next ``ReadRowsResponse`` from the stream. + + Parse the response and its chunks into a new/existing row in + :attr:`_rows` + """ + response = six.next(self._response_iterator) + self._counter += 1 + + if self._last_scanned_row_key is None: # first response + if response.last_scanned_row_key: + raise InvalidReadRowsResponse() + + self._last_scanned_row_key = response.last_scanned_row_key + + row = self._row + cell = self._cell + + for chunk in response.chunks: + + self._validate_chunk(chunk) + + if chunk.reset_row: + row = self._row = None + cell = self._cell = self._previous_cell = None + continue + + if row is None: + row = self._row = PartialRowData(chunk.row_key) + + if cell is None: + cell = self._cell = PartialCellData( + chunk.row_key, + chunk.family_name.value, + chunk.qualifier.value, + chunk.timestamp_micros, + chunk.labels, + chunk.value) + self._copy_from_previous(cell) + else: + cell.append_value(chunk.value) + + if chunk.commit_row: + self._save_current_row() + row = cell = None + continue + + if chunk.value_size == 0: + self._save_current_cell() + cell = None
+ +
[docs] def consume_all(self, max_loops=None): + """Consume the streamed responses until there are no more. + + This simply calls :meth:`consume_next` until there are no + more to consume. + + :type max_loops: int + :param max_loops: (Optional) Maximum number of times to try to consume + an additional ``ReadRowsResponse``. You can use this + to avoid long wait times. + """ + curr_loop = 0 + if max_loops is None: + max_loops = float('inf') + while curr_loop < max_loops: + curr_loop += 1 + try: + self.consume_next() + except StopIteration: + break
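+# A consumption sketch; assumes ``response_iterator`` came from a
+# ``ReadRows`` gRPC call:
+#
+#     partial_rows = PartialRowsData(response_iterator)
+#     partial_rows.consume_all()
+#     for row_key, row in partial_rows.rows.items():
+#         cells = row.cells    # {family: {qualifier: [Cell, ...]}}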
+ + @staticmethod + def _validate_chunk_status(chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`, etc.""" + # No reset with other keys + if chunk.reset_row: + _raise_if(chunk.row_key) + _raise_if(chunk.HasField('family_name')) + _raise_if(chunk.HasField('qualifier')) + _raise_if(chunk.timestamp_micros) + _raise_if(chunk.labels) + _raise_if(chunk.value_size) + _raise_if(chunk.value) + # No commit with value size + _raise_if(chunk.commit_row and chunk.value_size > 0) + # No negative value_size (inferred as a general constraint). + _raise_if(chunk.value_size < 0) + + def _validate_chunk_new_row(self, chunk): + """Helper for :meth:`_validate_chunk`.""" + assert self.state == self.NEW_ROW + _raise_if(chunk.reset_row) + _raise_if(not chunk.row_key) + _raise_if(not chunk.family_name) + _raise_if(not chunk.qualifier) + # This constraint is not enforced in the Go example. + _raise_if(chunk.value_size > 0 and chunk.commit_row is not False) + # This constraint is from the Go example, not the spec. + _raise_if(self._previous_row is not None and + chunk.row_key <= self._previous_row.row_key) + + def _same_as_previous(self, chunk): + """Helper for :meth:`_validate_chunk_row_in_progress`""" + previous = self._previous_cell + return (chunk.row_key == previous.row_key and + chunk.family_name == previous.family_name and + chunk.qualifier == previous.qualifier and + chunk.labels == previous.labels) + + def _validate_chunk_row_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.ROW_IN_PROGRESS + self._validate_chunk_status(chunk) + if not chunk.HasField('commit_row') and not chunk.reset_row: + _raise_if(not chunk.timestamp_micros or not chunk.value) + _raise_if(chunk.row_key and + chunk.row_key != self._row.row_key) + _raise_if(chunk.HasField('family_name') and + not chunk.HasField('qualifier')) + previous = self._previous_cell + _raise_if(self._same_as_previous(chunk) and + chunk.timestamp_micros <= previous.timestamp_micros) + + def _validate_chunk_cell_in_progress(self, chunk): + """Helper for :meth:`_validate_chunk`""" + assert self.state == self.CELL_IN_PROGRESS + self._validate_chunk_status(chunk) + self._copy_from_current(chunk) + + def _validate_chunk(self, chunk): + """Helper for :meth:`consume_next`.""" + if self.state == self.NEW_ROW: + self._validate_chunk_new_row(chunk) + if self.state == self.ROW_IN_PROGRESS: + self._validate_chunk_row_in_progress(chunk) + if self.state == self.CELL_IN_PROGRESS: + self._validate_chunk_cell_in_progress(chunk) + + def _save_current_cell(self): + """Helper for :meth:`consume_next`.""" + row, cell = self._row, self._cell + family = row._cells.setdefault(cell.family_name, {}) + qualified = family.setdefault(cell.qualifier, []) + complete = Cell.from_pb(self._cell) + qualified.append(complete) + self._cell, self._previous_cell = None, cell + + def _copy_from_current(self, chunk): + """Helper for :meth:`consume_next`.""" + current = self._cell + if current is not None: + if not chunk.row_key: + chunk.row_key = current.row_key + if not chunk.HasField('family_name'): + chunk.family_name.value = current.family_name + if not chunk.HasField('qualifier'): + chunk.qualifier.value = current.qualifier + if not chunk.timestamp_micros: + chunk.timestamp_micros = current.timestamp_micros + if not chunk.labels: + chunk.labels.extend(current.labels) + + def _copy_from_previous(self, cell): + """Helper for :meth:`consume_next`.""" + previous = self._previous_cell + if previous is not None: + if not cell.row_key: + 
cell.row_key = previous.row_key + if not cell.family_name: + cell.family_name = previous.family_name + if not cell.qualifier: + cell.qualifier = previous.qualifier + + def _save_current_row(self): + """Helper for :meth:`consume_next`.""" + if self._cell: + self._save_current_cell() + self._rows[self._row.row_key] = self._row + self._row, self._previous_row = None, self._row + self._previous_cell = None
+ + +def _raise_if(predicate, *args): + """Helper for validation methods.""" + if predicate: + raise InvalidChunk(*args) +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/bigtable/row_filters.html b/0.18.1/_modules/gcloud/bigtable/row_filters.html new file mode 100644 index 000000000000..824440866d1c --- /dev/null +++ b/0.18.1/_modules/gcloud/bigtable/row_filters.html @@ -0,0 +1,1001 @@ + + + + + + + + gcloud.bigtable.row_filters — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +

Source code for gcloud.bigtable.row_filters

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Filters for Google Cloud Bigtable Row classes."""
+
+
+from gcloud._helpers import _microseconds_from_datetime
+from gcloud._helpers import _to_bytes
+from gcloud.bigtable._generated import (
+    data_pb2 as data_v2_pb2)
+
+
+
[docs]class RowFilter(object): + """Basic filter to apply to cells in a row. + + These values can be combined via :class:`RowFilterChain`, + :class:`RowFilterUnion` and :class:`ConditionalRowFilter`. + + .. note:: + + This class is a do-nothing base class for all row filters. + """ + + def __ne__(self, other): + return not self.__eq__(other)
+ + +class _BoolFilter(RowFilter): + """Row filter that uses a boolean flag. + + :type flag: bool + :param flag: An indicator if a setting is turned on or off. + """ + + def __init__(self, flag): + self.flag = flag + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.flag == self.flag + + +
[docs]class SinkFilter(_BoolFilter): + """Advanced row filter to skip parent filters. + + :type flag: bool + :param flag: ADVANCED USE ONLY. Hook for introspection into the row filter. + Outputs all cells directly to the output of the read rather + than to any parent filter. Cannot be used within the + ``predicate_filter``, ``true_filter``, or ``false_filter`` + of a :class:`ConditionalRowFilter`. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(sink=self.flag)
+ + +
[docs]class PassAllFilter(_BoolFilter): + """Row filter equivalent to not filtering at all. + + :type flag: bool + :param flag: Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(pass_all_filter=self.flag)
+ + +
[docs]class BlockAllFilter(_BoolFilter): + """Row filter that doesn't match any cells. + + :type flag: bool + :param flag: Does not match any cells, regardless of input. Useful for + temporarily disabling just part of a filter. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(block_all_filter=self.flag)
+ + +class _RegexFilter(RowFilter): + """Row filter that uses a regular expression. + + The ``regex`` must be a valid RE2 pattern. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: bytes or str + :param regex: A regular expression (RE2) for some row filter. + """ + + def __init__(self, regex): + self.regex = _to_bytes(regex) + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.regex == self.regex + + +
[docs]class RowKeyRegexFilter(_RegexFilter): + """Row filter for a row key regular expression. + + The ``regex`` must be a valid RE2 pattern. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care must be taken with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from rows with row + keys that satisfy this regex. For a + ``CheckAndMutateRowRequest``, this filter is unnecessary + since the row key is already specified. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(row_key_regex_filter=self.regex)
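+# A sketch; the pattern is an arbitrary RE2 byte string:
+#
+#     key_filter = RowKeyRegexFilter(b'acct-[0-9]+')
+#     key_filter.to_pb()    # -> data_v2_pb2.RowFilter(row_key_regex_filter=...)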
+ + +
[docs]class RowSampleFilter(RowFilter): + """Matches all cells from a row with probability p. + + :type sample: float + :param sample: The probability of matching a cell (must be in the + interval ``[0, 1]``). + """ + + def __init__(self, sample): + self.sample = sample + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.sample == self.sample + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(row_sample_filter=self.sample)
+ + +
[docs]class FamilyNameRegexFilter(_RegexFilter): + """Row filter for a family name regular expression. + + The ``regex`` must be a valid RE2 pattern. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + :type regex: str + :param regex: A regular expression (RE2) to match cells from columns in a + given column family. For technical reasons, the regex must + not contain the ``':'`` character, even if it is not being + used as a literal. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(family_name_regex_filter=self.regex)
+ + +
[docs]class ColumnQualifierRegexFilter(_RegexFilter): + """Row filter for a column qualifier regular expression. + + The ``regex`` must be a valid RE2 pattern. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care must be taken with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells from columns that + match this regex (irrespective of column family). + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(column_qualifier_regex_filter=self.regex)
+ + +
[docs]class TimestampRange(object): + """Range of time with inclusive lower and exclusive upper bounds. + + :type start: :class:`datetime.datetime` + :param start: (Optional) The (inclusive) lower bound of the timestamp + range. If omitted, defaults to Unix epoch. + + :type end: :class:`datetime.datetime` + :param end: (Optional) The (exclusive) upper bound of the timestamp + range. If omitted, no upper bound is used. + """ + + def __init__(self, start=None, end=None): + self.start = start + self.end = end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.start == self.start and + other.end == self.end) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def to_pb(self): + """Converts the :class:`TimestampRange` to a protobuf. + + :rtype: :class:`.data_v2_pb2.TimestampRange` + :returns: The converted current object. + """ + timestamp_range_kwargs = {} + if self.start is not None: + timestamp_range_kwargs['start_timestamp_micros'] = ( + _microseconds_from_datetime(self.start)) + if self.end is not None: + timestamp_range_kwargs['end_timestamp_micros'] = ( + _microseconds_from_datetime(self.end)) + return data_v2_pb2.TimestampRange(**timestamp_range_kwargs)
+ + +
[docs]class TimestampRangeFilter(RowFilter): + """Row filter that limits cells to a range of time. + + :type range_: :class:`TimestampRange` + :param range_: Range of time that cells should match against. + """ + + def __init__(self, range_): + self.range_ = range_ + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.range_ == self.range_ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + First converts the ``range_`` on the current object to a protobuf and + then uses it in the ``timestamp_range_filter`` field. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter( + timestamp_range_filter=self.range_.to_pb())
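Combining the two classes above, here is a hedged sketch of restricting cells to a window of time. It relies on ``UTC`` from ``gcloud._helpers`` (imported elsewhere in this library) and timezone-aware datetimes::

    import datetime

    from gcloud._helpers import UTC
    from gcloud.bigtable.row_filters import (
        TimestampRange, TimestampRangeFilter)

    # Cells written during the first week of 2016: start is inclusive,
    # end is exclusive, both converted to microseconds by to_pb().
    week = TimestampRange(
        start=datetime.datetime(2016, 1, 1, tzinfo=UTC),
        end=datetime.datetime(2016, 1, 8, tzinfo=UTC))
    time_filter = TimestampRangeFilter(week)
    time_filter_pb = time_filter.to_pb()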
+ + +
[docs]class ColumnRangeFilter(RowFilter): + """A row filter to restrict to a range of columns. + + Both the start and end column can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type column_family_id: str + :param column_family_id: The column family that contains the columns. Must + be of the form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type start_column: bytes + :param start_column: The start of the range of columns. If no value is + used, the backend applies no lower bound to the + values. + + :type end_column: bytes + :param end_column: The end of the range of columns. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start column should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_column`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end column should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_column`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start`` + is set but no ``start_column`` is given or if ``inclusive_end`` + is set but no ``end_column`` is given + """ + + def __init__(self, column_family_id, start_column=None, end_column=None, + inclusive_start=None, inclusive_end=None): + self.column_family_id = column_family_id + + if inclusive_start is None: + inclusive_start = True + elif start_column is None: + raise ValueError('Inclusive start was specified but no ' + 'start column was given.') + self.start_column = start_column + self.inclusive_start = inclusive_start + + if inclusive_end is None: + inclusive_end = True + elif end_column is None: + raise ValueError('Inclusive end was specified but no ' + 'end column was given.') + self.end_column = end_column + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.column_family_id == self.column_family_id and + other.start_column == self.start_column and + other.end_column == self.end_column and + other.inclusive_start == self.inclusive_start and + other.inclusive_end == self.inclusive_end) + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ColumnRange` and then uses it + in the ``column_range_filter`` field. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + column_range_kwargs = {'family_name': self.column_family_id} + if self.start_column is not None: + if self.inclusive_start: + key = 'start_qualifier_closed' + else: + key = 'start_qualifier_open' + column_range_kwargs[key] = _to_bytes(self.start_column) + if self.end_column is not None: + if self.inclusive_end: + key = 'end_qualifier_closed' + else: + key = 'end_qualifier_open' + column_range_kwargs[key] = _to_bytes(self.end_column) + + column_range = data_v2_pb2.ColumnRange(**column_range_kwargs) + return data_v2_pb2.RowFilter(column_range_filter=column_range)
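A sketch of ``ColumnRangeFilter`` showing how the inclusive flags map onto the protobuf fields built in ``to_pb`` above; the family and qualifier names are illustrative::

    from gcloud.bigtable.row_filters import ColumnRangeFilter

    # Columns b'a' through b'c' in family 'cf1'; both endpoints are
    # inclusive by default, selecting the *_qualifier_closed fields.
    col_filter = ColumnRangeFilter('cf1', start_column=b'a', end_column=b'c')
    col_filter_pb = col_filter.to_pb()

    # Excluding the end column switches to end_qualifier_open instead.
    half_open = ColumnRangeFilter('cf1', start_column=b'a', end_column=b'c',
                                  inclusive_end=False)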
+ + +
[docs]class ValueRegexFilter(_RegexFilter): + """Row filter for a value regular expression. + + The ``regex`` must be a valid RE2 pattern. See Google's + `RE2 reference`_ for the accepted syntax. + + .. _RE2 reference: https://github.com/google/re2/wiki/Syntax + + .. note:: + + Special care must be taken with the expression used. Since + each of these properties can contain arbitrary bytes, the ``\\C`` + escape sequence must be used if a true wildcard is desired. The ``.`` + character will not match the new line character ``\\n``, which may be + present in a binary value. + + :type regex: bytes + :param regex: A regular expression (RE2) to match cells with values that + match this regex. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(value_regex_filter=self.regex)
+ + +
[docs]class ValueRangeFilter(RowFilter): + """A range of values to restrict to in a row filter. + + Will only match cells that have values in this range. + + Both the start and end value can be included or excluded in the range. + By default, we include them both, but this can be changed with optional + flags. + + :type start_value: bytes + :param start_value: The start of the range of values. If no value is used, + the backend applies no lower bound to the values. + + :type end_value: bytes + :param end_value: The end of the range of values. If no value is used, + the backend applies no upper bound to the values. + + :type inclusive_start: bool + :param inclusive_start: Boolean indicating if the start value should be + included in the range (or excluded). Defaults + to :data:`True` if ``start_value`` is passed and + no ``inclusive_start`` was given. + + :type inclusive_end: bool + :param inclusive_end: Boolean indicating if the end value should be + included in the range (or excluded). Defaults + to :data:`True` if ``end_value`` is passed and + no ``inclusive_end`` was given. + + :raises: :class:`ValueError <exceptions.ValueError>` if ``inclusive_start`` + is set but no ``start_value`` is given or if ``inclusive_end`` + is set but no ``end_value`` is given + """ + + def __init__(self, start_value=None, end_value=None, + inclusive_start=None, inclusive_end=None): + if inclusive_start is None: + inclusive_start = True + elif start_value is None: + raise ValueError('Inclusive start was specified but no ' + 'start value was given.') + self.start_value = start_value + self.inclusive_start = inclusive_start + + if inclusive_end is None: + inclusive_end = True + elif end_value is None: + raise ValueError('Inclusive end was specified but no ' + 'end value was given.') + self.end_value = end_value + self.inclusive_end = inclusive_end + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.start_value == self.start_value and + other.end_value == self.end_value and + other.inclusive_start == self.inclusive_start and + other.inclusive_end == self.inclusive_end) + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + First converts to a :class:`.data_v2_pb2.ValueRange` and then uses + it to create a row filter protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + value_range_kwargs = {} + if self.start_value is not None: + if self.inclusive_start: + key = 'start_value_closed' + else: + key = 'start_value_open' + value_range_kwargs[key] = _to_bytes(self.start_value) + if self.end_value is not None: + if self.inclusive_end: + key = 'end_value_closed' + else: + key = 'end_value_open' + value_range_kwargs[key] = _to_bytes(self.end_value) + + value_range = data_v2_pb2.ValueRange(**value_range_kwargs) + return data_v2_pb2.RowFilter(value_range_filter=value_range)
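Similarly, a sketch of ``ValueRangeFilter`` (the byte values are illustrative)::

    from gcloud.bigtable.row_filters import ValueRangeFilter

    # Cells whose values sort between b'10' (inclusive) and b'20'
    # (exclusive); inclusive_end=False selects end_value_open in to_pb().
    value_filter = ValueRangeFilter(start_value=b'10', end_value=b'20',
                                    inclusive_end=False)
    value_filter_pb = value_filter.to_pb()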
+ + +class _CellCountFilter(RowFilter): + """Row filter that uses an integer count of cells. + + The cell count is used as an offset or a limit for the number + of results returned. + + :type num_cells: int + :param num_cells: An integer count / offset / limit. + """ + + def __init__(self, num_cells): + self.num_cells = num_cells + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.num_cells == self.num_cells + + +
[docs]class CellsRowOffsetFilter(_CellCountFilter): + """Row filter to skip cells in a row. + + :type num_cells: int + :param num_cells: Skips the first N cells of the row. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter( + cells_per_row_offset_filter=self.num_cells)
+ + +
[docs]class CellsRowLimitFilter(_CellCountFilter): + """Row filter to limit cells in a row. + + :type num_cells: int + :param num_cells: Matches only the first N cells of the row. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(cells_per_row_limit_filter=self.num_cells)
+ + +
[docs]class CellsColumnLimitFilter(_CellCountFilter): + """Row filter to limit cells in a column. + + :type num_cells: int + :param num_cells: Matches only the most recent N cells within each column. + This filters a (family name, column) pair, based on + timestamps of each cell. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter( + cells_per_column_limit_filter=self.num_cells)
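The three cell-count filters above are easy to confuse; this short sketch contrasts them::

    from gcloud.bigtable.row_filters import (
        CellsColumnLimitFilter, CellsRowLimitFilter, CellsRowOffsetFilter)

    skip_two = CellsRowOffsetFilter(2)       # drop the first 2 cells per row
    first_ten = CellsRowLimitFilter(10)      # keep at most 10 cells per row
    newest_only = CellsColumnLimitFilter(1)  # newest cell in each column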
+ + +
[docs]class StripValueTransformerFilter(_BoolFilter): + """Row filter that transforms cells into empty string (0 bytes). + + :type flag: bool + :param flag: If :data:`True`, replaces each cell's value with the empty + string. As the name indicates, this is more useful as a + transformer than a generic query / filter. + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(strip_value_transformer=self.flag)
+ + +
[docs]class ApplyLabelFilter(RowFilter): + """Filter to apply labels to cells. + + Intended to be used as an intermediate filter on a pre-existing filtered + result set. This way, if two sets are combined, the label can tell where + the cell(s) originated. This allows the client to determine which results + were produced from which part of the filter. + + .. note:: + + Due to a technical limitation of the backend, it is not currently + possible to apply multiple labels to a cell. + + :type label: str + :param label: Label to apply to cells in the output row. Values must be + at most 15 characters long, and match the pattern + ``[a-z0-9\\-]+``. + """ + + def __init__(self, label): + self.label = label + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.label == self.label + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + return data_v2_pb2.RowFilter(apply_label_transformer=self.label)
+ + +class _FilterCombination(RowFilter): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + + def __init__(self, filters=None): + if filters is None: + filters = [] + self.filters = filters + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return other.filters == self.filters + + +
[docs]class RowFilterChain(_FilterCombination): + """Chain of row filters. + + Sends rows through several filters in sequence. The filters are "chained" + together to process a row. After the first filter is applied, the second + is applied to the filtered output and so on for subsequent filters. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + chain = data_v2_pb2.RowFilter.Chain( + filters=[row_filter.to_pb() for row_filter in self.filters]) + return data_v2_pb2.RowFilter(chain=chain)
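A hedged sketch of composing filters with ``RowFilterChain``; the sub-filters are illustrative choices from this module::

    from gcloud.bigtable.row_filters import (
        CellsColumnLimitFilter, FamilyNameRegexFilter, RowFilterChain)

    # First restrict to family 'cf1', then keep only the newest cell of
    # each remaining column; chained filters apply in order.
    chain = RowFilterChain(filters=[
        FamilyNameRegexFilter('cf1'),
        CellsColumnLimitFilter(1),
    ])
    chain_pb = chain.to_pb()  # RowFilter(chain=...) wrapping both sub-filters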
+ + +
[docs]class RowFilterUnion(_FilterCombination): + """Union of row filters. + + Sends rows through several filters simultaneously, then + merges / interleaves all the filtered results together. + + If multiple cells are produced with the same column and timestamp, + they will all appear in the output row in an unspecified mutual order. + + :type filters: list + :param filters: List of :class:`RowFilter` + """ + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + interleave = data_v2_pb2.RowFilter.Interleave( + filters=[row_filter.to_pb() for row_filter in self.filters]) + return data_v2_pb2.RowFilter(interleave=interleave)
+ + +
[docs]class ConditionalRowFilter(RowFilter): + """Conditional row filter which exhibits ternary behavior. + + Executes one of two filters based on another filter. If the ``base_filter`` + returns any cells in the row, then ``true_filter`` is executed. If not, + then ``false_filter`` is executed. + + .. note:: + + The ``base_filter`` does not execute atomically with the true and false + filters, which may lead to inconsistent or unexpected results. + + Additionally, executing a :class:`ConditionalRowFilter` has poor + performance on the server, especially when ``false_filter`` is set. + + :type base_filter: :class:`RowFilter` + :param base_filter: The filter to condition on before executing the + true/false filters. + + :type true_filter: :class:`RowFilter` + :param true_filter: (Optional) The filter to execute if there are any cells + matching ``base_filter``. If not provided, no results + will be returned in the true case. + + :type false_filter: :class:`RowFilter` + :param false_filter: (Optional) The filter to execute if there are no cells + matching ``base_filter``. If not provided, no results + will be returned in the false case. + """ + + def __init__(self, base_filter, true_filter=None, false_filter=None): + self.base_filter = base_filter + self.true_filter = true_filter + self.false_filter = false_filter + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.base_filter == self.base_filter and + other.true_filter == self.true_filter and + other.false_filter == self.false_filter) + +
[docs] def to_pb(self): + """Converts the row filter to a protobuf. + + :rtype: :class:`.data_v2_pb2.RowFilter` + :returns: The converted current object. + """ + condition_kwargs = {'predicate_filter': self.base_filter.to_pb()} + if self.true_filter is not None: + condition_kwargs['true_filter'] = self.true_filter.to_pb() + if self.false_filter is not None: + condition_kwargs['false_filter'] = self.false_filter.to_pb() + condition = data_v2_pb2.RowFilter.Condition(**condition_kwargs) + return data_v2_pb2.RowFilter(condition=condition)
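Finally, a sketch of the ternary ``ConditionalRowFilter``; the column name is illustrative::

    from gcloud.bigtable.row_filters import (
        ColumnQualifierRegexFilter, ConditionalRowFilter,
        StripValueTransformerFilter)

    # If a row has any 'flag' column, emit its cells with values stripped;
    # with no false_filter, non-matching rows produce no output.
    cond = ConditionalRowFilter(
        base_filter=ColumnQualifierRegexFilter(b'flag'),
        true_filter=StripValueTransformerFilter(True))
    cond_pb = cond.to_pb()  # condition with predicate_filter + true_filter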
diff --git a/0.18.1/_modules/gcloud/bigtable/table.html b/0.18.1/_modules/gcloud/bigtable/table.html
new file mode 100644
index 000000000000..9ea7f73b9f94
--- /dev/null
+++ b/0.18.1/_modules/gcloud/bigtable/table.html
@@ -0,0 +1,612 @@
gcloud.bigtable.table — gcloud 0.18.1 documentation
Source code for gcloud.bigtable.table

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""User friendly container for Google Cloud Bigtable Table."""
+
+from gcloud._helpers import _to_bytes
+from gcloud.bigtable._generated import (
+    bigtable_pb2 as data_messages_v2_pb2)
+from gcloud.bigtable._generated import (
+    bigtable_table_admin_pb2 as table_admin_messages_v2_pb2)
+from gcloud.bigtable._generated import (
+    table_pb2 as table_v2_pb2)
+from gcloud.bigtable.column_family import _gc_rule_from_pb
+from gcloud.bigtable.column_family import ColumnFamily
+from gcloud.bigtable.row import AppendRow
+from gcloud.bigtable.row import ConditionalRow
+from gcloud.bigtable.row import DirectRow
+from gcloud.bigtable.row_data import PartialRowsData
+
+
+
[docs]class Table(object): + """Representation of a Google Cloud Bigtable Table. + + .. note:: + + We don't define any properties on a table other than the name. + The only other fields are ``column_families`` and ``granularity``. + The ``column_families`` are not stored locally and + ``granularity`` is an enum with only one value. + + We can use a :class:`Table` to: + + * :meth:`create` the table + * :meth:`delete` the table + * :meth:`list_column_families` in the table + + :type table_id: str + :param table_id: The ID of the table. + + :type instance: :class:`Instance <.instance.Instance>` + :param instance: The instance that owns the table. + """ + + def __init__(self, table_id, instance): + self.table_id = table_id + self._instance = instance + + @property + def name(self): + """Table name used in requests. + + .. note:: + + This property will not change if ``table_id`` does not, but the + return value is not cached. + + The table name is of the form + + ``"projects/../instances/../tables/{table_id}"`` + + :rtype: str + :returns: The table name. + """ + return self._instance.name + '/tables/' + self.table_id + +
[docs] def column_family(self, column_family_id, gc_rule=None): + """Factory to create a column family associated with this table. + + :type column_family_id: str + :param column_family_id: The ID of the column family. Must be of the + form ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + + :type gc_rule: :class:`.GarbageCollectionRule` + :param gc_rule: (Optional) The garbage collection settings for this + column family. + + :rtype: :class:`.ColumnFamily` + :returns: A column family owned by this table. + """ + return ColumnFamily(column_family_id, self, gc_rule=gc_rule)
+ +
[docs] def row(self, row_key, filter_=None, append=False): + """Factory to create a row associated with this table. + + .. warning:: + + At most one of ``filter_`` and ``append`` can be used in a + :class:`Row`. + + :type row_key: bytes + :param row_key: The key for the row being created. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) Filter to be used for conditional mutations. + See :class:`.DirectRow` for more details. + + :type append: bool + :param append: (Optional) Flag to determine if the row should be used + for append mutations. + + :rtype: :class:`.DirectRow` + :returns: A row owned by this table. + :raises: :class:`ValueError <exceptions.ValueError>` if both + ``filter_`` and ``append`` are used. + """ + if append and filter_ is not None: + raise ValueError('At most one of filter_ and append can be set') + if append: + return AppendRow(row_key, self) + elif filter_ is not None: + return ConditionalRow(row_key, self, filter_=filter_) + else: + return DirectRow(row_key, self)
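A sketch of the ``row`` factory in use; ``instance`` is assumed to be an existing :class:`Instance` and the keys are illustrative::

    from gcloud.bigtable.row_filters import RowSampleFilter
    from gcloud.bigtable.table import Table

    table = Table('my-table', instance)  # 'instance' assumed to exist

    direct = table.row(b'row-key-1')                       # DirectRow
    conditional = table.row(b'row-key-2',
                            filter_=RowSampleFilter(0.5))  # ConditionalRow
    append = table.row(b'row-key-3', append=True)          # AppendRow
    # Passing both filter_ and append=True raises ValueError.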
+ + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return (other.table_id == self.table_id and + other._instance == self._instance) + + def __ne__(self, other): + return not self.__eq__(other) + +
[docs] def create(self, initial_split_keys=None, column_families=()): + """Creates this table. + + .. note:: + + A create request returns a + :class:`._generated.table_pb2.Table` but we don't use + this response. + + :type initial_split_keys: list + :param initial_split_keys: (Optional) List of row keys that will be + used to initially split the table into + several tablets (Tablets are similar to + HBase regions). Given two split keys, + ``"s1"`` and ``"s2"``, three tablets will be + created, spanning the key ranges: + ``[, s1)``, ``[s1, s2)``, ``[s2, )``. + + :type column_families: list + :param column_families: (Optional) List or other iterable of + :class:`.ColumnFamily` instances. + """ + if initial_split_keys is not None: + split_pb = table_admin_messages_v2_pb2.CreateTableRequest.Split + initial_split_keys = [ + split_pb(key=key) for key in initial_split_keys] + + table_pb = None + if column_families: + table_pb = table_v2_pb2.Table() + for col_fam in column_families: + curr_id = col_fam.column_family_id + table_pb.column_families[curr_id].MergeFrom(col_fam.to_pb()) + + request_pb = table_admin_messages_v2_pb2.CreateTableRequest( + initial_splits=initial_split_keys or [], + parent=self._instance.name, + table_id=self.table_id, + table=table_pb, + ) + client = self._instance._client + # We expect a `._generated.table_pb2.Table` + client._table_stub.CreateTable(request_pb, client.timeout_seconds)
+ +
[docs] def delete(self): + """Delete this table.""" + request_pb = table_admin_messages_v2_pb2.DeleteTableRequest( + name=self.name) + client = self._instance._client + # We expect a `google.protobuf.empty_pb2.Empty` + client._table_stub.DeleteTable(request_pb, client.timeout_seconds)
+ +
[docs] def list_column_families(self): + """List the column families owned by this table. + + :rtype: dict + :returns: Dictionary of column families attached to this table. Keys + are strings (column family names) and values are + :class:`.ColumnFamily` instances. + :raises: :class:`ValueError <exceptions.ValueError>` if the column + family name from the response does not agree with the computed + name from the column family ID. + """ + request_pb = table_admin_messages_v2_pb2.GetTableRequest( + name=self.name) + client = self._instance._client + # We expect a `._generated.table_pb2.Table` + table_pb = client._table_stub.GetTable(request_pb, + client.timeout_seconds) + + result = {} + for column_family_id, value_pb in table_pb.column_families.items(): + gc_rule = _gc_rule_from_pb(value_pb.gc_rule) + column_family = self.column_family(column_family_id, + gc_rule=gc_rule) + result[column_family_id] = column_family + return result
+ +
[docs] def read_row(self, row_key, filter_=None): + """Read a single row from this table. + + :type row_key: bytes + :param row_key: The key of the row to read from. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + row. If unset, returns the entire row. + + :rtype: :class:`.PartialRowData`, :data:`NoneType <types.NoneType>` + :returns: The contents of the row if any chunks were returned in + the response, otherwise :data:`None`. + :raises: :class:`ValueError <exceptions.ValueError>` if a commit row + chunk is never encountered. + """ + request_pb = _create_row_request(self.name, row_key=row_key, + filter_=filter_) + client = self._instance._client + response_iterator = client._data_stub.ReadRows(request_pb, + client.timeout_seconds) + rows_data = PartialRowsData(response_iterator) + rows_data.consume_all() + if rows_data.state not in (rows_data.NEW_ROW, rows_data.START): + raise ValueError('The row remains partial / is not committed.') + + if len(rows_data.rows) == 0: + return None + + return rows_data.rows[row_key]
+ +
[docs] def read_rows(self, start_key=None, end_key=None, limit=None, + filter_=None): + """Read rows from this table. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + no upper bound is used and the read continues to the + end of the table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads every column in + each row. + + :rtype: :class:`.PartialRowsData` + :returns: A :class:`.PartialRowsData` convenience wrapper for consuming + the streamed results. + """ + request_pb = _create_row_request( + self.name, start_key=start_key, end_key=end_key, filter_=filter_, + limit=limit) + client = self._instance._client + response_iterator = client._data_stub.ReadRows(request_pb, + client.timeout_seconds) + # We expect an iterator of `data_messages_v2_pb2.ReadRowsResponse` + return PartialRowsData(response_iterator)
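A hedged usage sketch for ``read_rows``; ``table`` is assumed to be a :class:`Table` bound to a live instance, and the key range is illustrative::

    # Stream rows in [b'user#', b'user$') and buffer them locally.
    rows = table.read_rows(start_key=b'user#', end_key=b'user$', limit=100)
    rows.consume_all()  # PartialRowsData drains the response iterator

    for row_key, row_data in rows.rows.items():
        print(row_key, row_data)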
+ +
[docs] def sample_row_keys(self): + """Read a sample of row keys in the table. + + The returned row keys will delimit contiguous sections of the table of + approximately equal size, which can be used to break up the data for + distributed tasks like mapreduces. + + The elements in the iterator are a SampleRowKeys response and they have + the properties ``offset_bytes`` and ``row_key``. They occur in sorted + order. The table might have contents before the first row key in the + list and after the last one, but a key containing the empty string + indicates "end of table" and will be the last response given, if + present. + + .. note:: + + Row keys in this list may not have ever been written to or read + from, and users should therefore not make any assumptions about the + row key structure that are specific to their use case. + + The ``offset_bytes`` field on a response indicates the approximate + total storage space used by all rows in the table which precede + ``row_key``. Buffering the contents of all rows between two subsequent + samples would require space roughly equal to the difference in their + ``offset_bytes`` fields. + + :rtype: :class:`grpc.framework.alpha._reexport._CancellableIterator` + :returns: A cancel-able iterator. Can be consumed by calling ``next()`` + or by casting to a :class:`list` and can be cancelled by + calling ``cancel()``. + """ + request_pb = data_messages_v2_pb2.SampleRowKeysRequest( + table_name=self.name) + client = self._instance._client + response_iterator = client._data_stub.SampleRowKeys( + request_pb, client.timeout_seconds) + return response_iterator
+ + +def _create_row_request(table_name, row_key=None, start_key=None, end_key=None, + filter_=None, limit=None): + """Creates a request to read rows in a table. + + :type table_name: str + :param table_name: The name of the table to read from. + + :type row_key: bytes + :param row_key: (Optional) The key of a specific row to read from. + + :type start_key: bytes + :param start_key: (Optional) The beginning of a range of row keys to + read from. The range will include ``start_key``. If + left empty, will be interpreted as the empty string. + + :type end_key: bytes + :param end_key: (Optional) The end of a range of row keys to read from. + The range will not include ``end_key``. If left empty, + no upper bound is used and the read continues to the + end of the table. + + :type filter_: :class:`.RowFilter` + :param filter_: (Optional) The filter to apply to the contents of the + specified row(s). If unset, reads the entire table. + + :type limit: int + :param limit: (Optional) The read will terminate after committing to N + rows' worth of results. The default (zero) is to return + all results. + + :rtype: :class:`data_messages_v2_pb2.ReadRowsRequest` + :returns: The ``ReadRowsRequest`` protobuf corresponding to the inputs. + :raises: :class:`ValueError <exceptions.ValueError>` if both + ``row_key`` and one of ``start_key`` and ``end_key`` are set + """ + request_kwargs = {'table_name': table_name} + if (row_key is not None and + (start_key is not None or end_key is not None)): + raise ValueError('Row key and row range cannot be ' + 'set simultaneously') + range_kwargs = {} + if start_key is not None or end_key is not None: + if start_key is not None: + range_kwargs['start_key_closed'] = _to_bytes(start_key) + if end_key is not None: + range_kwargs['end_key_open'] = _to_bytes(end_key) + if filter_ is not None: + request_kwargs['filter'] = filter_.to_pb() + if limit is not None: + request_kwargs['rows_limit'] = limit + + message = data_messages_v2_pb2.ReadRowsRequest(**request_kwargs) + + if row_key is not None: + message.rows.row_keys.append(_to_bytes(row_key)) + + if range_kwargs: + message.rows.row_ranges.add(**range_kwargs) + + return message +
diff --git a/0.18.1/_modules/gcloud/client.html b/0.18.1/_modules/gcloud/client.html
new file mode 100644
index 000000000000..25faf6505f45
--- /dev/null
+++ b/0.18.1/_modules/gcloud/client.html
@@ -0,0 +1,419 @@
gcloud.client — gcloud 0.18.1 documentation
Source code for gcloud.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes for client used to interact with Google Cloud APIs."""
+
+from oauth2client.service_account import ServiceAccountCredentials
+import six
+
+from gcloud._helpers import _determine_default_project
+from gcloud.connection import Connection
+from gcloud.credentials import get_credentials
+
+
+class _ClientFactoryMixin(object):
+    """Mixin to allow factories that create credentials.
+
+    .. note::
+
+        This class is virtual.
+    """
+
+    @classmethod
+    def from_service_account_json(cls, json_credentials_path, *args, **kwargs):
+        """Factory to retrieve JSON credentials while creating client.
+
+        :type json_credentials_path: string
+        :param json_credentials_path: The path to a private key file (this file
+                                      was given to you when you created the
+                                      service account). This file must contain
+                                      a JSON object with a private key and
+                                      other credentials information (downloaded
+                                      from the Google APIs console).
+
+        :type args: tuple
+        :param args: Remaining positional arguments to pass to constructor.
+
+        :type kwargs: dict
+        :param kwargs: Remaining keyword arguments to pass to constructor.
+
+        :rtype: :class:`gcloud.client.Client`
+        :returns: The client created with the retrieved JSON credentials.
+        :raises: :class:`TypeError` if there is a conflict with the kwargs
+                 and the credentials created by the factory.
+        """
+        if 'credentials' in kwargs:
+            raise TypeError('credentials must not be in keyword arguments')
+        credentials = ServiceAccountCredentials.from_json_keyfile_name(
+            json_credentials_path)
+        kwargs['credentials'] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_p12(cls, client_email, private_key_path,
+                                 *args, **kwargs):
+        """Factory to retrieve P12 credentials while creating client.
+
+        .. note::
+          Unless you have an explicit reason to use a PKCS12 key for your
+          service account, we recommend using a JSON key.
+
+        :type client_email: string
+        :param client_email: The e-mail attached to the service account.
+
+        :type private_key_path: string
+        :param private_key_path: The path to a private key file (this file was
+                                 given to you when you created the service
+                                 account). This file must be in P12 format.
+
+        :type args: tuple
+        :param args: Remaining positional arguments to pass to constructor.
+
+        :type kwargs: dict
+        :param kwargs: Remaining keyword arguments to pass to constructor.
+
+        :rtype: :class:`gcloud.client.Client`
+        :returns: The client created with the retrieved P12 credentials.
+        :raises: :class:`TypeError` if there is a conflict with the kwargs
+                 and the credentials created by the factory.
+        """
+        if 'credentials' in kwargs:
+            raise TypeError('credentials must not be in keyword arguments')
+        credentials = ServiceAccountCredentials.from_p12_keyfile(
+            client_email, private_key_path)
+        kwargs['credentials'] = credentials
+        return cls(*args, **kwargs)
+
+
+
[docs]class Client(_ClientFactoryMixin): + """Client to bundle configuration needed for API requests. + + Assumes that the associated ``_connection_class`` only accepts + ``http`` and ``credentials`` in its constructor. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + + def __init__(self, credentials=None, http=None): + if credentials is None and http is None: + credentials = get_credentials() + self.connection = self._connection_class( + credentials=credentials, http=http)
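A brief sketch of constructing a :class:`Client`; the key-file path is hypothetical::

    from gcloud.client import Client

    # With no arguments, get_credentials() infers credentials from the
    # environment (GAE, GOOGLE_APPLICATION_CREDENTIALS, gcloud CLI, GCE).
    client = Client()

    # Or load service-account JSON credentials explicitly.
    client = Client.from_service_account_json('/path/to/keyfile.json')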
+ + +class _ClientProjectMixin(object): + """Mixin to allow setting the project on the client. + + :type project: string + :param project: the project which the client acts on behalf of. If not + passed falls back to the default inferred from the + environment. + + :raises: :class:`EnvironmentError` if the project is neither passed in nor + set in the environment. :class:`ValueError` if the project value + is invalid. + """ + + def __init__(self, project=None): + project = self._determine_default(project) + if project is None: + raise EnvironmentError('Project was not passed and could not be ' + 'determined from the environment.') + if isinstance(project, six.binary_type): + project = project.decode('utf-8') + if not isinstance(project, six.string_types): + raise ValueError('Project must be a string.') + self.project = project + + @staticmethod + def _determine_default(project): + """Helper: use default project detection.""" + return _determine_default_project(project) + + +
[docs]class JSONClient(Client, _ClientProjectMixin): + """Client for a Google JSON-based API. + + Assumes such APIs use the ``project`` and the client needs to store this + value. + + :type project: string + :param project: the project which the client acts on behalf of. If not + passed, falls back to the default inferred from the + environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + + :raises: :class:`EnvironmentError` if the project is neither passed in nor + set in the environment. + """ + + def __init__(self, project=None, credentials=None, http=None): + _ClientProjectMixin.__init__(self, project=project) + Client.__init__(self, credentials=credentials, http=http)
diff --git a/0.18.1/_modules/gcloud/connection.html b/0.18.1/_modules/gcloud/connection.html
new file mode 100644
index 000000000000..2667be474219
--- /dev/null
+++ b/0.18.1/_modules/gcloud/connection.html
@@ -0,0 +1,591 @@
gcloud.connection — gcloud 0.18.1 documentation
Source code for gcloud.connection

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Shared implementation of connections to API servers."""
+
+import json
+from pkg_resources import get_distribution
+import six
+from six.moves.urllib.parse import urlencode
+
+import httplib2
+
+from gcloud.exceptions import make_exception
+
+
+API_BASE_URL = 'https://www.googleapis.com'
+"""The base of the API call URL."""
+
+
+
[docs]class Connection(object): + """A generic connection to Google Cloud Platform. + + Subclasses should understand only the basic types in method arguments; + however, they should be capable of returning advanced types. + + If no value is passed in for ``http``, a :class:`httplib2.Http` object + will be created and authorized with the ``credentials``. Otherwise, the + ``credentials`` and ``http`` need not be related. + + Subclasses may seek to use the private key from ``credentials`` to sign + data. + + A custom (non-``httplib2``) HTTP object must have a ``request`` method + which accepts the following arguments: + + * ``uri`` + * ``method`` + * ``body`` + * ``headers`` + + In addition, ``redirections`` and ``connection_type`` may be used. + + Without the use of ``credentials.authorize(http)``, a custom ``http`` + object will also need to be able to add a bearer token to API + requests and handle token refresh on 401 errors. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for this connection. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. + """ + + USER_AGENT = "gcloud-python/{0}".format(get_distribution('gcloud').version) + """The user agent for gcloud-python requests.""" + + SCOPE = None + """The scopes required for authenticating with a service. + + Needs to be set by subclasses. + """ + + def __init__(self, credentials=None, http=None): + self._http = http + self._credentials = self._create_scoped_credentials( + credentials, self.SCOPE) + + @property + def credentials(self): + """Getter for current credentials. + + :rtype: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :returns: The credentials object associated with this connection. + """ + return self._credentials + + @property + def http(self): + """A getter for the HTTP transport used in talking to the API. + + :rtype: :class:`httplib2.Http` + :returns: An Http object used to transport data. + """ + if self._http is None: + self._http = httplib2.Http() + if self._credentials: + self._http = self._credentials.authorize(self._http) + return self._http + + @staticmethod + def _create_scoped_credentials(credentials, scope): + """Create a scoped set of credentials if it is required. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to add a scope to. + + :type scope: list of URLs + :param scope: the effective service auth scopes for the connection. + + :rtype: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :returns: A new credentials object that has a scope added (if needed). + """ + if credentials: + try: + if credentials.create_scoped_required(): + credentials = credentials.create_scoped(scope) + except AttributeError: + pass + return credentials
+ + +
[docs]class JSONConnection(Connection): + """A connection to a Google JSON-based API. + + These APIs are discovery based. For reference: + + https://developers.google.com/discovery/ + + This class defines :meth:`api_request` for making a generic JSON + API request; service-specific API requests are built elsewhere. + + The class constants + + * :attr:`API_BASE_URL` + * :attr:`API_VERSION` + * :attr:`API_URL_TEMPLATE` + + must be updated by subclasses. + """ + + API_BASE_URL = None + """The base of the API call URL.""" + + API_VERSION = None + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = None + """A template for the URL of a particular API call.""" + + @classmethod +
[docs] def build_api_url(cls, path, query_params=None, + api_base_url=None, api_version=None): + """Construct an API url given a few components, some optional. + + Typically, you shouldn't need to use this method. + + :type path: string + :param path: The path to the resource (ie, ``'/b/bucket-name'``). + + :type query_params: dict or list + :param query_params: A dictionary of keys and values (or list of + key-value pairs) to insert into the query + string of the URL. + + :type api_base_url: string + :param api_base_url: The base URL for the API endpoint. + Typically you won't have to provide this. + + :type api_version: string + :param api_version: The version of the API to call. + Typically you shouldn't provide this and instead + use the default for the library. + + :rtype: string + :returns: The URL assembled from the pieces provided. + """ + url = cls.API_URL_TEMPLATE.format( + api_base_url=(api_base_url or cls.API_BASE_URL), + api_version=(api_version or cls.API_VERSION), + path=path) + + query_params = query_params or {} + if query_params: + url += '?' + urlencode(query_params) + + return url
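To make the URL assembly concrete, here is a sketch with a toy subclass; the class constants below are invented for illustration only::

    from gcloud.connection import JSONConnection

    class _ToyConnection(JSONConnection):
        """Illustrative subclass; these constants are made up."""
        API_BASE_URL = 'https://www.googleapis.com'
        API_VERSION = 'v1'
        API_URL_TEMPLATE = '{api_base_url}/toy/{api_version}{path}'

    url = _ToyConnection.build_api_url(
        '/b/bucket-name', query_params={'fields': 'items'})
    # url == 'https://www.googleapis.com/toy/v1/b/bucket-name?fields=items'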
+ + def _make_request(self, method, url, data=None, content_type=None, + headers=None, target_object=None): + """A low level method to send a request to the API. + + Typically, you shouldn't need to use this method. + + :type method: string + :param method: The HTTP method to use in the request. + + :type url: string + :param url: The URL to send the request to. + + :type data: string + :param data: The data to send as the body of the request. + + :type content_type: string + :param content_type: The proper MIME type of the data provided. + + :type headers: dict + :param headers: A dictionary of HTTP headers to send with the request. + + :type target_object: object or :class:`NoneType` + :param target_object: Argument to be used by library callers. + This can allow custom behavior, for example, to + defer an HTTP request and complete initialization + of the object at a later time. + + :rtype: tuple of ``response`` (a dictionary of sorts) + and ``content`` (a string). + :returns: The HTTP response object and the content of the response, + returned by :meth:`_do_request`. + """ + headers = headers or {} + headers['Accept-Encoding'] = 'gzip' + + if data: + content_length = len(str(data)) + else: + content_length = 0 + + # NOTE: str is intended, bytes are sufficient for headers. + headers['Content-Length'] = str(content_length) + + if content_type: + headers['Content-Type'] = content_type + + headers['User-Agent'] = self.USER_AGENT + + return self._do_request(method, url, headers, data, target_object) + + def _do_request(self, method, url, headers, data, + target_object): # pylint: disable=unused-argument + """Low-level helper: perform the actual API request over HTTP. + + Allows batch context managers to override and defer a request. + + :type method: string + :param method: The HTTP method to use in the request. + + :type url: string + :param url: The URL to send the request to. + + :type headers: dict + :param headers: A dictionary of HTTP headers to send with the request. + + :type data: string + :param data: The data to send as the body of the request. + + :type target_object: object or :class:`NoneType` + :param target_object: Unused ``target_object`` here but may be used + by a superclass. + + :rtype: tuple of ``response`` (a dictionary of sorts) + and ``content`` (a string). + :returns: The HTTP response object and the content of the response. + """ + return self.http.request(uri=url, method=method, headers=headers, + body=data) + +
[docs] def api_request(self, method, path, query_params=None, + data=None, content_type=None, + api_base_url=None, api_version=None, + expect_json=True, _target_object=None): + """Make a request over the HTTP transport to the API. + + You shouldn't need to use this method, but if you plan to + interact with the API using these primitives, this is the + correct one to use. + + :type method: string + :param method: The HTTP method name (ie, ``GET``, ``POST``, etc). + Required. + + :type path: string + :param path: The path to the resource (ie, ``'/b/bucket-name'``). + Required. + + :type query_params: dict or list + :param query_params: A dictionary of keys and values (or list of + key-value pairs) to insert into the query + string of the URL. + + :type data: string + :param data: The data to send as the body of the request. Default is + the empty string. + + :type content_type: string + :param content_type: The proper MIME type of the data provided. Default + is None. + + :type api_base_url: string + :param api_base_url: The base URL for the API endpoint. + Typically you won't have to provide this. + Default is the standard API base URL. + + :type api_version: string + :param api_version: The version of the API to call. Typically + you shouldn't provide this and instead use + the default for the library. Default is the + latest API version supported by + gcloud-python. + + :type expect_json: bool + :param expect_json: If True, this method will try to parse the + response as JSON and raise an exception if + that cannot be done. Default is True. + + :type _target_object: :class:`object` or :class:`NoneType` + :param _target_object: Protected argument to be used by library + callers. This can allow custom behavior, for + example, to defer an HTTP request and complete + initialization of the object at a later time. + + :raises: Exception if the response code is not 200 OK. + :rtype: dict or str + :returns: The API response payload, either as a raw string or + a dictionary if the response is valid JSON. + """ + url = self.build_api_url(path=path, query_params=query_params, + api_base_url=api_base_url, + api_version=api_version) + + # Making the executive decision that any dictionary + # data will be sent properly as JSON. + if data and isinstance(data, dict): + data = json.dumps(data) + content_type = 'application/json' + + response, content = self._make_request( + method=method, url=url, data=data, content_type=content_type, + target_object=_target_object) + + if not 200 <= response.status < 300: + raise make_exception(response, content, + error_info=method + ' ' + url) + + string_or_bytes = (six.binary_type, six.text_type) + if content and expect_json and isinstance(content, string_or_bytes): + content_type = response.get('content-type', '') + if not content_type.startswith('application/json'): + raise TypeError('Expected JSON, got %s' % content_type) + if isinstance(content, six.binary_type): + content = content.decode('utf-8') + return json.loads(content) + + return content
diff --git a/0.18.1/_modules/gcloud/credentials.html b/0.18.1/_modules/gcloud/credentials.html
new file mode 100644
index 000000000000..b3dea59b16d5
--- /dev/null
+++ b/0.18.1/_modules/gcloud/credentials.html
@@ -0,0 +1,479 @@
gcloud.credentials — gcloud 0.18.1 documentation
Source code for gcloud.credentials

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A simple wrapper around the OAuth2 credentials library."""
+
+import base64
+import datetime
+import six
+from six.moves.urllib.parse import urlencode
+
+from oauth2client import client
+
+from gcloud._helpers import UTC
+from gcloud._helpers import _NOW
+from gcloud._helpers import _microseconds_from_datetime
+
+
+
[docs]def get_credentials(): + """Gets credentials implicitly from the current environment. + + .. note:: + + You should not need to use this function directly. Instead, use a + helper method which uses this method under the hood. + + Checks environment in order of precedence: + + * Google App Engine (production and testing) + * Environment variable :envvar:`GOOGLE_APPLICATION_CREDENTIALS` pointing to + a file with stored credentials information. + * Stored "well known" file associated with ``gcloud`` command line tool. + * Google Compute Engine production environment. + + The file referred to in :envvar:`GOOGLE_APPLICATION_CREDENTIALS` is + expected to contain information about credentials that are ready to use. + This means either service account information or user account information + with a ready-to-use refresh token: + + .. code:: json + + { + "type": "authorized_user", + "client_id": "...", + "client_secret": "...", + "refresh_token": "..." + } + + or + + .. code:: json + + { + "type": "service_account", + "client_id": "...", + "client_email": "...", + "private_key_id": "...", + "private_key": "..." + } + + The second of these is simply a JSON key downloaded from the Google APIs + console. The first is a close cousin of the "client secrets" JSON file + used by :mod:`oauth2client.clientsecrets` but differs in formatting. + + :rtype: :class:`oauth2client.client.GoogleCredentials`, + :class:`oauth2client.contrib.appengine.AppAssertionCredentials`, + :class:`oauth2client.contrib.gce.AppAssertionCredentials`, + :class:`oauth2client.service_account.ServiceAccountCredentials` + :returns: A new credentials instance corresponding to the implicit + environment. + """ + return client.GoogleCredentials.get_application_default()
+ + +def _get_signed_query_params(credentials, expiration, string_to_sign): + """Gets query parameters for creating a signed URL. + + :type credentials: :class:`oauth2client.client.AssertionCredentials` + :param credentials: The credentials used to create a private key + for signing text. + + :type expiration: int or long + :param expiration: When the signed URL should expire. + + :type string_to_sign: string + :param string_to_sign: The string to be signed by the credentials. + + :raises AttributeError: If ``sign_blob`` is unavailable. + + :rtype: dict + :returns: Query parameters matching the signing credentials with a + signed payload. + """ + if not hasattr(credentials, 'sign_blob'): + raise AttributeError('You need a private key to sign credentials. ' + 'The credentials you are currently using %s ' + 'just contain a token. See https://googlecloud' + 'platform.github.io/gcloud-python/stable/gcloud-' + 'auth.html#setting-up-a-service-account for more ' + 'details.' % type(credentials)) + + _, signature_bytes = credentials.sign_blob(string_to_sign) + signature = base64.b64encode(signature_bytes) + service_account_name = credentials.service_account_email + return { + 'GoogleAccessId': service_account_name, + 'Expires': str(expiration), + 'Signature': signature, + } + + +def _get_expiration_seconds(expiration): + """Convert 'expiration' to a number of seconds in the future. + + :type expiration: int, long, datetime.datetime, datetime.timedelta + :param expiration: When the signed URL should expire. + + :raises TypeError: When ``expiration`` is not an integer, datetime, or + timedelta. + + :rtype: int + :returns: a timestamp as an absolute number of seconds. + """ + # If it's a timedelta, add it to `now` in UTC. + if isinstance(expiration, datetime.timedelta): + now = _NOW().replace(tzinfo=UTC) + expiration = now + expiration + + # If it's a datetime, convert to a timestamp. + if isinstance(expiration, datetime.datetime): + micros = _microseconds_from_datetime(expiration) + expiration = micros // 10**6 + + if not isinstance(expiration, six.integer_types): + raise TypeError('Expected an integer timestamp, datetime, or ' + 'timedelta. Got %s' % type(expiration)) + return expiration + + +
[docs]def generate_signed_url(credentials, resource, expiration, + api_access_endpoint='', + method='GET', content_md5=None, + content_type=None, response_type=None, + response_disposition=None, generation=None): + """Generate signed URL to provide query-string auth'n to a resource. + + .. note:: + + Assumes ``credentials`` implements a ``sign_blob()`` method that takes + bytes to sign and returns a pair of the key ID (unused here) and the + signed bytes (this is abstract in the base class + :class:`oauth2client.client.AssertionCredentials`). Also assumes + ``credentials`` has a ``service_account_email`` property which + identifies the credentials. + + .. note:: + + If you are on Google Compute Engine, you can't generate a signed URL. + Follow `Issue 922`_ for updates on this. If you'd like to be able to + generate a signed URL from GCE, you can use a standard service account + from a JSON file rather than a GCE service account. + + See headers `reference`_ for more details on optional arguments. + + .. _Issue 922: https://github.com/GoogleCloudPlatform/\ + gcloud-python/issues/922 + .. _reference: https://cloud.google.com/storage/docs/reference-headers + + :type credentials: :class:`oauth2client.appengine.AppAssertionCredentials` + :param credentials: Credentials object with an associated private key to + sign text. + + :type resource: string + :param resource: A pointer to a specific resource + (typically, ``/bucket-name/path/to/blob.txt``). + + :type expiration: :class:`int`, :class:`long`, :class:`datetime.datetime`, + :class:`datetime.timedelta` + :param expiration: When the signed URL should expire. + + :type api_access_endpoint: str + :param api_access_endpoint: Optional URI base. Defaults to empty string. + + :type method: str + :param method: The HTTP verb that will be used when requesting the URL. + Defaults to ``'GET'``. + + :type content_md5: str + :param content_md5: (Optional) The MD5 hash of the object referenced by + ``resource``. + + :type content_type: str + :param content_type: (Optional) The content type of the object referenced + by ``resource``. + + :type response_type: str + :param response_type: (Optional) Content type of responses to requests for + the signed URL. Used to over-ride the content type of + the underlying resource. + + :type response_disposition: str + :param response_disposition: (Optional) Content disposition of responses to + requests for the signed URL. + + :type generation: str + :param generation: (Optional) A value that indicates which generation of + the resource to fetch. + + :rtype: string + :returns: A signed URL you can use to access the resource + until expiration. + """ + expiration = _get_expiration_seconds(expiration) + + # Generate the string to sign. + string_to_sign = '\n'.join([ + method, + content_md5 or '', + content_type or '', + str(expiration), + resource]) + + # Set the right query parameters. + query_params = _get_signed_query_params(credentials, + expiration, + string_to_sign) + if response_type is not None: + query_params['response-content-type'] = response_type + if response_disposition is not None: + query_params['response-content-disposition'] = response_disposition + if generation is not None: + query_params['generation'] = generation + + # Return the built URL. + return '{endpoint}{resource}?{querystring}'.format( + endpoint=api_access_endpoint, resource=resource, + querystring=urlencode(query_params))
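A hedged sketch of signing a URL with service-account credentials (which implement ``sign_blob``); the key path and bucket name are hypothetical::

    import datetime

    from oauth2client.service_account import ServiceAccountCredentials

    from gcloud.credentials import generate_signed_url

    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        '/path/to/keyfile.json')
    url = generate_signed_url(
        credentials, '/my-bucket/blob.txt',
        expiration=datetime.timedelta(hours=1),
        api_access_endpoint='https://storage.googleapis.com')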
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/datastore/batch.html b/0.18.1/_modules/gcloud/datastore/batch.html
new file mode 100644
index 000000000000..edf83ee5e4b2
--- /dev/null
+++ b/0.18.1/_modules/gcloud/datastore/batch.html
@@ -0,0 +1,534 @@
+gcloud.datastore.batch — gcloud 0.18.1 documentation

Source code for gcloud.datastore.batch

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with a batch of updates / deletes.
+
+Batches provide the ability to execute multiple operations
+in a single request to the Cloud Datastore API.
+
+See
+https://cloud.google.com/datastore/docs/concepts/entities#Datastore_Batch_operations
+"""
+
+from gcloud.datastore import helpers
+from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2
+
+
+
+class Batch(object):
+    """An abstraction representing a collected group of updates / deletes.
+
+    Used to build up a bulk mutation.
+
+    For example, the following snippet of code will put the two ``save``
+    operations and the ``delete`` operation into the same mutation, and send
+    them to the server in a single API request::
+
+      >>> from gcloud import datastore
+      >>> client = datastore.Client()
+      >>> batch = client.batch()
+      >>> batch.put(entity1)
+      >>> batch.put(entity2)
+      >>> batch.delete(key3)
+      >>> batch.commit()
+
+    You can also use a batch as a context manager, in which case
+    :meth:`commit` will be called automatically if its block exits without
+    raising an exception::
+
+      >>> with batch:
+      ...     batch.put(entity1)
+      ...     batch.put(entity2)
+      ...     batch.delete(key3)
+
+    By default, no updates will be sent if the block exits with an error::
+
+      >>> with batch:
+      ...     do_some_work(batch)
+      ...     raise Exception()  # rolls back
+
+    :type client: :class:`gcloud.datastore.client.Client`
+    :param client: The client used to connect to datastore.
+    """
+
+    _id = None  # "protected" attribute, always None for non-transactions
+
+    _INITIAL = 0
+    """Enum value for _INITIAL status of batch/transaction."""
+
+    _IN_PROGRESS = 1
+    """Enum value for _IN_PROGRESS status of batch/transaction."""
+
+    _ABORTED = 2
+    """Enum value for _ABORTED status of batch/transaction."""
+
+    _FINISHED = 3
+    """Enum value for _FINISHED status of batch/transaction."""
+
+    def __init__(self, client):
+        self._client = client
+        self._commit_request = _datastore_pb2.CommitRequest()
+        self._partial_key_entities = []
+        self._status = self._INITIAL
+
[docs] def current(self): + """Return the topmost batch / transaction, or None.""" + return self._client.current_batch
+ + @property + def project(self): + """Getter for project in which the batch will run. + + :rtype: :class:`str` + :returns: The project in which the batch will run. + """ + return self._client.project + + @property + def namespace(self): + """Getter for namespace in which the batch will run. + + :rtype: :class:`str` + :returns: The namespace in which the batch will run. + """ + return self._client.namespace + + @property + def connection(self): + """Getter for connection over which the batch will run. + + :rtype: :class:`gcloud.datastore.connection.Connection` + :returns: The connection over which the batch will run. + """ + return self._client.connection + + def _add_partial_key_entity_pb(self): + """Adds a new mutation for an entity with a partial key. + + :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity` + :returns: The newly created entity protobuf that will be + updated and sent with a commit. + """ + new_mutation = self.mutations.add() + return new_mutation.insert + + def _add_complete_key_entity_pb(self): + """Adds a new mutation for an entity with a completed key. + + :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity` + :returns: The newly created entity protobuf that will be + updated and sent with a commit. + """ + # We use ``upsert`` for entities with completed keys, rather than + # ``insert`` or ``update``, in order not to create race conditions + # based on prior existence / removal of the entity. + new_mutation = self.mutations.add() + return new_mutation.upsert + + def _add_delete_key_pb(self): + """Adds a new mutation for a key to be deleted. + + :rtype: :class:`gcloud.datastore._generated.entity_pb2.Key` + :returns: The newly created key protobuf that will be + deleted when sent with a commit. + """ + new_mutation = self.mutations.add() + return new_mutation.delete + + @property + def mutations(self): + """Getter for the changes accumulated by this batch. + + Every batch is committed with a single commit request containing all + the work to be done as mutations. Inside a batch, calling :meth:`put` + with an entity, or :meth:`delete` with a key, builds up the request by + adding a new mutation. This getter returns the protobuf that has been + built-up so far. + + :rtype: iterable + :returns: The list of :class:`._generated.datastore_pb2.Mutation` + protobufs to be sent in the commit request. + """ + return self._commit_request.mutations + +
[docs] def put(self, entity): + """Remember an entity's state to be saved during :meth:`commit`. + + .. note:: + Any existing properties for the entity will be replaced by those + currently set on this instance. Already-stored properties which do + not correspond to keys set on this instance will be removed from + the datastore. + + .. note:: + Property values which are "text" ('unicode' in Python2, 'str' in + Python3) map to 'string_value' in the datastore; values which are + "bytes" ('str' in Python2, 'bytes' in Python3) map to 'blob_value'. + + When an entity has a partial key, calling :meth:`commit` sends it as + an ``insert`` mutation and the key is completed. On return, + the key for the ``entity`` passed in is updated to match the key ID + assigned by the server. + + :type entity: :class:`gcloud.datastore.entity.Entity` + :param entity: the entity to be saved. + + :raises: ValueError if entity has no key assigned, or if the key's + ``project`` does not match ours. + """ + if entity.key is None: + raise ValueError("Entity must have a key") + + if self.project != entity.key.project: + raise ValueError("Key must be from same project as batch") + + if entity.key.is_partial: + entity_pb = self._add_partial_key_entity_pb() + self._partial_key_entities.append(entity) + else: + entity_pb = self._add_complete_key_entity_pb() + + _assign_entity_to_pb(entity_pb, entity)
+ +
[docs] def delete(self, key): + """Remember a key to be deleted during :meth:`commit`. + + :type key: :class:`gcloud.datastore.key.Key` + :param key: the key to be deleted. + + :raises: ValueError if key is not complete, or if the key's + ``project`` does not match ours. + """ + if key.is_partial: + raise ValueError("Key must be complete") + + if self.project != key.project: + raise ValueError("Key must be from same project as batch") + + key_pb = key.to_protobuf() + self._add_delete_key_pb().CopyFrom(key_pb)
+ +
+    def begin(self):
+        """Begins a batch.
+
+        This method is called automatically when entering a with
+        statement; however, it can be called explicitly if you don't want
+        to use a context manager.
+
+        Overridden by :class:`gcloud.datastore.transaction.Transaction`.
+
+        :raises: :class:`ValueError` if the batch has already begun.
+        """
+        if self._status != self._INITIAL:
+            raise ValueError('Batch already started previously.')
+        self._status = self._IN_PROGRESS
+
+    def _commit(self):
+        """Commits the batch.
+
+        This is called by :meth:`commit`.
+        """
+        # NOTE: ``self._commit_request`` will be modified.
+        _, updated_keys = self.connection.commit(
+            self.project, self._commit_request, self._id)
+        # If the back-end returns without error, we are guaranteed that
+        # :meth:`Connection.commit` will return keys that match
+        # ``_partial_key_entities`` in length and order.
+        for new_key_pb, entity in zip(updated_keys,
+                                      self._partial_key_entities):
+            new_id = new_key_pb.path[-1].id
+            entity.key = entity.key.completed_key(new_id)
+
+    def commit(self):
+        """Commits the batch.
+
+        This is called automatically upon exiting a with statement;
+        however, it can be called explicitly if you don't want to use a
+        context manager.
+        """
+        try:
+            self._commit()
+        finally:
+            self._status = self._FINISHED
+ +
[docs] def rollback(self): + """Rolls back the current batch. + + Marks the batch as aborted (can't be used again). + + Overridden by :class:`gcloud.datastore.transaction.Transaction`. + """ + self._status = self._ABORTED
+ + def __enter__(self): + self._client._push_batch(self) + self.begin() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + if exc_type is None: + self.commit() + else: + self.rollback() + finally: + self._client._pop_batch()
+ + +def _assign_entity_to_pb(entity_pb, entity): + """Copy ``entity`` into ``entity_pb``. + + Helper method for ``Batch.put``. + + :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity` + :param entity_pb: The entity owned by a mutation. + + :type entity: :class:`gcloud.datastore.entity.Entity` + :param entity: The entity being updated within the batch / transaction. + """ + bare_entity_pb = helpers.entity_to_protobuf(entity) + bare_entity_pb.key.CopyFrom(bare_entity_pb.key) + entity_pb.CopyFrom(bare_entity_pb) +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/datastore/client.html b/0.18.1/_modules/gcloud/datastore/client.html
new file mode 100644
index 000000000000..cd891e470a10
--- /dev/null
+++ b/0.18.1/_modules/gcloud/datastore/client.html
@@ -0,0 +1,687 @@
+gcloud.datastore.client — gcloud 0.18.1 documentation

Source code for gcloud.datastore.client

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convenience wrapper for invoking APIs/factories w/ a project."""
+
+import os
+
+from gcloud._helpers import _LocalStack
+from gcloud._helpers import _determine_default_project as _base_default_project
+from gcloud.client import _ClientProjectMixin
+from gcloud.client import Client as _BaseClient
+from gcloud.datastore import helpers
+from gcloud.datastore.connection import Connection
+from gcloud.datastore.batch import Batch
+from gcloud.datastore.entity import Entity
+from gcloud.datastore.key import Key
+from gcloud.datastore.query import Query
+from gcloud.datastore.transaction import Transaction
+from gcloud.environment_vars import GCD_DATASET
+
+
+_MAX_LOOPS = 128
+"""Maximum number of iterations to wait for deferred keys."""
+
+
+def _get_gcd_project():
+    """Gets the GCD application ID if it can be inferred."""
+    return os.getenv(GCD_DATASET)
+
+
+def _determine_default_project(project=None):
+    """Determine default project explicitly or implicitly as fall-back.
+
+    In the implicit case, four environments are supported. In order of
+    precedence, the implicit environments are:
+
+    * DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing)
+    * GCLOUD_PROJECT environment variable
+    * Google App Engine application ID
+    * Google Compute Engine project ID (from metadata server)
+
+    :type project: string
+    :param project: Optional. The project to use as default.
+
+    :rtype: string or ``NoneType``
+    :returns: Default project if it can be determined.
+    """
+    if project is None:
+        project = _get_gcd_project()
+
+    if project is None:
+        project = _base_default_project(project=project)
+
+    return project
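A sketch of the explicit / implicit fall-back (the environment variable name comes from ``gcloud.environment_vars.GCD_DATASET``; the project values are made up):

import os

_determine_default_project('explicit-project')   # -> 'explicit-project'

os.environ['DATASTORE_DATASET'] = 'emulator-project'
_determine_default_project()                     # -> 'emulator-project'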
+
+
+def _extended_lookup(connection, project, key_pbs,
+                     missing=None, deferred=None,
+                     eventual=False, transaction_id=None):
+    """Repeat lookup until all keys found (unless stop requested).
+
+    Helper function for :meth:`Client.get_multi`.
+
+    :type connection: :class:`gcloud.datastore.connection.Connection`
+    :param connection: The connection used to connect to datastore.
+
+    :type project: string
+    :param project: The project to make the request for.
+
+    :type key_pbs: list of :class:`gcloud.datastore._generated.entity_pb2.Key`
+    :param key_pbs: The keys to retrieve from the datastore.
+
+    :type missing: list
+    :param missing: (Optional) If a list is passed, the key-only entity
+                    protobufs returned by the backend as "missing" will be
+                    copied into it.
+
+    :type deferred: list
+    :param deferred: (Optional) If a list is passed, the key protobufs returned
+                     by the backend as "deferred" will be copied into it.
+
+    :type eventual: bool
+    :param eventual: If False (the default), request ``STRONG`` read
+                     consistency.  If True, request ``EVENTUAL`` read
+                     consistency.
+
+    :type transaction_id: string
+    :param transaction_id: If passed, make the request in the scope of
+                           the given transaction.  Incompatible with
+                           ``eventual==True``.
+
+    :rtype: list of :class:`gcloud.datastore._generated.entity_pb2.Entity`
+    :returns: The requested entities.
+    :raises: :class:`ValueError` if ``missing`` / ``deferred`` are not
+             ``None`` or an empty list.
+    """
+    if missing is not None and missing != []:
+        raise ValueError('missing must be None or an empty list')
+
+    if deferred is not None and deferred != []:
+        raise ValueError('deferred must be None or an empty list')
+
+    results = []
+
+    loop_num = 0
+    while loop_num < _MAX_LOOPS:  # loop against possible deferred.
+        loop_num += 1
+
+        results_found, missing_found, deferred_found = connection.lookup(
+            project=project,
+            key_pbs=key_pbs,
+            eventual=eventual,
+            transaction_id=transaction_id,
+        )
+
+        results.extend(results_found)
+
+        if missing is not None:
+            missing.extend(missing_found)
+
+        if deferred is not None:
+            deferred.extend(deferred_found)
+            break
+
+        if len(deferred_found) == 0:
+            break
+
+        # We have deferred keys, and the user didn't ask to know about
+        # them, so retry (but only with the deferred ones).
+        key_pbs = deferred_found
+
+    return results
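Two calling patterns worth noting (a sketch; ``conn`` and ``key_pbs`` are assumed to exist):

# Let the helper retry deferred keys internally until all results
# arrive (bounded by _MAX_LOOPS).
results = _extended_lookup(conn, 'my-project', key_pbs)

# Ask to see deferred keys yourself; the loop then exits after the
# first response instead of retrying.
deferred = []
results = _extended_lookup(conn, 'my-project', key_pbs, deferred=deferred)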
+
+
+
[docs]class Client(_BaseClient, _ClientProjectMixin): + """Convenience wrapper for invoking APIs/factories w/ a project. + + :type project: string + :param project: (optional) The project to pass to proxied API methods. + + :type namespace: string + :param namespace: (optional) namespace to pass to proxied API methods. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + _connection_class = Connection + + def __init__(self, project=None, namespace=None, + credentials=None, http=None): + _ClientProjectMixin.__init__(self, project=project) + self.namespace = namespace + self._batch_stack = _LocalStack() + super(Client, self).__init__(credentials, http) + + @staticmethod + def _determine_default(project): + """Helper: override default project detection.""" + return _determine_default_project(project) + + def _push_batch(self, batch): + """Push a batch/transaction onto our stack. + + "Protected", intended for use by batch / transaction context mgrs. + + :type batch: :class:`gcloud.datastore.batch.Batch`, or an object + implementing its API. + :param batch: newly-active batch/transaction. + """ + self._batch_stack.push(batch) + + def _pop_batch(self): + """Pop a batch/transaction from our stack. + + "Protected", intended for use by batch / transaction context mgrs. + + :raises: IndexError if the stack is empty. + :rtype: :class:`gcloud.datastore.batch.Batch`, or an object + implementing its API. + :returns: the top-most batch/transaction, after removing it. + """ + return self._batch_stack.pop() + + @property + def current_batch(self): + """Currently-active batch. + + :rtype: :class:`gcloud.datastore.batch.Batch`, or an object + implementing its API, or ``NoneType`` (if no batch is active). + :returns: The batch/transaction at the top of the batch stack. + """ + return self._batch_stack.top + + @property + def current_transaction(self): + """Currently-active transaction. + + :rtype: :class:`gcloud.datastore.transaction.Transaction`, or an object + implementing its API, or ``NoneType`` (if no transaction is + active). + :returns: The transaction at the top of the batch stack. + """ + transaction = self.current_batch + if isinstance(transaction, Transaction): + return transaction + +
[docs] def get(self, key, missing=None, deferred=None, transaction=None): + """Retrieve an entity from a single key (if it exists). + + .. note:: + + This is just a thin wrapper over :meth:`get_multi`. + The backend API does not make a distinction between a single key or + multiple keys in a lookup request. + + :type key: :class:`gcloud.datastore.key.Key` + :param key: The key to be retrieved from the datastore. + + :type missing: list + :param missing: (Optional) If a list is passed, the key-only entities + returned by the backend as "missing" will be copied + into it. + + :type deferred: list + :param deferred: (Optional) If a list is passed, the keys returned + by the backend as "deferred" will be copied into it. + + :type transaction: :class:`gcloud.datastore.transaction.Transaction` + :param transaction: (Optional) Transaction to use for read consistency. + If not passed, uses current transaction, if set. + + :rtype: :class:`gcloud.datastore.entity.Entity` or ``NoneType`` + :returns: The requested entity if it exists. + """ + entities = self.get_multi(keys=[key], missing=missing, + deferred=deferred, transaction=transaction) + if entities: + return entities[0]
+ +
[docs] def get_multi(self, keys, missing=None, deferred=None, transaction=None): + """Retrieve entities, along with their attributes. + + :type keys: list of :class:`gcloud.datastore.key.Key` + :param keys: The keys to be retrieved from the datastore. + + :type missing: list + :param missing: (Optional) If a list is passed, the key-only entities + returned by the backend as "missing" will be copied + into it. If the list is not empty, an error will occur. + + :type deferred: list + :param deferred: (Optional) If a list is passed, the keys returned + by the backend as "deferred" will be copied into it. + If the list is not empty, an error will occur. + + :type transaction: :class:`gcloud.datastore.transaction.Transaction` + :param transaction: (Optional) Transaction to use for read consistency. + If not passed, uses current transaction, if set. + + :rtype: list of :class:`gcloud.datastore.entity.Entity` + :returns: The requested entities. + :raises: :class:`ValueError` if one or more of ``keys`` has a project + which does not match our project. + """ + if not keys: + return [] + + ids = set(key.project for key in keys) + for current_id in ids: + if current_id != self.project: + raise ValueError('Keys do not match project') + + if transaction is None: + transaction = self.current_transaction + + entity_pbs = _extended_lookup( + connection=self.connection, + project=self.project, + key_pbs=[k.to_protobuf() for k in keys], + missing=missing, + deferred=deferred, + transaction_id=transaction and transaction.id, + ) + + if missing is not None: + missing[:] = [ + helpers.entity_from_protobuf(missed_pb) + for missed_pb in missing] + + if deferred is not None: + deferred[:] = [ + helpers.key_from_protobuf(deferred_pb) + for deferred_pb in deferred] + + return [helpers.entity_from_protobuf(entity_pb) + for entity_pb in entity_pbs]
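A usage sketch for the ``missing`` argument (kind and IDs are made up; assumes a configured client):

from gcloud import datastore

client = datastore.Client()
keys = [client.key('MyKind', 1234), client.key('MyKind', 5678)]

missing = []
entities = client.get_multi(keys, missing=missing)
# ``entities`` holds the found rows; ``missing`` now holds key-only
# entities for any keys the backend reported as absent.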
+ +
[docs] def put(self, entity): + """Save an entity in the Cloud Datastore. + + .. note:: + + This is just a thin wrapper over :meth:`put_multi`. + The backend API does not make a distinction between a single + entity or multiple entities in a commit request. + + :type entity: :class:`gcloud.datastore.entity.Entity` + :param entity: The entity to be saved to the datastore. + """ + self.put_multi(entities=[entity])
+ +
[docs] def put_multi(self, entities): + """Save entities in the Cloud Datastore. + + :type entities: list of :class:`gcloud.datastore.entity.Entity` + :param entities: The entities to be saved to the datastore. + + :raises: :class:`ValueError` if ``entities`` is a single entity. + """ + if isinstance(entities, Entity): + raise ValueError("Pass a sequence of entities") + + if not entities: + return + + current = self.current_batch + in_batch = current is not None + + if not in_batch: + current = self.batch() + + for entity in entities: + current.put(entity) + + if not in_batch: + current.commit()
+ +
[docs] def delete(self, key): + """Delete the key in the Cloud Datastore. + + .. note:: + + This is just a thin wrapper over :meth:`delete_multi`. + The backend API does not make a distinction between a single key or + multiple keys in a commit request. + + :type key: :class:`gcloud.datastore.key.Key` + :param key: The key to be deleted from the datastore. + """ + self.delete_multi(keys=[key])
+ +
+    def delete_multi(self, keys):
+        """Delete keys from the Cloud Datastore.
+
+        :type keys: list of :class:`gcloud.datastore.key.Key`
+        :param keys: The keys to be deleted from the datastore.
+        """
+        if not keys:
+            return
+
+        # We allow partial keys to attempt a delete; the backend will fail.
+        current = self.current_batch
+        in_batch = current is not None
+
+        if not in_batch:
+            current = self.batch()
+
+        for key in keys:
+            current.delete(key)
+
+        if not in_batch:
+            current.commit()
+ +
[docs] def allocate_ids(self, incomplete_key, num_ids): + """Allocate a list of IDs from a partial key. + + :type incomplete_key: :class:`gcloud.datastore.key.Key` + :param incomplete_key: Partial key to use as base for allocated IDs. + + :type num_ids: int + :param num_ids: The number of IDs to allocate. + + :rtype: list of :class:`gcloud.datastore.key.Key` + :returns: The (complete) keys allocated with ``incomplete_key`` as + root. + :raises: :class:`ValueError` if ``incomplete_key`` is not a + partial key. + """ + if not incomplete_key.is_partial: + raise ValueError(('Key is not partial.', incomplete_key)) + + incomplete_key_pb = incomplete_key.to_protobuf() + incomplete_key_pbs = [incomplete_key_pb] * num_ids + + conn = self.connection + allocated_key_pbs = conn.allocate_ids(incomplete_key.project, + incomplete_key_pbs) + allocated_ids = [allocated_key_pb.path[-1].id + for allocated_key_pb in allocated_key_pbs] + return [incomplete_key.completed_key(allocated_id) + for allocated_id in allocated_ids]
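A sketch of ID allocation (the kind is made up; assumes a configured client):

from gcloud import datastore

client = datastore.Client()
partial_key = client.key('MyKind')          # no ID yet
keys = client.allocate_ids(partial_key, 3)  # three complete keys
ids = [key.id for key in keys]              # server-assigned integer IDs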
+ +
[docs] def key(self, *path_args, **kwargs): + """Proxy to :class:`gcloud.datastore.key.Key`. + + Passes our ``project``. + """ + if 'project' in kwargs: + raise TypeError('Cannot pass project') + kwargs['project'] = self.project + if 'namespace' not in kwargs: + kwargs['namespace'] = self.namespace + return Key(*path_args, **kwargs)
+ +
[docs] def batch(self): + """Proxy to :class:`gcloud.datastore.batch.Batch`.""" + return Batch(self)
+ +
[docs] def transaction(self): + """Proxy to :class:`gcloud.datastore.transaction.Transaction`.""" + return Transaction(self)
+ +
[docs] def query(self, **kwargs): + """Proxy to :class:`gcloud.datastore.query.Query`. + + Passes our ``project``. + """ + if 'client' in kwargs: + raise TypeError('Cannot pass client') + if 'project' in kwargs: + raise TypeError('Cannot pass project') + kwargs['project'] = self.project + if 'namespace' not in kwargs: + kwargs['namespace'] = self.namespace + return Query(self, **kwargs)
+
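Putting the factories together, a short sketch (kind, property, and value are made up):

from gcloud import datastore

client = datastore.Client()
query = client.query(kind='MyKind')       # project / namespace filled in
query.add_filter('property', '=', 'val')
for entity in query.fetch(limit=10):
    print(entity)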
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/datastore/connection.html b/0.18.1/_modules/gcloud/datastore/connection.html
new file mode 100644
index 000000000000..d482c2982317
--- /dev/null
+++ b/0.18.1/_modules/gcloud/datastore/connection.html
@@ -0,0 +1,665 @@
+gcloud.datastore.connection — gcloud 0.18.1 documentation

Source code for gcloud.datastore.connection

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Connections to gcloud datastore API servers."""
+
+import os
+
+from gcloud import connection
+from gcloud.environment_vars import GCD_HOST
+from gcloud.exceptions import make_exception
+from gcloud.datastore._generated import datastore_pb2 as _datastore_pb2
+from google.rpc import status_pb2
+
+
+
+class Connection(connection.Connection):
+    """A connection to the Google Cloud Datastore via the Protobuf API.
+
+    This class should understand only the basic types (and protobufs)
+    in method arguments; however, it should be capable of returning
+    advanced types.
+
+    :type credentials: :class:`oauth2client.client.OAuth2Credentials`
+    :param credentials: The OAuth2 Credentials to use for this connection.
+
+    :type http: :class:`httplib2.Http` or class that defines ``request()``.
+    :param http: An optional HTTP object to make requests.
+
+    :type api_base_url: string
+    :param api_base_url: The base of the API call URL. Defaults to
+                         :attr:`API_BASE_URL`.
+    """
+
+    API_BASE_URL = 'https://datastore.googleapis.com'
+    """The base of the API call URL."""
+
+    API_VERSION = 'v1beta3'
+    """The version of the API, used in building the API call's URL."""
+
+    API_URL_TEMPLATE = ('{api_base}/{api_version}/projects'
+                        '/{project}:{method}')
+    """A template for the URL of a particular API call."""
+
+    SCOPE = ('https://www.googleapis.com/auth/datastore',)
+    """The scopes required for authenticating as a Cloud Datastore consumer."""
+
+    def __init__(self, credentials=None, http=None, api_base_url=None):
+        super(Connection, self).__init__(credentials=credentials, http=http)
+        if api_base_url is None:
+            try:
+                # gcd.sh has /datastore/ in the path still since it supports
+                # v1beta2 and v1beta3 simultaneously.
+                api_base_url = '%s/datastore' % (os.environ[GCD_HOST],)
+            except KeyError:
+                api_base_url = self.__class__.API_BASE_URL
+        self.api_base_url = api_base_url
+
+    def _request(self, project, method, data):
+        """Make a request over the Http transport to the Cloud Datastore API.
+
+        :type project: string
+        :param project: The project to make the request for.
+
+        :type method: string
+        :param method: The API call method name (e.g., ``runQuery``,
+                       ``lookup``).
+
+        :type data: string
+        :param data: The data to send with the API call.
+                     Typically this is a serialized Protobuf string.
+
+        :rtype: string
+        :returns: The string response content from the API call.
+        :raises: :class:`gcloud.exceptions.GCloudError` if the response
+                 code is not 200 OK.
+        """
+        headers = {
+            'Content-Type': 'application/x-protobuf',
+            'Content-Length': str(len(data)),
+            'User-Agent': self.USER_AGENT,
+        }
+        headers, content = self.http.request(
+            uri=self.build_api_url(project=project, method=method),
+            method='POST', headers=headers, body=data)
+
+        status = headers['status']
+        if status != '200':
+            error_status = status_pb2.Status.FromString(content)
+            raise make_exception(headers, error_status.message, use_json=False)
+
+        return content
+
+    def _rpc(self, project, method, request_pb, response_pb_cls):
+        """Make a protobuf RPC request.
+
+        :type project: string
+        :param project: The project to connect to. This is
+                        usually your project name in the cloud console.
+
+        :type method: string
+        :param method: The name of the method to invoke.
+
+        :type request_pb: :class:`google.protobuf.message.Message` instance
+        :param request_pb: the protobuf instance representing the request.
+
+        :type response_pb_cls: A :class:`google.protobuf.message.Message`
+                               subclass.
+        :param response_pb_cls: The class used to unmarshall the response
+                                protobuf.
+
+        :rtype: :class:`google.protobuf.message.Message`
+        :returns: The RPC message parsed from the response.
+        """
+        response = self._request(project=project, method=method,
+                                 data=request_pb.SerializeToString())
+        return response_pb_cls.FromString(response)
+
[docs] def build_api_url(self, project, method, base_url=None, + api_version=None): + """Construct the URL for a particular API call. + + This method is used internally to come up with the URL to use when + making RPCs to the Cloud Datastore API. + + :type project: string + :param project: The project to connect to. This is + usually your project name in the cloud console. + + :type method: string + :param method: The API method to call (e.g. 'runQuery', 'lookup'). + + :type base_url: string + :param base_url: The base URL where the API lives. + You shouldn't have to provide this. + + :type api_version: string + :param api_version: The version of the API to connect to. + You shouldn't have to provide this. + + :rtype: str + :returns: The API URL created. + """ + return self.API_URL_TEMPLATE.format( + api_base=(base_url or self.api_base_url), + api_version=(api_version or self.API_VERSION), + project=project, method=method)
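For concreteness, a sketch of what the template expands to (the project name is made up):

conn = Connection()
conn.build_api_url('my-project', 'runQuery')
# -> 'https://datastore.googleapis.com/v1beta3/projects/my-project:runQuery'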
+ +
+    def lookup(self, project, key_pbs,
+               eventual=False, transaction_id=None):
+        """Lookup keys from a project in the Cloud Datastore.
+
+        Maps the ``DatastoreService.Lookup`` protobuf RPC.
+
+        This uses mostly protobufs
+        (:class:`gcloud.datastore._generated.entity_pb2.Key` as input and
+        :class:`gcloud.datastore._generated.entity_pb2.Entity` as output). It
+        is used under the hood in
+        :meth:`Client.get() <.datastore.client.Client.get>`:
+
+        >>> from gcloud import datastore
+        >>> client = datastore.Client(project='project')
+        >>> key = client.key('MyKind', 1234)
+        >>> client.get(key)
+        <Entity object>
+
+        Using a :class:`Connection` directly:
+
+        >>> connection.lookup('project', [key.to_protobuf()])
+        [<Entity protobuf>]
+
+        :type project: string
+        :param project: The project to look up the keys in.
+
+        :type key_pbs: list of
+                       :class:`gcloud.datastore._generated.entity_pb2.Key`
+        :param key_pbs: The keys to retrieve from the datastore.
+
+        :type eventual: bool
+        :param eventual: If False (the default), request ``STRONG`` read
+                         consistency.  If True, request ``EVENTUAL`` read
+                         consistency.
+
+        :type transaction_id: string
+        :param transaction_id: If passed, make the request in the scope of
+                               the given transaction.  Incompatible with
+                               ``eventual==True``.
+
+        :rtype: tuple
+        :returns: A triple of (``results``, ``missing``, ``deferred``) where
+                  both ``results`` and ``missing`` are lists of
+                  :class:`gcloud.datastore._generated.entity_pb2.Entity` and
+                  ``deferred`` is a list of
+                  :class:`gcloud.datastore._generated.entity_pb2.Key`.
+        """
+        lookup_request = _datastore_pb2.LookupRequest()
+        _set_read_options(lookup_request, eventual, transaction_id)
+        _add_keys_to_request(lookup_request.keys, key_pbs)
+
+        lookup_response = self._rpc(project, 'lookup', lookup_request,
+                                    _datastore_pb2.LookupResponse)
+
+        results = [result.entity for result in lookup_response.found]
+        missing = [result.entity for result in lookup_response.missing]
+
+        return results, missing, list(lookup_response.deferred)
+ +
[docs] def run_query(self, project, query_pb, namespace=None, + eventual=False, transaction_id=None): + """Run a query on the Cloud Datastore. + + Maps the ``DatastoreService.RunQuery`` protobuf RPC. + + Given a Query protobuf, sends a ``runQuery`` request to the + Cloud Datastore API and returns a list of entity protobufs + matching the query. + + You typically wouldn't use this method directly, in favor of the + :meth:`gcloud.datastore.query.Query.fetch` method. + + Under the hood, the :class:`gcloud.datastore.query.Query` class + uses this method to fetch data: + + >>> from gcloud import datastore + >>> client = datastore.Client() + >>> query = client.query(kind='MyKind') + >>> query.add_filter('property', '=', 'val') + + Using the query iterator's + :meth:`next_page() <.datastore.query.Iterator.next_page>` method: + + >>> query_iter = query.fetch() + >>> entities, more_results, cursor = query_iter.next_page() + >>> entities + [<list of Entity unmarshalled from protobuf>] + >>> more_results + <boolean of more results> + >>> cursor + <string containing cursor where fetch stopped> + + Under the hood this is doing: + + >>> connection.run_query('project', query.to_protobuf()) + [<list of Entity Protobufs>], cursor, more_results, skipped_results + + :type project: string + :param project: The project over which to run the query. + + :type query_pb: :class:`gcloud.datastore._generated.query_pb2.Query` + :param query_pb: The Protobuf representing the query to run. + + :type namespace: string + :param namespace: The namespace over which to run the query. + + :type eventual: bool + :param eventual: If False (the default), request ``STRONG`` read + consistency. If True, request ``EVENTUAL`` read + consistency. + + :type transaction_id: string + :param transaction_id: If passed, make the request in the scope of + the given transaction. Incompatible with + ``eventual==True``. + + :rtype: tuple + :returns: Four-tuple containing the entities returned, + the end cursor of the query, a ``more_results`` + enum and a count of the number of skipped results. + """ + request = _datastore_pb2.RunQueryRequest() + _set_read_options(request, eventual, transaction_id) + + if namespace: + request.partition_id.namespace_id = namespace + + request.query.CopyFrom(query_pb) + response = self._rpc(project, 'runQuery', request, + _datastore_pb2.RunQueryResponse) + return ( + [e.entity for e in response.batch.entity_results], + response.batch.end_cursor, # Assume response always has cursor. + response.batch.more_results, + response.batch.skipped_results, + )
+ +
[docs] def begin_transaction(self, project): + """Begin a transaction. + + Maps the ``DatastoreService.BeginTransaction`` protobuf RPC. + + :type project: string + :param project: The project to which the transaction applies. + + :rtype: bytes + :returns: The serialized transaction that was begun. + """ + request = _datastore_pb2.BeginTransactionRequest() + response = self._rpc(project, 'beginTransaction', request, + _datastore_pb2.BeginTransactionResponse) + return response.transaction
+ +
+    def commit(self, project, request, transaction_id):
+        """Commit mutations in context of current transaction (if any).
+
+        Maps the ``DatastoreService.Commit`` protobuf RPC.
+
+        :type project: string
+        :param project: The project to which the transaction applies.
+
+        :type request: :class:`._generated.datastore_pb2.CommitRequest`
+        :param request: The protobuf with the mutations being committed.
+
+        :type transaction_id: string or None
+        :param transaction_id: The transaction ID returned from
+                               :meth:`begin_transaction`.  Non-transactional
+                               batches must pass ``None``.
+
+        .. note::
+
+            This method will mutate ``request`` before using it.
+
+        :rtype: tuple
+        :returns: The pair of the number of index updates and a list of
+                  :class:`._generated.entity_pb2.Key` for each incomplete key
+                  that was completed in the commit.
+        """
+        if transaction_id:
+            request.mode = _datastore_pb2.CommitRequest.TRANSACTIONAL
+            request.transaction = transaction_id
+        else:
+            request.mode = _datastore_pb2.CommitRequest.NON_TRANSACTIONAL
+
+        response = self._rpc(project, 'commit', request,
+                             _datastore_pb2.CommitResponse)
+        return _parse_commit_response(response)
+ +
+    def rollback(self, project, transaction_id):
+        """Roll back the connection's existing transaction.
+
+        Maps the ``DatastoreService.Rollback`` protobuf RPC.
+
+        :type project: string
+        :param project: The project to which the transaction belongs.
+
+        :type transaction_id: string
+        :param transaction_id: The transaction ID returned from
+                               :meth:`begin_transaction`.
+        """
+        request = _datastore_pb2.RollbackRequest()
+        request.transaction = transaction_id
+        # Nothing to do with this response, so just execute the method.
+        self._rpc(project, 'rollback', request,
+                  _datastore_pb2.RollbackResponse)
+ +
+    def allocate_ids(self, project, key_pbs):
+        """Obtain backend-generated IDs for a set of keys.
+
+        Maps the ``DatastoreService.AllocateIds`` protobuf RPC.
+
+        :type project: string
+        :param project: The project to which the transaction belongs.
+
+        :type key_pbs: list of
+                       :class:`gcloud.datastore._generated.entity_pb2.Key`
+        :param key_pbs: The keys for which the backend should allocate IDs.
+
+        :rtype: list of :class:`gcloud.datastore._generated.entity_pb2.Key`
+        :returns: An equal number of keys, with IDs filled in by the backend.
+        """
+        request = _datastore_pb2.AllocateIdsRequest()
+        _add_keys_to_request(request.keys, key_pbs)
+        # The response carries the allocated (completed) keys.
+        response = self._rpc(project, 'allocateIds', request,
+                             _datastore_pb2.AllocateIdsResponse)
+        return list(response.keys)
+ + +def _set_read_options(request, eventual, transaction_id): + """Validate rules for read options, and assign to the request. + + Helper method for ``lookup()`` and ``run_query``. + + :raises: :class:`ValueError` if ``eventual`` is ``True`` and the + ``transaction_id`` is not ``None``. + """ + if eventual and (transaction_id is not None): + raise ValueError('eventual must be False when in a transaction') + + opts = request.read_options + if eventual: + opts.read_consistency = _datastore_pb2.ReadOptions.EVENTUAL + elif transaction_id: + opts.transaction = transaction_id + + +def _add_keys_to_request(request_field_pb, key_pbs): + """Add protobuf keys to a request object. + + :type request_field_pb: `RepeatedCompositeFieldContainer` + :param request_field_pb: A repeated proto field that contains keys. + + :type key_pbs: list of :class:`gcloud.datastore._generated.entity_pb2.Key` + :param key_pbs: The keys to add to a request. + """ + for key_pb in key_pbs: + request_field_pb.add().CopyFrom(key_pb) + + +def _parse_commit_response(commit_response_pb): + """Extract response data from a commit response. + + :type commit_response_pb: :class:`._generated.datastore_pb2.CommitResponse` + :param commit_response_pb: The protobuf response from a commit request. + + :rtype: tuple + :returns: The pair of the number of index updates and a list of + :class:`._generated.entity_pb2.Key` for each incomplete key + that was completed in the commit. + """ + mut_results = commit_response_pb.mutation_results + index_updates = commit_response_pb.index_updates + completed_keys = [mut_result.key for mut_result in mut_results + if mut_result.HasField('key')] # Message field (Key) + return index_updates, completed_keys +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/datastore/entity.html b/0.18.1/_modules/gcloud/datastore/entity.html
new file mode 100644
index 000000000000..d5d36c5519c7
--- /dev/null
+++ b/0.18.1/_modules/gcloud/datastore/entity.html
@@ -0,0 +1,376 @@
+gcloud.datastore.entity — gcloud 0.18.1 documentation

Source code for gcloud.datastore.entity

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Class for representing a single entity in the Cloud Datastore."""
+
+
+from gcloud._helpers import _ensure_tuple_or_list
+
+
+
+class Entity(dict):
+    """Entities are akin to rows in a relational database.
+
+    An entity stores the actual instance of data.
+
+    Each entity is officially represented with a
+    :class:`gcloud.datastore.key.Key` class, however it is possible that
+    you might create an Entity with only a partial Key (that is, a Key
+    with a Kind, and possibly a parent, but without an ID).  In such a
+    case, the datastore service will automatically assign an ID to the
+    partial key.
+
+    Entities in this API act like dictionaries with extras built in that
+    allow you to delete or persist the data stored on the entity.
+
+    Entities are mutable and act like a subclass of a dictionary.
+    This means you could take an existing entity and change the key
+    to duplicate the object.
+
+    Use :meth:`gcloud.datastore.client.Client.get` to retrieve an existing
+    entity:
+
+    >>> from gcloud import datastore
+    >>> client = datastore.Client()
+    >>> client.get(key)
+    <Entity[{'kind': 'EntityKind', id: 1234}] {'property': 'value'}>
+
+    You can then set values on the entity just like you would on any
+    other dictionary.
+
+    >>> entity['age'] = 20
+    >>> entity['name'] = 'JJ'
+    >>> entity
+    <Entity[{'kind': 'EntityKind', id: 1234}] {'age': 20, 'name': 'JJ'}>
+
+    And you can convert an entity to a regular Python dictionary with the
+    ``dict`` builtin:
+
+    >>> dict(entity)
+    {'age': 20, 'name': 'JJ'}
+
+    .. note::
+
+       When saving an entity to the backend, values which are "text"
+       (``unicode`` in Python2, ``str`` in Python3) will be saved using
+       the 'string_value' field, after being encoded to UTF-8.  When
+       retrieved from the back-end, such values will be decoded to "text"
+       again.  Values which are "bytes" (``str`` in Python2, ``bytes`` in
+       Python3) will be saved using the 'blob_value' field, without
+       any decoding / encoding step.
+
+    :type key: :class:`gcloud.datastore.key.Key`
+    :param key: Optional key to be set on entity.
+
+    :type exclude_from_indexes: tuple of string
+    :param exclude_from_indexes: Names of fields whose values are not to be
+                                 indexed for this entity.
+    """
+
+    def __init__(self, key=None, exclude_from_indexes=()):
+        super(Entity, self).__init__()
+        self.key = key
+        self._exclude_from_indexes = set(_ensure_tuple_or_list(
+            'exclude_from_indexes', exclude_from_indexes))
+        # NOTE: This will be populated when parsing a protobuf in
+        #       gcloud.datastore.helpers.entity_from_protobuf.
+        self._meanings = {}
+
+    def __eq__(self, other):
+        """Compare two entities for equality.
+
+        Entities compare equal if their keys compare equal, and their
+        properties compare equal.
+
+        :rtype: boolean
+        :returns: True if the entities compare equal, else False.
+        """
+        if not isinstance(other, Entity):
+            return False
+
+        return (self.key == other.key and
+                self._exclude_from_indexes == other._exclude_from_indexes and
+                self._meanings == other._meanings and
+                super(Entity, self).__eq__(other))
+
+    def __ne__(self, other):
+        """Compare two entities for inequality.
+
+        Entities compare equal if their keys compare equal, and their
+        properties compare equal.
+
+        :rtype: boolean
+        :returns: False if the entities compare equal, else True.
+        """
+        return not self.__eq__(other)
+
+    @property
+    def kind(self):
+        """Get the kind of the current entity.
+
+        .. note::
+           This relies entirely on the :class:`gcloud.datastore.key.Key`
+           set on the entity.  That means that we're not storing the kind
+           of the entity at all, just the properties and a pointer to a
+           Key which knows its Kind.
+        """
+        if self.key:
+            return self.key.kind
+
+    @property
+    def exclude_from_indexes(self):
+        """Names of fields which are *not* to be indexed for this entity.
+
+        :rtype: sequence of field names
+        :returns: The set of fields excluded from indexes.
+        """
+        return frozenset(self._exclude_from_indexes)
+
+    def __repr__(self):
+        if self.key:
+            return '<Entity%s %s>' % (self.key.path,
+                                      super(Entity, self).__repr__())
+        else:
+            return '<Entity %s>' % (super(Entity, self).__repr__())
+
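A construction sketch (kind and property names are made up; assumes a configured client):

from gcloud import datastore

client = datastore.Client()
entity = datastore.Entity(key=client.key('MyKind'),
                          exclude_from_indexes=('blob_data',))
entity['blob_data'] = b'a large unindexed payload'
entity['name'] = 'indexed as usual'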
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/datastore/helpers.html b/0.18.1/_modules/gcloud/datastore/helpers.html
new file mode 100644
index 000000000000..0183876652e1
--- /dev/null
+++ b/0.18.1/_modules/gcloud/datastore/helpers.html
@@ -0,0 +1,703 @@
+gcloud.datastore.helpers — gcloud 0.18.1 documentation

Source code for gcloud.datastore.helpers

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Helper functions for dealing with Cloud Datastore's Protobuf API.
+
+The non-private functions are part of the API.
+"""
+
+import datetime
+import itertools
+
+from google.protobuf import struct_pb2
+from google.type import latlng_pb2
+import six
+
+from gcloud._helpers import _datetime_to_pb_timestamp
+from gcloud._helpers import _pb_timestamp_to_datetime
+from gcloud.datastore._generated import entity_pb2 as _entity_pb2
+from gcloud.datastore.entity import Entity
+from gcloud.datastore.key import Key
+
+__all__ = ('entity_from_protobuf', 'key_from_protobuf')
+
+
+def _get_meaning(value_pb, is_list=False):
+    """Get the meaning from a protobuf value.
+
+    :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value`
+    :param value_pb: The protobuf value to be checked for an
+                     associated meaning.
+
+    :type is_list: bool
+    :param is_list: Boolean indicating if the ``value_pb`` contains
+                    a list value.
+
+    :rtype: int or list
+    :returns: The meaning for the ``value_pb`` if one is set, else
+              :data:`None`. For a list value whose meanings disagree,
+              a list of meanings is returned. If all the list meanings
+              agree, they are condensed into a single meaning.
+    """
+    meaning = None
+    if is_list:
+        # An empty list will have no values, hence no shared meaning
+        # set among them.
+        if len(value_pb.array_value.values) == 0:
+            return None
+
+        # We check among all the meanings, some of which may be None,
+        # the rest which may be enum/int values.
+        all_meanings = [_get_meaning(sub_value_pb)
+                        for sub_value_pb in value_pb.array_value.values]
+        unique_meanings = set(all_meanings)
+        if len(unique_meanings) == 1:
+            # If there is a unique meaning, we preserve it.
+            meaning = unique_meanings.pop()
+        else:  # We know len(value_pb.array_value.values) > 0.
+            # If the meaning is not unique, just return all of them.
+            meaning = all_meanings
+    elif value_pb.meaning:  # Simple field (int32)
+        meaning = value_pb.meaning
+
+    return meaning
+
+
+def _new_value_pb(entity_pb, name):
+    """Add (by name) a new ``Value`` protobuf to an entity protobuf.
+
+    :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity`
+    :param entity_pb: An entity protobuf to add a new property to.
+
+    :type name: string
+    :param name: The name of the new property.
+
+    :rtype: :class:`gcloud.datastore._generated.entity_pb2.Value`
+    :returns: The new ``Value`` protobuf that was added to the entity.
+    """
+    return entity_pb.properties.get_or_create(name)
+
+
+def _property_tuples(entity_pb):
+    """Iterator of name, ``Value`` tuples from entity properties.
+
+    :type entity_pb: :class:`gcloud.datastore._generated.entity_pb2.Entity`
+    :param entity_pb: An entity protobuf to add a new property to.
+
+    :rtype: :class:`generator`
+    :returns: An iterator that yields tuples of a name and ``Value``
+              corresponding to properties on the entity.
+    """
+    return six.iteritems(entity_pb.properties)
+
+
+
[docs]def entity_from_protobuf(pb): + """Factory method for creating an entity based on a protobuf. + + The protobuf should be one returned from the Cloud Datastore + Protobuf API. + + :type pb: :class:`gcloud.datastore._generated.entity_pb2.Entity` + :param pb: The Protobuf representing the entity. + + :rtype: :class:`gcloud.datastore.entity.Entity` + :returns: The entity derived from the protobuf. + """ + key = None + if pb.HasField('key'): # Message field (Key) + key = key_from_protobuf(pb.key) + + entity_props = {} + entity_meanings = {} + exclude_from_indexes = [] + + for prop_name, value_pb in _property_tuples(pb): + value = _get_value_from_value_pb(value_pb) + entity_props[prop_name] = value + + # Check if the property has an associated meaning. + is_list = isinstance(value, list) + meaning = _get_meaning(value_pb, is_list=is_list) + if meaning is not None: + entity_meanings[prop_name] = (meaning, value) + + # Check if ``value_pb`` was excluded from index. Lists need to be + # special-cased and we require all ``exclude_from_indexes`` values + # in a list agree. + if is_list: + exclude_values = set(value_pb.exclude_from_indexes + for value_pb in value_pb.array_value.values) + if len(exclude_values) != 1: + raise ValueError('For an array_value, subvalues must either ' + 'all be indexed or all excluded from ' + 'indexes.') + + if exclude_values.pop(): + exclude_from_indexes.append(prop_name) + else: + if value_pb.exclude_from_indexes: + exclude_from_indexes.append(prop_name) + + entity = Entity(key=key, exclude_from_indexes=exclude_from_indexes) + entity.update(entity_props) + entity._meanings.update(entity_meanings) + return entity
+ + +def _set_pb_meaning_from_entity(entity, name, value, value_pb, + is_list=False): + """Add meaning information (from an entity) to a protobuf. + + :type entity: :class:`gcloud.datastore.entity.Entity` + :param entity: The entity to be turned into a protobuf. + + :type name: string + :param name: The name of the property. + + :type value: object + :param value: The current value stored as property ``name``. + + :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value` + :param value_pb: The protobuf value to add meaning / meanings to. + + :type is_list: bool + :param is_list: (Optional) Boolean indicating if the ``value`` is + a list value. + """ + if name not in entity._meanings: + return + + meaning, orig_value = entity._meanings[name] + # Only add the meaning back to the protobuf if the value is + # unchanged from when it was originally read from the API. + if orig_value is not value: + return + + # For lists, we set meaning on each sub-element. + if is_list: + if not isinstance(meaning, list): + meaning = itertools.repeat(meaning) + val_iter = six.moves.zip(value_pb.array_value.values, + meaning) + for sub_value_pb, sub_meaning in val_iter: + if sub_meaning is not None: + sub_value_pb.meaning = sub_meaning + else: + value_pb.meaning = meaning + + +def entity_to_protobuf(entity): + """Converts an entity into a protobuf. + + :type entity: :class:`gcloud.datastore.entity.Entity` + :param entity: The entity to be turned into a protobuf. + + :rtype: :class:`gcloud.datastore._generated.entity_pb2.Entity` + :returns: The protobuf representing the entity. + """ + entity_pb = _entity_pb2.Entity() + if entity.key is not None: + key_pb = entity.key.to_protobuf() + entity_pb.key.CopyFrom(key_pb) + + for name, value in entity.items(): + value_is_list = isinstance(value, list) + if value_is_list and len(value) == 0: + continue + + value_pb = _new_value_pb(entity_pb, name) + # Set the appropriate value. + _set_protobuf_value(value_pb, value) + + # Add index information to protobuf. + if name in entity.exclude_from_indexes: + if not value_is_list: + value_pb.exclude_from_indexes = True + + for sub_value in value_pb.array_value.values: + sub_value.exclude_from_indexes = True + + # Add meaning information to protobuf. + _set_pb_meaning_from_entity(entity, name, value, value_pb, + is_list=value_is_list) + + return entity_pb + + +
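A round-trip sketch through the two converters (assumes a configured client; kind and property are made up):

from gcloud import datastore
from gcloud.datastore import helpers

client = datastore.Client()
entity = datastore.Entity(key=client.key('MyKind', 1234))
entity['name'] = 'hello'

pb = helpers.entity_to_protobuf(entity)
clone = helpers.entity_from_protobuf(pb)
assert dict(clone) == {'name': 'hello'}
assert clone.key.path == entity.key.path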
[docs]def key_from_protobuf(pb): + """Factory method for creating a key based on a protobuf. + + The protobuf should be one returned from the Cloud Datastore + Protobuf API. + + :type pb: :class:`gcloud.datastore._generated.entity_pb2.Key` + :param pb: The Protobuf representing the key. + + :rtype: :class:`gcloud.datastore.key.Key` + :returns: a new `Key` instance + """ + path_args = [] + for element in pb.path: + path_args.append(element.kind) + if element.id: # Simple field (int64) + path_args.append(element.id) + # This is safe: we expect proto objects returned will only have + # one of `name` or `id` set. + if element.name: # Simple field (string) + path_args.append(element.name) + + project = None + if pb.partition_id.project_id: # Simple field (string) + project = pb.partition_id.project_id + namespace = None + if pb.partition_id.namespace_id: # Simple field (string) + namespace = pb.partition_id.namespace_id + + return Key(*path_args, namespace=namespace, project=project)
+
+
+def _pb_attr_value(val):
+    """Given a value, return the protobuf attribute name and proper value.
+
+    The Protobuf API uses different attribute names based on value types
+    rather than inferring the type.  This function simply determines the
+    proper attribute name based on the type of the value provided and
+    returns the attribute name as well as a properly formatted value.
+
+    Certain value types need to be coerced into a different type (such
+    as a `datetime.datetime` into an integer timestamp, or a
+    `gcloud.datastore.key.Key` into a Protobuf representation).  This
+    function handles that for you.
+
+    .. note::
+       Values which are "text" ('unicode' in Python2, 'str' in Python3) map
+       to 'string_value' in the datastore;  values which are "bytes"
+       ('str' in Python2, 'bytes' in Python3) map to 'blob_value'.
+
+    For example:
+
+    >>> _pb_attr_value(1234)
+    ('integer_value', 1234)
+    >>> _pb_attr_value('my_string')
+    ('string_value', 'my_string')
+
+    :type val: `datetime.datetime`, :class:`gcloud.datastore.key.Key`,
+               bool, float, integer, string
+    :param val: The value to be scrutinized.
+
+    :rtype: tuple
+    :returns: A tuple of the attribute name and proper value type.
+    """
+
+    if isinstance(val, datetime.datetime):
+        name = 'timestamp'
+        value = _datetime_to_pb_timestamp(val)
+    elif isinstance(val, Key):
+        name, value = 'key', val.to_protobuf()
+    elif isinstance(val, bool):
+        name, value = 'boolean', val
+    elif isinstance(val, float):
+        name, value = 'double', val
+    elif isinstance(val, six.integer_types):
+        name, value = 'integer', val
+    elif isinstance(val, six.text_type):
+        name, value = 'string', val
+    elif isinstance(val, (bytes, str)):
+        name, value = 'blob', val
+    elif isinstance(val, Entity):
+        name, value = 'entity', val
+    elif isinstance(val, list):
+        name, value = 'array', val
+    elif isinstance(val, GeoPoint):
+        name, value = 'geo_point', val.to_protobuf()
+    elif val is None:
+        name, value = 'null', struct_pb2.NULL_VALUE
+    else:
+        raise ValueError("Unknown protobuf attr type %s" % type(val))
+
+    return name + '_value', value
+
+
+def _get_value_from_value_pb(value_pb):
+    """Given a protobuf for a Value, get the correct value.
+
+    The Cloud Datastore Protobuf API returns a Property Protobuf which
+    has one value set and the rest blank.  This function retrieves the
+    one value provided.
+
+    Some work is done to coerce the return value into a more useful type
+    (particularly in the case of a timestamp value, or a key value).
+
+    :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value`
+    :param value_pb: The Value Protobuf.
+
+    :rtype: object
+    :returns: The value provided by the Protobuf.
+    :raises: :class:`ValueError <exceptions.ValueError>` if no value type
+             has been set.
+ """ + value_type = value_pb.WhichOneof('value_type') + + if value_type == 'timestamp_value': + result = _pb_timestamp_to_datetime(value_pb.timestamp_value) + + elif value_type == 'key_value': + result = key_from_protobuf(value_pb.key_value) + + elif value_type == 'boolean_value': + result = value_pb.boolean_value + + elif value_type == 'double_value': + result = value_pb.double_value + + elif value_type == 'integer_value': + result = value_pb.integer_value + + elif value_type == 'string_value': + result = value_pb.string_value + + elif value_type == 'blob_value': + result = value_pb.blob_value + + elif value_type == 'entity_value': + result = entity_from_protobuf(value_pb.entity_value) + + elif value_type == 'array_value': + result = [_get_value_from_value_pb(value) + for value in value_pb.array_value.values] + + elif value_type == 'geo_point_value': + result = GeoPoint(value_pb.geo_point_value.latitude, + value_pb.geo_point_value.longitude) + + elif value_type == 'null_value': + result = None + + else: + raise ValueError('Value protobuf did not have any value set') + + return result + + +def _set_protobuf_value(value_pb, val): + """Assign 'val' to the correct subfield of 'value_pb'. + + The Protobuf API uses different attribute names based on value types + rather than inferring the type. + + Some value types (entities, keys, lists) cannot be directly + assigned; this function handles them correctly. + + :type value_pb: :class:`gcloud.datastore._generated.entity_pb2.Value` + :param value_pb: The value protobuf to which the value is being assigned. + + :type val: :class:`datetime.datetime`, boolean, float, integer, string, + :class:`gcloud.datastore.key.Key`, + :class:`gcloud.datastore.entity.Entity` + :param val: The value to be assigned. + """ + attr, val = _pb_attr_value(val) + if attr == 'key_value': + value_pb.key_value.CopyFrom(val) + elif attr == 'timestamp_value': + value_pb.timestamp_value.CopyFrom(val) + elif attr == 'entity_value': + entity_pb = entity_to_protobuf(val) + value_pb.entity_value.CopyFrom(entity_pb) + elif attr == 'array_value': + l_pb = value_pb.array_value.values + for item in val: + i_pb = l_pb.add() + _set_protobuf_value(i_pb, item) + elif attr == 'geo_point_value': + value_pb.geo_point_value.CopyFrom(val) + else: # scalar, just assign + setattr(value_pb, attr, val) + + +class GeoPoint(object): + """Simple container for a geo point value. + + :type latitude: float + :param latitude: Latitude of a point. + + :type longitude: float + :param longitude: Longitude of a point. + """ + + def __init__(self, latitude, longitude): + self.latitude = latitude + self.longitude = longitude + + def to_protobuf(self): + """Convert the current object to protobuf. + + :rtype: :class:`google.type.latlng_pb2.LatLng`. + :returns: The current point as a protobuf. + """ + return latlng_pb2.LatLng(latitude=self.latitude, + longitude=self.longitude) + + def __eq__(self, other): + """Compare two geo points for equality. + + :rtype: boolean + :returns: True if the points compare equal, else False. + """ + if not isinstance(other, GeoPoint): + return False + + return (self.latitude == other.latitude and + self.longitude == other.longitude) + + def __ne__(self, other): + """Compare two geo points for inequality. + + :rtype: boolean + :returns: False if the points compare equal, else True. + """ + return not self.__eq__(other) +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/datastore/key.html b/0.18.1/_modules/gcloud/datastore/key.html new file mode 100644 index 000000000000..13478c8bfc3e --- /dev/null +++ b/0.18.1/_modules/gcloud/datastore/key.html @@ -0,0 +1,637 @@ + + + + + + + + gcloud.datastore.key — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.datastore.key

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud datastore keys."""
+
+import copy
+import six
+
+from gcloud.datastore._generated import entity_pb2 as _entity_pb2
+
+
+
[docs]class Key(object): + """An immutable representation of a datastore Key. + + To create a basic key: + + >>> Key('EntityKind', 1234) + <Key[{'kind': 'EntityKind', 'id': 1234}]> + >>> Key('EntityKind', 'foo') + <Key[{'kind': 'EntityKind', 'name': 'foo'}]> + + To create a key with a parent: + + >>> Key('Parent', 'foo', 'Child', 1234) + <Key[{'kind': 'Parent', 'name': 'foo'}, {'kind': 'Child', 'id': 1234}]> + >>> Key('Child', 1234, parent=parent_key) + <Key[{'kind': 'Parent', 'name': 'foo'}, {'kind': 'Child', 'id': 1234}]> + + To create a partial key: + + >>> Key('Parent', 'foo', 'Child') + <Key[{'kind': 'Parent', 'name': 'foo'}, {'kind': 'Child'}]> + + :type path_args: tuple of string and integer + :param path_args: May represent a partial (odd length) or full (even + length) key path. + + :type kwargs: dict + :param kwargs: Keyword arguments to be passed in. + + Accepted keyword arguments are + + * namespace (string): A namespace identifier for the key. + * project (string): The project associated with the key. + * parent (:class:`gcloud.datastore.key.Key`): The parent of the key. + + The project argument is required unless it has been set implicitly. + """ + + def __init__(self, *path_args, **kwargs): + self._flat_path = path_args + parent = self._parent = kwargs.get('parent') + self._namespace = kwargs.get('namespace') + project = kwargs.get('project') + self._project = _validate_project(project, parent) + # _flat_path, _parent, _namespace and _project must be set before + # _combine_args() is called. + self._path = self._combine_args() + + def __eq__(self, other): + """Compare two keys for equality. + + Incomplete keys never compare equal to any other key. + + Completed keys compare equal if they have the same path, project, + and namespace. + + :rtype: bool + :returns: True if the keys compare equal, else False. + """ + if not isinstance(other, Key): + return False + + if self.is_partial or other.is_partial: + return False + + return (self.flat_path == other.flat_path and + self.project == other.project and + self.namespace == other.namespace) + + def __ne__(self, other): + """Compare two keys for inequality. + + Incomplete keys never compare equal to any other key. + + Completed keys compare equal if they have the same path, project, + and namespace. + + :rtype: bool + :returns: False if the keys compare equal, else True. + """ + return not self.__eq__(other) + + def __hash__(self): + """Hash a key for use in a dictionary lookup. + + :rtype: integer + :returns: a hash of the key's state. + """ + return (hash(self.flat_path) + + hash(self.project) + + hash(self.namespace)) + + @staticmethod + def _parse_path(path_args): + """Parses positional arguments into key path with kinds and IDs. + + :type path_args: tuple + :param path_args: A tuple from positional arguments. Should be an + alternating list of kinds (string) and ID/name + parts (int or string). + + :rtype: :class:`list` of :class:`dict` + :returns: A list of key parts with kind and ID or name set. + :raises: :class:`ValueError` if there are no ``path_args``, if one of + the kinds is not a string or if one of the IDs/names is not + a string or an integer. + """ + if len(path_args) == 0: + raise ValueError('Key path must not be empty.') + + kind_list = path_args[::2] + id_or_name_list = path_args[1::2] + # Dummy sentinel value to pad incomplete key to even length path. 
+ partial_ending = object() + if len(path_args) % 2 == 1: + id_or_name_list += (partial_ending,) + + result = [] + for kind, id_or_name in zip(kind_list, id_or_name_list): + curr_key_part = {} + if isinstance(kind, six.string_types): + curr_key_part['kind'] = kind + else: + raise ValueError(kind, 'Kind was not a string.') + + if isinstance(id_or_name, six.string_types): + curr_key_part['name'] = id_or_name + elif isinstance(id_or_name, six.integer_types): + curr_key_part['id'] = id_or_name + elif id_or_name is not partial_ending: + raise ValueError(id_or_name, + 'ID/name was not a string or integer.') + + result.append(curr_key_part) + + return result + + def _combine_args(self): + """Sets protected data by combining raw data set from the constructor. + + If a ``_parent`` is set, updates the ``_flat_path`` and sets the + ``_namespace`` and ``_project`` if not already set. + + :rtype: :class:`list` of :class:`dict` + :returns: A list of key parts with kind and ID or name set. + :raises: :class:`ValueError` if the parent key is not complete. + """ + child_path = self._parse_path(self._flat_path) + + if self._parent is not None: + if self._parent.is_partial: + raise ValueError('Parent key must be complete.') + + # We know that _parent.path() will return a copy. + child_path = self._parent.path + child_path + self._flat_path = self._parent.flat_path + self._flat_path + if (self._namespace is not None and + self._namespace != self._parent.namespace): + raise ValueError('Child namespace must agree with parent\'s.') + self._namespace = self._parent.namespace + if (self._project is not None and + self._project != self._parent.project): + raise ValueError('Child project must agree with parent\'s.') + self._project = self._parent.project + + return child_path + + def _clone(self): + """Duplicates the Key. + + Most attributes are simple types, so don't require copying. Other + attributes like ``parent`` are long-lived and so we re-use them. + + :rtype: :class:`gcloud.datastore.key.Key` + :returns: A new ``Key`` instance with the same data as the current one. + """ + cloned_self = self.__class__(*self.flat_path, + project=self.project, + namespace=self.namespace) + # If the current parent has already been set, we re-use + # the same instance + cloned_self._parent = self._parent + return cloned_self + +
[docs] def completed_key(self, id_or_name): + """Creates new key from existing partial key by adding final ID/name. + + :type id_or_name: string or integer + :param id_or_name: ID or name to be added to the key. + + :rtype: :class:`gcloud.datastore.key.Key` + :returns: A new ``Key`` instance with the same data as the current one + and an extra ID or name added. + :raises: :class:`ValueError` if the current key is not partial or if + ``id_or_name`` is not a string or integer. + """ + if not self.is_partial: + raise ValueError('Only a partial key can be completed.') + + id_or_name_key = None + if isinstance(id_or_name, six.string_types): + id_or_name_key = 'name' + elif isinstance(id_or_name, six.integer_types): + id_or_name_key = 'id' + else: + raise ValueError(id_or_name, + 'ID/name was not a string or integer.') + + new_key = self._clone() + new_key._path[-1][id_or_name_key] = id_or_name + new_key._flat_path += (id_or_name,) + return new_key
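For illustration (the project ID is a placeholder), completing a partial key returns a new, complete instance and leaves the original untouched::

    partial = Key('Parent', 'foo', 'Child', project='my-project')
    assert partial.is_partial

    complete = partial.completed_key(1234)
    assert not complete.is_partial
    assert complete.id == 1234
    assert partial.is_partial         # the original key is unchanged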
+ +
[docs] def to_protobuf(self): + """Return a protobuf corresponding to the key. + + :rtype: :class:`gcloud.datastore._generated.entity_pb2.Key` + :returns: The protobuf representing the key. + """ + key = _entity_pb2.Key() + key.partition_id.project_id = self.project + + if self.namespace: + key.partition_id.namespace_id = self.namespace + + for item in self.path: + element = key.path.add() + if 'kind' in item: + element.kind = item['kind'] + if 'id' in item: + element.id = item['id'] + if 'name' in item: + element.name = item['name'] + + return key
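A sketch of the generated protobuf's shape (project and namespace values are placeholders)::

    key = Key('EntityKind', 1234, project='my-project', namespace='my-ns')
    pb = key.to_protobuf()

    assert pb.partition_id.project_id == 'my-project'
    assert pb.partition_id.namespace_id == 'my-ns'
    assert pb.path[0].kind == 'EntityKind'
    assert pb.path[0].id == 1234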
+ + @property + def is_partial(self): + """Boolean indicating if the key has an ID (or name). + + :rtype: bool + :returns: ``True`` if the last element of the key's path does not have + an ``id`` or a ``name``. + """ + return self.id_or_name is None + + @property + def namespace(self): + """Namespace getter. + + :rtype: string + :returns: The namespace of the current key. + """ + return self._namespace + + @property + def path(self): + """Path getter. + + Returns a copy so that the key remains immutable. + + :rtype: :class:`list` of :class:`dict` + :returns: The (key) path of the current key. + """ + return copy.deepcopy(self._path) + + @property + def flat_path(self): + """Getter for the key path as a tuple. + + :rtype: tuple of string and integer + :returns: The tuple of elements in the path. + """ + return self._flat_path + + @property + def kind(self): + """Kind getter. Based on the last element of path. + + :rtype: string + :returns: The kind of the current key. + """ + return self.path[-1]['kind'] + + @property + def id(self): + """ID getter. Based on the last element of path. + + :rtype: integer + :returns: The (integer) ID of the key. + """ + return self.path[-1].get('id') + + @property + def name(self): + """Name getter. Based on the last element of path. + + :rtype: string + :returns: The (string) name of the key. + """ + return self.path[-1].get('name') + + @property + def id_or_name(self): + """Getter. Based on the last element of path. + + :rtype: integer (if ``id``) or string (if ``name``) + :returns: The last element of the key's path if it is either an ``id`` + or a ``name``. + """ + return self.id or self.name + + @property + def project(self): + """Project getter. + + :rtype: string + :returns: The key's project. + """ + return self._project + + def _make_parent(self): + """Creates a parent key for the current path. + + Extracts all but the last element in the key path and creates a new + key, while still matching the namespace and the project. + + :rtype: :class:`gcloud.datastore.key.Key` or :class:`NoneType` + :returns: A new ``Key`` instance, whose path consists of all but the + last element of current path. If the current key has only + one path element, returns ``None``. + """ + if self.is_partial: + parent_args = self.flat_path[:-1] + else: + parent_args = self.flat_path[:-2] + if parent_args: + return self.__class__(*parent_args, project=self.project, + namespace=self.namespace) + + @property + def parent(self): + """The parent of the current key. + + :rtype: :class:`gcloud.datastore.key.Key` or :class:`NoneType` + :returns: A new ``Key`` instance, whose path consists of all but the + last element of current path. If the current key has only + one path element, returns ``None``. + """ + if self._parent is None: + self._parent = self._make_parent() + + return self._parent + + def __repr__(self): + return '<Key%s, project=%s>' % (self.path, self.project)
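The accessors compose naturally; a small sketch (hypothetical names) of walking up a two-element path::

    child = Key('Parent', 'foo', 'Child', 1234, project='my-project')

    assert child.kind == 'Child'
    assert child.id == 1234
    assert child.flat_path == ('Parent', 'foo', 'Child', 1234)

    parent = child.parent             # built lazily via _make_parent()
    assert parent.flat_path == ('Parent', 'foo')
    assert parent.parent is None      # a single-element path has no parent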
+ + +def _validate_project(project, parent): + """Ensure the project is set appropriately. + + If ``parent`` is passed, skip the test (it will be checked / fixed up + later, when the parent's values are merged in). + + If ``project`` is unset and there is no ``parent`` to infer it from, + raise an error. + + :type project: string + :param project: A project. + + :type parent: :class:`gcloud.datastore.key.Key` or ``NoneType`` + :param parent: The parent of the key or ``None``. + + :rtype: string + :returns: The ``project`` passed in (it may still be ``None`` when a + ``parent`` is given). + :raises: :class:`ValueError` if ``project`` is ``None`` and no project + can be inferred from the parent. + """ + if parent is None: + if project is None: + raise ValueError("A Key must have a project set.") + + return project +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/datastore/query.html b/0.18.1/_modules/gcloud/datastore/query.html new file mode 100644 index 000000000000..3ffd1f32ed6f --- /dev/null +++ b/0.18.1/_modules/gcloud/datastore/query.html @@ -0,0 +1,772 @@ + + + + + + + + gcloud.datastore.query — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.datastore.query

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud datastore queries."""
+
+import base64
+
+from gcloud._helpers import _ensure_tuple_or_list
+from gcloud.datastore._generated import query_pb2 as _query_pb2
+from gcloud.datastore import helpers
+from gcloud.datastore.key import Key
+
+
+
[docs]class Query(object): + """A Query against the Cloud Datastore. + + This class serves as an abstraction for creating a query over data + stored in the Cloud Datastore. + + :type client: :class:`gcloud.datastore.client.Client` + :param client: The client used to connect to datastore. + + :type kind: string + :param kind: The kind to query. + + :type project: string + :param project: The project associated with the query. If not passed, + uses the client's value. + + :type namespace: string or None + :param namespace: The namespace to which to restrict results. If not + passed, uses the client's value. + + :type ancestor: :class:`gcloud.datastore.key.Key` or None + :param ancestor: key of the ancestor to which this query's results are + restricted. + + :type filters: sequence of (property_name, operator, value) tuples + :param filters: property filters applied by this query. + + :type projection: sequence of string + :param projection: fields returned as part of query results. + + :type order: sequence of string + :param order: field names used to order query results. Prepend '-' + to a field name to sort it in descending order. + + :type distinct_on: sequence of string + :param distinct_on: field names used to group query results. + + :raises: ValueError if ``project`` is not passed and no implicit + default is set. + """ + + OPERATORS = { + '<=': _query_pb2.PropertyFilter.LESS_THAN_OR_EQUAL, + '>=': _query_pb2.PropertyFilter.GREATER_THAN_OR_EQUAL, + '<': _query_pb2.PropertyFilter.LESS_THAN, + '>': _query_pb2.PropertyFilter.GREATER_THAN, + '=': _query_pb2.PropertyFilter.EQUAL, + } + """Mapping of operator strings and their protobuf equivalents.""" + + def __init__(self, + client, + kind=None, + project=None, + namespace=None, + ancestor=None, + filters=(), + projection=(), + order=(), + distinct_on=()): + + self._client = client + self._kind = kind + self._project = project or client.project + self._namespace = namespace or client.namespace + self._ancestor = ancestor + self._filters = [] + # Verify filters passed in. + for property_name, operator, value in filters: + self.add_filter(property_name, operator, value) + self._projection = _ensure_tuple_or_list('projection', projection) + self._order = _ensure_tuple_or_list('order', order) + self._distinct_on = _ensure_tuple_or_list('distinct_on', distinct_on) + + @property + def project(self): + """Get the project for this Query. + + :rtype: str + :returns: The project for the query. + """ + return self._project or self._client.project + + @property + def namespace(self): + """This query's namespace + + :rtype: string or None + :returns: the namespace assigned to this query + """ + return self._namespace or self._client.namespace + + @namespace.setter + def namespace(self, value): + """Update the query's namespace. + + :type value: string + """ + if not isinstance(value, str): + raise ValueError("Namespace must be a string") + self._namespace = value + + @property + def kind(self): + """Get the Kind of the Query. + + :rtype: string + :returns: The kind for the query. + """ + return self._kind + + @kind.setter + def kind(self, value): + """Update the Kind of the Query. + + :type value: string + :param value: updated kind for the query. + + .. note:: + + The protobuf specification allows for ``kind`` to be repeated, + but the current implementation returns an error if more than + one value is passed. 
If the back-end changes in the future to + allow multiple values, this method will be updated to allow passing + either a string or a sequence of strings. + """ + if not isinstance(value, str): + raise TypeError("Kind must be a string") + self._kind = value + + @property + def ancestor(self): + """The ancestor key for the query. + + :rtype: Key or None + :returns: The ancestor for the query. + """ + return self._ancestor + + @ancestor.setter + def ancestor(self, value): + """Set the ancestor for the query + + :type value: Key + :param value: the new ancestor key + """ + if not isinstance(value, Key): + raise TypeError("Ancestor must be a Key") + self._ancestor = value + + @ancestor.deleter + def ancestor(self): + """Remove the ancestor for the query.""" + self._ancestor = None + + @property + def filters(self): + """Filters set on the query. + + :rtype: sequence of (property_name, operator, value) tuples. + :returns: The filters set on the query. + """ + return self._filters[:] + +
[docs] def add_filter(self, property_name, operator, value): + """Filter the query based on a property name, operator and a value. + + Expressions take the form of:: + + .add_filter('<property>', '<operator>', <value>) + + where property is a property stored on the entity in the datastore + and operator is one of ``OPERATORS`` + (i.e., ``=``, ``<``, ``<=``, ``>``, ``>=``):: + + >>> from gcloud import datastore + >>> client = datastore.Client() + >>> query = client.query(kind='Person') + >>> query.add_filter('name', '=', 'James') + >>> query.add_filter('age', '>', 50) + + :type property_name: string + :param property_name: A property name. + + :type operator: string + :param operator: One of ``=``, ``<``, ``<=``, ``>``, ``>=``. + + :type value: :class:`int`, :class:`str`, :class:`bool`, + :class:`float`, :class:`NoneType`, + :class:`datetime.datetime`, + :class:`gcloud.datastore.key.Key` + :param value: The value to filter on. + + :raises: :class:`ValueError` if ``operator`` is not one of the + specified values, or if a filter names ``'__key__'`` but + passes an invalid value (a key is required). + """ + if self.OPERATORS.get(operator) is None: + error_message = 'Invalid expression: "%s"' % (operator,) + choices_message = 'Please use one of: =, <, <=, >, >=.' + raise ValueError(error_message, choices_message) + + if property_name == '__key__' and not isinstance(value, Key): + raise ValueError('Invalid key: "%s"' % value) + + self._filters.append((property_name, operator, value))
+ + @property + def projection(self): + """Field names returned by the query. + + :rtype: sequence of string + :returns: Names of fields in query results. + """ + return self._projection[:] + + @projection.setter + def projection(self, projection): + """Set the fields returned by the query. + + :type projection: string or sequence of strings + :param projection: Each value is a string giving the name of a + property to be included in the projection query. + """ + if isinstance(projection, str): + projection = [projection] + self._projection[:] = projection +
[docs] def keys_only(self): + """Set the projection to include only keys.""" + self._projection[:] = ['__key__']
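A keys-only projection yields entities that carry only their keys, which keeps result payloads small; a sketch, assuming an existing ``client``::

    query = client.query(kind='Person')
    query.keys_only()

    # Each returned entity has a key but no other properties.
    keys = [entity.key for entity in query.fetch()]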
+ +
[docs] def key_filter(self, key, operator='='): + """Filter on a key. + + :type key: :class:`gcloud.datastore.key.Key` + :param key: The key to filter on. + + :type operator: string + :param operator: (Optional) One of ``=``, ``<``, ``<=``, ``>``, ``>=``. + Defaults to ``=``. + """ + self.add_filter('__key__', operator, key)
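For example (assuming an existing ``client``; kinds and names are placeholders), restricting results to a single entity by key, or to the descendants of a parent::

    target_key = client.key('Person', 'James')

    query = client.query(kind='Person')
    query.key_filter(target_key)   # same as add_filter('__key__', '=', target_key)

    # An ancestor query instead uses the constructor argument:
    descendants = client.query(kind='Child',
                               ancestor=client.key('Parent', 'foo'))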
+ + @property + def order(self): + """Names of fields used to sort query results. + + :rtype: sequence of string + :returns: The order(s) set on the query. + """ + return self._order[:] + + @order.setter + def order(self, value): + """Set the fields used to sort query results. + + Sort fields will be applied in the order specified. + + :type value: string or sequence of strings + :param value: Each value is a string giving the name of the + property on which to sort, optionally preceded by a + hyphen (-) to specify descending order. + Omitting the hyphen implies ascending order. + """ + if isinstance(value, str): + value = [value] + self._order[:] = value + + @property + def distinct_on(self): + """Names of fields used to group query results. + + :rtype: sequence of string + :returns: The "distinct on" fields set on the query. + """ + return self._distinct_on[:] + + @distinct_on.setter + def distinct_on(self, value): + """Set fields used to group query results. + + :type value: string or sequence of strings + :param value: Each value is a string giving the name of a + property to use to group results together. + """ + if isinstance(value, str): + value = [value] + self._distinct_on[:] = value + +
[docs] def fetch(self, limit=None, offset=0, start_cursor=None, end_cursor=None, + client=None): + """Execute the Query; return an iterator for the matching entities. + + For example:: + + >>> from gcloud import datastore + >>> client = datastore.Client() + >>> query = client.query(kind='Person') + >>> query.add_filter('name', '=', 'Sally') + >>> list(query.fetch()) + [<Entity object>, <Entity object>, ...] + >>> list(query.fetch(1)) + [<Entity object>] + + :type limit: integer or None + :param limit: An optional limit passed through to the iterator. + + :type offset: integer + :param offset: An optional offset passed through to the iterator. + + :type start_cursor: bytes + :param start_cursor: An optional cursor passed through to the iterator. + + :type end_cursor: bytes + :param end_cursor: An optional cursor passed through to the iterator. + + :type client: :class:`gcloud.datastore.client.Client` + :param client: client used to connect to datastore. + If not supplied, uses the query's value. + + :rtype: :class:`Iterator` + :returns: The iterator for the query. + :raises: ValueError if ``client`` is not passed and no implicit + default has been set. + """ + if client is None: + client = self._client + + return Iterator( + self, client, limit, offset, start_cursor, end_cursor)
+ + +
[docs]class Iterator(object): + """Represent the state of a given execution of a Query. + + :type query: :class:`gcloud.datastore.query.Query` + :param query: Query object holding permanent configuration (i.e. + things that don't change with each page in + a result set). + + :type client: :class:`gcloud.datastore.client.Client` + :param client: The client used to make a request. + + :type limit: integer + :param limit: (Optional) Limit the number of results returned. + + :type offset: integer + :param offset: (Optional) Offset used to begin a query. + + :type start_cursor: bytes + :param start_cursor: (Optional) Cursor to begin paging through + query results. + + :type end_cursor: bytes + :param end_cursor: (Optional) Cursor to end paging through + query results. + """ + + _NOT_FINISHED = _query_pb2.QueryResultBatch.NOT_FINISHED + + _FINISHED = ( + _query_pb2.QueryResultBatch.NO_MORE_RESULTS, + _query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_LIMIT, + _query_pb2.QueryResultBatch.MORE_RESULTS_AFTER_CURSOR, + ) + + def __init__(self, query, client, limit=None, offset=None, + start_cursor=None, end_cursor=None): + self._query = query + self._client = client + self._limit = limit + self._offset = offset + self._start_cursor = start_cursor + self._end_cursor = end_cursor + self._page = self._more_results = None + self._skipped_results = None +
[docs] def next_page(self): + """Fetch a single "page" of query results. + + Low-level API for fine control: the more convenient API is + to iterate on the current Iterator. + + :rtype: tuple, (entities, more_results, cursor) + :returns: The next page of results. + """ + pb = _pb_from_query(self._query) + + start_cursor = self._start_cursor + if start_cursor is not None: + pb.start_cursor = base64.urlsafe_b64decode(start_cursor) + + end_cursor = self._end_cursor + if end_cursor is not None: + pb.end_cursor = base64.urlsafe_b64decode(end_cursor) + + if self._limit is not None: + pb.limit.value = self._limit + + if self._offset is not None: + pb.offset = self._offset + + transaction = self._client.current_transaction + + query_results = self._client.connection.run_query( + query_pb=pb, + project=self._query.project, + namespace=self._query.namespace, + transaction_id=transaction and transaction.id, + ) + (entity_pbs, cursor_as_bytes, + more_results_enum, self._skipped_results) = query_results + + if cursor_as_bytes == b'': + self._start_cursor = None + else: + self._start_cursor = base64.urlsafe_b64encode(cursor_as_bytes) + self._end_cursor = None + + if more_results_enum == self._NOT_FINISHED: + self._more_results = True + elif more_results_enum in self._FINISHED: + self._more_results = False + else: + raise ValueError('Unexpected value returned for `more_results`.') + + self._page = [ + helpers.entity_from_protobuf(entity) + for entity in entity_pbs] + return self._page, self._more_results, self._start_cursor
+ + def __iter__(self): + """Generator yielding all results matching our query. + + :rtype: sequence of :class:`gcloud.datastore.entity.Entity` + """ + while True: + self.next_page() + for entity in self._page: + yield entity + if not self._more_results: + break + num_results = len(self._page) + if self._limit is not None: + self._limit -= num_results + if self._offset is not None and self._skipped_results is not None: + # NOTE: The offset goes down relative to the location + # because we are updating the cursor each time. + self._offset -= self._skipped_results
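A hedged sketch of driving pagination by hand with :meth:`next_page`, e.g. to persist the cursor between web requests; ``saved_cursor`` is a placeholder (it may be ``None`` on the first request)::

    iterator = query.fetch(limit=20, start_cursor=saved_cursor)
    entities, more_results, cursor = iterator.next_page()

    # ``cursor`` is a URL-safe base64 bytestring: stash it (e.g. in a
    # session) and pass it back as ``start_cursor`` to resume where this
    # page left off, as long as ``more_results`` is True.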
+ + +def _pb_from_query(query): + """Convert a Query instance to the corresponding protobuf. + + :type query: :class:`Query` + :param query: The source query. + + :rtype: :class:`gcloud.datastore._generated.query_pb2.Query` + :returns: A protobuf that can be sent to the protobuf API. N.b. that + it does not contain "in-flight" fields for ongoing query + executions (cursors, offset, limit). + """ + pb = _query_pb2.Query() + + for projection_name in query.projection: + pb.projection.add().property.name = projection_name + + if query.kind: + pb.kind.add().name = query.kind + + composite_filter = pb.filter.composite_filter + composite_filter.op = _query_pb2.CompositeFilter.AND + + if query.ancestor: + ancestor_pb = query.ancestor.to_protobuf() + + # Filter on __key__ HAS_ANCESTOR == ancestor. + ancestor_filter = composite_filter.filters.add().property_filter + ancestor_filter.property.name = '__key__' + ancestor_filter.op = _query_pb2.PropertyFilter.HAS_ANCESTOR + ancestor_filter.value.key_value.CopyFrom(ancestor_pb) + + for property_name, operator, value in query.filters: + pb_op_enum = query.OPERATORS.get(operator) + + # Add the specific filter + property_filter = composite_filter.filters.add().property_filter + property_filter.property.name = property_name + property_filter.op = pb_op_enum + + # Set the value to filter on based on the type. + if property_name == '__key__': + key_pb = value.to_protobuf() + property_filter.value.key_value.CopyFrom(key_pb) + else: + helpers._set_protobuf_value(property_filter.value, value) + + if not composite_filter.filters: + pb.ClearField('filter') + + for prop in query.order: + property_order = pb.order.add() + + if prop.startswith('-'): + property_order.property.name = prop[1:] + property_order.direction = property_order.DESCENDING + else: + property_order.property.name = prop + property_order.direction = property_order.ASCENDING + + for distinct_on_name in query.distinct_on: + pb.distinct_on.add().name = distinct_on_name + + return pb +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/datastore/transaction.html b/0.18.1/_modules/gcloud/datastore/transaction.html new file mode 100644 index 000000000000..8ee31128e4c7 --- /dev/null +++ b/0.18.1/_modules/gcloud/datastore/transaction.html @@ -0,0 +1,396 @@ + + + + + + + + gcloud.datastore.transaction — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.datastore.transaction

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud datastore transactions."""
+
+from gcloud.datastore.batch import Batch
+
+
+
[docs]class Transaction(Batch): + """An abstraction representing datastore Transactions. + + Transactions can be used to build up a bulk mutation and ensure all + or none succeed (transactionally). + + For example, the following snippet of code will put the two ``save`` + operations (either ``insert`` or ``upsert``) into the same + mutation, and execute those within a transaction:: + + >>> from gcloud import datastore + >>> client = datastore.Client() + >>> with client.transaction(): + ... client.put_multi([entity1, entity2]) + + Because it derives from :class:`Batch <.datastore.batch.Batch>`, + :class:`Transaction` also provides :meth:`put` and :meth:`delete` methods:: + + >>> with client.transaction() as xact: + ... xact.put(entity1) + ... xact.delete(entity2.key) + + By default, the transaction is rolled back if the transaction block + exits with an error:: + + >>> with client.transaction(): + ... do_some_work() + ... raise SomeException() # rolls back + + If the transaction block exits without an exception, it will commit + by default. + + .. warning:: Inside a transaction, automatically assigned IDs for + entities will not be available at save time! That means, if you + try:: + + >>> with client.transaction(): + ... entity = datastore.Entity(key=client.key('Thing')) + ... client.put(entity) + + ``entity`` won't have a complete key until the transaction is + committed. + + Once you exit the transaction (or call :meth:`commit`), the + automatically generated ID will be assigned to the entity:: + + >>> with client.transaction(): + ... entity = datastore.Entity(key=client.key('Thing')) + ... client.put(entity) + ... print(entity.key.is_partial) # There is no ID on this key. + ... + True + >>> print(entity.key.is_partial) # There *is* an ID. + False + + If you don't want to use the context manager you can initialize a + transaction manually:: + + >>> transaction = client.transaction() + >>> transaction.begin() + >>> + >>> entity = datastore.Entity(key=client.key('Thing')) + >>> transaction.put(entity) + >>> + >>> if error: + ... transaction.rollback() + ... else: + ... transaction.commit() + + :type client: :class:`gcloud.datastore.client.Client` + :param client: the client used to connect to datastore. + """ + + def __init__(self, client): + super(Transaction, self).__init__(client) + self._id = None + + @property + def id(self): + """Getter for the transaction ID. + + :rtype: string + :returns: The ID of the current transaction. + """ + return self._id +
[docs] def current(self): + """Return the topmost transaction. + + .. note:: + + If the topmost element on the stack is not a transaction, + returns None. + + :rtype: :class:`gcloud.datastore.transaction.Transaction` or None + :returns: The current transaction (if any are active). + """ + top = super(Transaction, self).current() + if isinstance(top, Transaction): + return top
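Inside an active ``with`` block the transaction sits on top of the client's batch stack, so, as a small sketch (assuming an existing ``client``)::

    with client.transaction() as xact:
        assert xact.current() is xact
    # Once the block exits (or while a plain Batch is on top of the
    # stack), current() returns None instead.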
+ +
[docs] def begin(self): + """Begins a transaction. + + This method is called automatically when entering a with + statement; however, it can be called explicitly if you don't want + to use a context manager. + + :raises: :class:`ValueError` if the transaction has already begun. + """ + super(Transaction, self).begin() + self._id = self.connection.begin_transaction(self.project)
+ +
[docs] def rollback(self): + """Rolls back the current transaction. + + This method has necessary side-effects: + + - Sets the current connection's transaction reference to None. + - Sets the current transaction's ID to None. + """ + try: + self.connection.rollback(self.project, self._id) + finally: + super(Transaction, self).rollback() + # Clear our own ID in case this gets accidentally reused. + self._id = None
+ +
[docs] def commit(self): + """Commits the transaction. + + This is called automatically upon exiting a with statement; + however, it can be called explicitly if you don't want to use a + context manager. + + This method has necessary side-effects: + + - Sets the current transaction's ID to None. + """ + try: + super(Transaction, self).commit() + finally: + # Clear our own ID in case this gets accidentally reused. + self._id = None
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/dns/changes.html b/0.18.1/_modules/gcloud/dns/changes.html new file mode 100644 index 000000000000..b03328b63638 --- /dev/null +++ b/0.18.1/_modules/gcloud/dns/changes.html @@ -0,0 +1,492 @@ + + + + + + + + gcloud.dns.changes — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.dns.changes

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API ResourceRecordSets."""
+
+import six
+
+from gcloud._helpers import _rfc3339_to_datetime
+from gcloud.exceptions import NotFound
+from gcloud.dns.resource_record_set import ResourceRecordSet
+
+
+
[docs]class Changes(object): + """Changes are bundled additions / deletions of DNS resource records. + + Changes are owned by a :class:`gcloud.dns.zone.ManagedZone` instance. + + See: + https://cloud.google.com/dns/api/v1/changes + + :type zone: :class:`gcloud.dns.zone.ManagedZone` + :param zone: A zone which holds one or more record sets. + """ + + def __init__(self, zone): + self.zone = zone + self._properties = {} + self._additions = self._deletions = () + + @classmethod +
[docs] def from_api_repr(cls, resource, zone): + """Factory: construct a change set given its API representation + + :type resource: dict + :param resource: change set representation returned from the API + + :type zone: :class:`gcloud.dns.zone.ManagedZone` + :param zone: A zone which holds zero or more change sets. + + :rtype: :class:`gcloud.dns.changes.Changes` + :returns: Change set parsed from ``resource``. + """ + changes = cls(zone=zone) + changes._set_properties(resource) + return changes
+ + def _set_properties(self, resource): + """Helper method for :meth:`from_api_repr`, :meth:`create`, etc. + + :type resource: dict + :param resource: change set representation returned from the API + """ + resource = resource.copy() + self._additions = tuple([ + ResourceRecordSet.from_api_repr(added_res, self.zone) + for added_res in resource.pop('additions', ())]) + self._deletions = tuple([ + ResourceRecordSet.from_api_repr(deleted_res, self.zone) + for deleted_res in resource.pop('deletions', ())]) + self._properties = resource + + @property + def path(self): + """URL path for change set APIs. + + :rtype: string + :returns: the path based on project, zone, and change set names. + """ + return '/projects/%s/managedZones/%s/changes/%s' % ( + self.zone.project, self.zone.name, self.name) + + @property + def name(self): + """Name of the change set. + + :rtype: string or ``NoneType`` + :returns: Name, as set by the back-end, or None. + """ + return self._properties.get('id') + + @name.setter + def name(self, value): + """Update name of the change set. + + :type value: string + :param value: New name for the change set. + """ + if not isinstance(value, six.string_types): + raise ValueError("Pass a string") + self._properties['id'] = value + + @property + def status(self): + """Status of the change set. + + :rtype: string or ``NoneType`` + :returns: Status, as set by the back-end, or None. + """ + return self._properties.get('status') + + @property + def started(self): + """Time when the change set was started. + + :rtype: ``datetime.datetime`` or ``NoneType`` + :returns: Time, as set by the back-end, or None. + """ + stamp = self._properties.get('startTime') + if stamp is not None: + return _rfc3339_to_datetime(stamp) + + @property + def additions(self): + """Resource record sets to be added to the zone. + + :rtype: sequence of + :class:`gcloud.dns.resource_record_set.ResourceRecordSet`. + :returns: record sets appended via :meth:`add_record_set` + """ + return self._additions + + @property + def deletions(self): + """Resource record sets to be deleted from the zone. + + :rtype: sequence of + :class:`gcloud.dns.resource_record_set.ResourceRecordSet`. + :returns: record sets appended via :meth:`delete_record_set` + """ + return self._deletions +
[docs] def add_record_set(self, record_set): + """Append a record set to the 'additions' for the change set. + + :type record_set: + :class:`gcloud.dns.resource_record_set.ResourceRecordSet` + :param record_set: the record set to append + + :raises: ``ValueError`` if ``record_set`` is not of the required type. + """ + if not isinstance(record_set, ResourceRecordSet): + raise ValueError("Pass a ResourceRecordSet") + self._additions += (record_set,)
+ +
[docs] def delete_record_set(self, record_set): + """Append a record set to the 'deletions' for the change set. + + :type record_set: + :class:`gcloud.dns.resource_record_set.ResourceRecordSet` + :param record_set: the record set to append + + :raises: ``ValueError`` if ``record_set`` is not of the required type. + """ + if not isinstance(record_set, ResourceRecordSet): + raise ValueError("Pass a ResourceRecordSet") + self._deletions += (record_set,)
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + + :rtype: :class:`gcloud.dns.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self.zone._client + return client + + def _build_resource(self): + """Generate a resource for ``create``.""" + additions = [{ + 'name': added.name, + 'type': added.record_type, + 'ttl': str(added.ttl), + 'rrdatas': added.rrdatas, + } for added in self.additions] + + deletions = [{ + 'name': deleted.name, + 'type': deleted.record_type, + 'ttl': str(deleted.ttl), + 'rrdatas': deleted.rrdatas, + } for deleted in self.deletions] + + return { + 'additions': additions, + 'deletions': deletions, + } + +
[docs] def create(self, client=None): + """API call: create the change set via a POST request + + See: + https://cloud.google.com/dns/api/v1/changes/create + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + """ + if len(self.additions) == 0 and len(self.deletions) == 0: + raise ValueError("No record sets added or deleted") + client = self._require_client(client) + path = '/projects/%s/managedZones/%s/changes' % ( + self.zone.project, self.zone.name) + api_response = client.connection.api_request( + method='POST', path=path, data=self._build_resource()) + self._set_properties(api_response)
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the change set via a GET request + + See + https://cloud.google.com/dns/api/v1/changes/get + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + + :rtype: bool + :returns: Boolean indicating existence of the changes. + """ + client = self._require_client(client) + try: + client.connection.api_request(method='GET', path=self.path, + query_params={'fields': 'id'}) + except NotFound: + return False + else: + return True
+ +
[docs] def reload(self, client=None): + """API call: refresh zone properties via a GET request + + See + https://cloud.google.com/dns/api/v1/changes/get + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + """ + client = self._require_client(client) + + api_response = client.connection.api_request( + method='GET', path=self.path) + self._set_properties(api_response)
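Putting the pieces together, an end-to-end sketch (zone name, domain, and IP address are placeholders) of submitting a change set and polling until the back-end reports completion::

    import time

    zone = client.zone('example-zone', 'example.com.')
    record_set = zone.resource_record_set(
        'www.example.com.', 'A', 3600, ['203.0.113.10'])

    changes = zone.changes()
    changes.add_record_set(record_set)
    changes.create()                  # POST the change set

    while changes.status != 'done':   # back-end reports 'pending', then 'done'
        time.sleep(1)
        changes.reload()              # refresh the status via GET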
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/dns/client.html b/0.18.1/_modules/gcloud/dns/client.html new file mode 100644 index 000000000000..b5a3e49bbbb9 --- /dev/null +++ b/0.18.1/_modules/gcloud/dns/client.html @@ -0,0 +1,349 @@ + + + + + + + + gcloud.dns.client — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.dns.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google Cloud DNS API."""
+
+
+from gcloud.client import JSONClient
+from gcloud.dns.connection import Connection
+from gcloud.dns.zone import ManagedZone
+
+
+
[docs]class Client(JSONClient): + """Client to bundle configuration needed for API requests. + + :type project: string + :param project: the project which the client acts on behalf of. Will be + passed when creating a zone. If not passed, + falls back to the default inferred from the environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + +
[docs] def quotas(self): + """Return DNS quotas for the project associated with this client. + + See: + https://cloud.google.com/dns/api/v1/projects/get + + :rtype: mapping + :returns: keys for the mapping correspond to those of the ``quota`` + sub-mapping of the project resource. + """ + path = '/projects/%s' % (self.project,) + resp = self.connection.api_request(method='GET', path=path) + + return dict([(key, int(value)) + for key, value in resp['quota'].items() if key != 'kind'])
+ +
[docs] def list_zones(self, max_results=None, page_token=None): + """List zones for the project associated with this client. + + See: + https://cloud.google.com/dns/api/v1/managedZones/list + + :type max_results: int + :param max_results: maximum number of zones to return. If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of zones. If + not passed, the API will return the first page of + zones. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.dns.zone.ManagedZone`, plus a + "next page token" string: if the token is not None, + indicates that more zones can be retrieved with another + call (pass that value as ``page_token``). + """ + params = {} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/managedZones' % (self.project,) + resp = self.connection.api_request(method='GET', path=path, + query_params=params) + zones = [ManagedZone.from_api_repr(resource, self) + for resource in resp['managedZones']] + return zones, resp.get('nextPageToken')
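Because the method returns a ``(zones, token)`` pair, retrieving every zone is a short loop; a sketch assuming an existing ``client``::

    zones, token = client.list_zones()
    while token is not None:
        more_zones, token = client.list_zones(page_token=token)
        zones.extend(more_zones)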
+ +
[docs] def zone(self, name, dns_name=None, description=None): + """Construct a zone bound to this client. + + :type name: string + :param name: Name of the zone. + + :type dns_name: string or :class:`NoneType` + :param dns_name: DNS name of the zone. If not passed, then calls + to :meth:`zone.create` will fail. + + :type description: string or :class:`NoneType` + :param description: the description for the zone. If not passed, + defaults to the value of 'dns_name'. + + :rtype: :class:`gcloud.dns.zone.ManagedZone` + :returns: a new ``ManagedZone`` instance + """ + return ManagedZone(name, dns_name, client=self, + description=description)
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/dns/connection.html b/0.18.1/_modules/gcloud/dns/connection.html new file mode 100644 index 000000000000..35163a95511e --- /dev/null +++ b/0.18.1/_modules/gcloud/dns/connection.html @@ -0,0 +1,266 @@ + + + + + + + + gcloud.dns.connection — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.dns.connection

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud dns connections."""
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Cloud DNS via the JSON REST API.""" + + API_BASE_URL = 'https://www.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v1' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/dns/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/ndev.clouddns.readwrite',) + """The scopes required for authenticating as a Cloud DNS consumer."""
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/dns/resource_record_set.html b/0.18.1/_modules/gcloud/dns/resource_record_set.html new file mode 100644 index 000000000000..a7d354fb64d9 --- /dev/null +++ b/0.18.1/_modules/gcloud/dns/resource_record_set.html @@ -0,0 +1,299 @@ + + + + + + + + gcloud.dns.resource_record_set — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.dns.resource_record_set

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API ResourceRecordSets."""
+
+
+
[docs]class ResourceRecordSet(object): + """ResourceRecordSets are DNS resource records. + + RRS are owned by a :class:`gcloud.dns.zone.ManagedZone` instance. + + See: + https://cloud.google.com/dns/api/v1/resourceRecordSets + + :type name: string + :param name: the name of the record set + + :type record_type: string + :param record_type: the RR type of the record set + + :type ttl: integer + :param ttl: TTL (in seconds) for caching the record sets + + :type rrdatas: list of string + :param rrdatas: one or more lines containing the resource data + + :type zone: :class:`gcloud.dns.zone.ManagedZone` + :param zone: A zone which holds one or more record sets. + """ + + def __init__(self, name, record_type, ttl, rrdatas, zone): + self.name = name + self.record_type = record_type + self.ttl = ttl + self.rrdatas = rrdatas + self.zone = zone + + @classmethod +
[docs] def from_api_repr(cls, resource, zone): + """Factory: construct a record set given its API representation + + :type resource: dict + :param resource: record sets representation returned from the API + + :type zone: :class:`gcloud.dns.zone.ManagedZone` + :param zone: A zone which holds one or more record sets. + + :rtype: :class:`gcloud.dns.resource_record_set.ResourceRecordSet` + :returns: RRS parsed from ``resource``. + """ + name = resource['name'] + record_type = resource['type'] + ttl = int(resource['ttl']) + rrdatas = resource['rrdatas'] + return cls(name, record_type, ttl, rrdatas, zone=zone)
+
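For illustration, the API returns ``ttl`` as a string, which this factory coerces to an integer; the resource dict below is a hypothetical API payload and ``zone`` an existing :class:`gcloud.dns.zone.ManagedZone`::

    resource = {
        'name': 'www.example.com.',
        'type': 'A',
        'ttl': '3600',                # a string in the API payload
        'rrdatas': ['203.0.113.10'],
    }
    rrs = ResourceRecordSet.from_api_repr(resource, zone)

    assert rrs.ttl == 3600            # coerced to int
    assert rrs.zone is zone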
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/dns/zone.html b/0.18.1/_modules/gcloud/dns/zone.html new file mode 100644 index 000000000000..4ebaee7ed994 --- /dev/null +++ b/0.18.1/_modules/gcloud/dns/zone.html @@ -0,0 +1,631 @@ + + + + + + + + gcloud.dns.zone — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.dns.zone

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API ManagedZones."""
+import six
+
+from gcloud._helpers import _rfc3339_to_datetime
+from gcloud.exceptions import NotFound
+from gcloud.dns.changes import Changes
+from gcloud.dns.resource_record_set import ResourceRecordSet
+
+
+
[docs]class ManagedZone(object): + """ManagedZones are containers for DNS resource records. + + See: + https://cloud.google.com/dns/api/v1/managedZones + + :type name: string + :param name: the name of the zone + + :type dns_name: string or :class:`NoneType` + :param dns_name: the DNS name of the zone. If not passed, then calls + to :meth:`create` will fail. + + :type client: :class:`gcloud.dns.client.Client` + :param client: A client which holds credentials and project configuration + for the zone (which requires a project). + + :type description: string or :class:`NoneType` + :param description: the description for the zone. If not passed, defaults + to the value of 'dns_name'. + """ + + def __init__(self, name, dns_name=None, client=None, description=None): + self.name = name + self.dns_name = dns_name + self._client = client + self._properties = {} + if description is None: + description = dns_name + self.description = description + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a zone given its API representation + + :type resource: dict + :param resource: zone resource representation returned from the API + + :type client: :class:`gcloud.dns.client.Client` + :param client: Client which holds credentials and project + configuration for the zone. + + :rtype: :class:`gcloud.dns.zone.ManagedZone` + :returns: Zone parsed from ``resource``. + """ + name = resource.get('name') + dns_name = resource.get('dnsName') + if name is None or dns_name is None: + raise KeyError('Resource lacks required identity information:' + '["name"]["dnsName"]') + zone = cls(name, dns_name, client=client) + zone._set_properties(resource) + return zone
+ + @property + def project(self): + """Project bound to the zone. + + :rtype: string + :returns: the project (derived from the client). + """ + return self._client.project + + @property + def path(self): + """URL path for the zone's APIs. + + :rtype: string + :returns: the path based on project and zone name. + """ + return '/projects/%s/managedZones/%s' % (self.project, self.name) + + @property + def created(self): + """Datetime at which the zone was created. + + :rtype: ``datetime.datetime``, or ``NoneType`` + :returns: the creation time (None until set from the server). + """ + return self._properties.get('creationTime') + + @property + def name_servers(self): + """Name servers assigned to the zone. + + :rtype: list of strings, or ``NoneType``. + :returns: the assigned name servers (None until set from the server). + """ + return self._properties.get('nameServers') + + @property + def zone_id(self): + """ID for the zone resource. + + :rtype: string, or ``NoneType`` + :returns: the ID (None until set from the server). + """ + return self._properties.get('id') + + @property + def description(self): + """Description of the zone. + + :rtype: string, or ``NoneType`` + :returns: The description as set by the user, or None (the default). + """ + return self._properties.get('description') + + @description.setter + def description(self, value): + """Update description of the zone. + + :type value: string, or ``NoneType`` + :param value: new description + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['description'] = value + + @property + def name_server_set(self): + """Named set of DNS name servers that all host the same ManagedZones. + + Most users will leave this blank. + + See: + https://cloud.google.com/dns/api/v1/managedZones#nameServerSet + + :rtype: string, or ``NoneType`` + :returns: The name as set by the user, or None (the default). + """ + return self._properties.get('nameServerSet') + + @name_server_set.setter + def name_server_set(self, value): + """Update named set of DNS name servers. + + :type value: string, or ``NoneType`` + :param value: new name server set + + :raises: ValueError for invalid value types. + """ + if not isinstance(value, six.string_types) and value is not None: + raise ValueError("Pass a string, or None") + self._properties['nameServerSet'] = value + +
[docs] def resource_record_set(self, name, record_type, ttl, rrdatas): + """Construct a resource record set bound to this zone. + + :type name: string + :param name: Name of the record set. + + :type record_type: string + :param record_type: RR type + + :type ttl: integer + :param ttl: TTL for the RR, in seconds + + :type rrdatas: list of string + :param rrdatas: resource data for the RR + + :rtype: :class:`gcloud.dns.resource_record_set.ResourceRecordSet` + :returns: a new ``ResourceRecordSet`` instance + """ + return ResourceRecordSet(name, record_type, ttl, rrdatas, zone=self)
+ +
[docs] def changes(self): + """Construct a change set bound to this zone. + + :rtype: :class:`gcloud.dns.changes.Changes` + :returns: a new ``Changes`` instance + """ + return Changes(zone=self)
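Putting ``resource_record_set`` and ``changes`` together, a hedged sketch of staging and submitting a record addition (it assumes the ``Changes.add_record_set`` and ``Changes.create`` methods from ``gcloud.dns.changes``; names and data are placeholders)::

    from gcloud import dns

    client = dns.Client(project='my-project')
    zone = client.zone('example-zone', 'example.com.')

    record_set = zone.resource_record_set(
        'www.example.com.', 'A', 3600, ['203.0.113.10'])

    changes = zone.changes()
    changes.add_record_set(record_set)   # stage the addition locally
    changes.create()                     # submit the change set to the API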
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + + :rtype: :class:`gcloud.dns.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + + def _set_properties(self, api_response): + """Update properties from resource in body of ``api_response`` + + :type api_response: httplib2.Response + :param api_response: response returned from an API call + """ + self._properties.clear() + cleaned = api_response.copy() + self.dns_name = cleaned.pop('dnsName', None) + if 'creationTime' in cleaned: + cleaned['creationTime'] = _rfc3339_to_datetime( + cleaned['creationTime']) + self._properties.update(cleaned) + + def _build_resource(self): + """Generate a resource for ``create`` or ``update``.""" + resource = { + 'name': self.name, + } + + if self.dns_name is not None: + resource['dnsName'] = self.dns_name + + if self.description is not None: + resource['description'] = self.description + + if self.name_server_set is not None: + resource['nameServerSet'] = self.name_server_set + + return resource + +
[docs] def create(self, client=None): + """API call: create the zone via a POST request + + See: + https://cloud.google.com/dns/api/v1/managedZones/create + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + """ + client = self._require_client(client) + path = '/projects/%s/managedZones' % (self.project,) + api_response = client.connection.api_request( + method='POST', path=path, data=self._build_resource()) + self._set_properties(api_response)
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the zone via a GET request + + See + https://cloud.google.com/dns/api/v1/managedZones/get + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + + :rtype: bool + :returns: Boolean indicating existence of the managed zone. + """ + client = self._require_client(client) + + try: + client.connection.api_request(method='GET', path=self.path, + query_params={'fields': 'id'}) + except NotFound: + return False + else: + return True
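A common create-if-missing pattern built from ``exists`` and ``create`` (a sketch; the project, zone name, and DNS name are placeholders)::

    from gcloud import dns

    client = dns.Client(project='my-project')
    zone = client.zone('example-zone', 'example.com.',
                       description='Zone for example.com')
    if not zone.exists():
        zone.create()    # POSTs the resource built by _build_resource()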
+ +
[docs] def reload(self, client=None): + """API call: refresh zone properties via a GET request + + See + https://cloud.google.com/dns/api/v1/managedZones/get + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + """ + client = self._require_client(client) + + api_response = client.connection.api_request( + method='GET', path=self.path) + self._set_properties(api_response)
+ +
[docs] def delete(self, client=None): + """API call: delete the zone via a DELETE request + + See: + https://cloud.google.com/dns/api/v1/managedZones/delete + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + """ + client = self._require_client(client) + client.connection.api_request(method='DELETE', path=self.path)
+ +
[docs] def list_resource_record_sets(self, max_results=None, page_token=None, + client=None): + """List resource record sets for this zone. + + See: + https://cloud.google.com/dns/api/v1/resourceRecordSets/list + + :type max_results: int + :param max_results: maximum number of record sets to return. If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of record sets. + If not passed, the API will return the first page + of record sets. + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + + :rtype: tuple, (list, str) + :returns: list of + :class:`gcloud.dns.resource_record_set.ResourceRecordSet`, + plus a "next page token" string: if the token is not None, + indicates that more record sets can be retrieved with + another call (pass that value as ``page_token``). + """ + params = {} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/managedZones/%s/rrsets' % ( + self.project, self.name) + client = self._require_client(client) + conn = client.connection + resp = conn.api_request(method='GET', path=path, query_params=params) + records = [ResourceRecordSet.from_api_repr(resource, self) + for resource in resp['rrsets']] + return records, resp.get('nextPageToken')
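Because the method returns a ``(records, token)`` pair rather than a self-paging iterator, callers drive pagination themselves; for example (a sketch, continuing with the ``zone`` from above)::

    records, token = zone.list_resource_record_sets(max_results=100)
    while True:
        for record in records:
            print(record.name, record.record_type)
        if token is None:         # no more pages
            break
        records, token = zone.list_resource_record_sets(
            max_results=100, page_token=token)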
+ +
[docs] def list_changes(self, max_results=None, page_token=None, client=None): + """List change sets for this zone. + + See: + https://cloud.google.com/dns/api/v1/changes/list + + :type max_results: int + :param max_results: maximum number of change sets to return. If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of change sets. + If not passed, the API will return the first page + of change sets. + + :type client: :class:`gcloud.dns.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current zone. + + :rtype: tuple, (list, str) + :returns: list of + :class:`gcloud.dns.changes.Changes`, + plus a "next page token" string: if the token is not None, + indicates that more change sets can be retrieved with + another call (pass that value as ``page_token``). + """ + params = {} + + if max_results is not None: + params['maxResults'] = max_results + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/managedZones/%s/changes' % ( + self.project, self.name) + client = self._require_client(client) + conn = client.connection + resp = conn.api_request(method='GET', path=path, query_params=params) + changes = [Changes.from_api_repr(resource, self) + for resource in resp['changes']] + return changes, resp.get('nextPageToken')
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/error_reporting/client.html b/0.18.1/_modules/gcloud/error_reporting/client.html
new file mode 100644
index 000000000000..1dc3f3e2cea1
--- /dev/null
+++ b/0.18.1/_modules/gcloud/error_reporting/client.html
@@ -0,0 +1,485 @@
+gcloud.error_reporting.client — gcloud 0.18.1 documentation

Source code for gcloud.error_reporting.client

+# Copyright 2016 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Stackdriver Logging API"""
+
+import traceback
+
+import gcloud.logging.client
+import six
+
+
+
[docs]class HTTPContext(object): + """HTTPContext defines an object that captures the parameters for the + httpRequest part of the Error Reporting API. + + :type method: string + :param method: The type of HTTP request, such as GET, POST, etc. + + :type url: string + :param url: The URL of the request + + :type user_agent: string + :param user_agent: The user agent information that is provided with the + request. + + :type referrer: string + :param referrer: The referrer information that is provided with the + request. + + :type response_status_code: int + :param response_status_code: The HTTP response status code for the request. + + :type remote_ip: string + :param remote_ip: The IP address from which the request originated. This + can be IPv4, IPv6, or a token which is derived from + the IP address, depending on the data that has been + provided in the error report. + """ + + def __init__(self, method=None, url=None, + user_agent=None, referrer=None, + response_status_code=None, remote_ip=None): + self.method = method + self.url = url + # intentionally camelCase, to match what the JSON API expects + # pylint: disable=invalid-name + self.userAgent = user_agent + self.referrer = referrer + self.responseStatusCode = response_status_code + self.remoteIp = remote_ip
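For instance, a context for a failed request might be built like this (all values are illustrative); the camelCase attribute names let ``_send_error_report`` below serialize the object's ``__dict__`` directly into the ``httpContext`` JSON field::

    from gcloud.error_reporting import HTTPContext

    http_context = HTTPContext(
        method='GET',
        url='http://example.com/checkout',
        user_agent='Mozilla/5.0',
        referrer='http://example.com/cart',
        response_status_code=500,
        remote_ip='203.0.113.7')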
+ + +
[docs]class Client(object): + """Error Reporting client. Currently Error Reporting is done by creating + a Logging client. + + :type project: string + :param project: the project which the client acts on behalf of. If not + passed, falls back to the default inferred from the + environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + + :type service: str + :param service: An identifier of the service, such as the name of the + executable, job, or Google App Engine service name. This + field is expected to have a low number of values that are + relatively stable over time, as opposed to version, + which can be changed whenever new code is deployed. + + :type version: str + :param version: Represents the source code version that the developer + provided, which could represent a version label or a Git + SHA-1 hash, for example. If the developer did not provide + a version, the value is set to default. + + :raises: :class:`ValueError` if the project is neither passed in nor + set in the environment. + """ + + def __init__(self, project=None, + credentials=None, + http=None, + service=None, + version=None): + self.logging_client = gcloud.logging.client.Client( + project, credentials, http) + self.service = service if service else self.DEFAULT_SERVICE + self.version = version + + DEFAULT_SERVICE = 'python' + + def _send_error_report(self, message, + report_location=None, http_context=None, user=None): + """Makes the call to the Error Reporting API via the log stream. + + This is the lower-level interface to build the payload; generally, + users will use either report() or report_exception() to automatically + gather the parameters for this method. + + Currently this method sends the Error Report by formatting a structured + log message according to + + https://cloud.google.com/error-reporting/docs/formatting-error-messages + + :type message: string + :param message: The stack trace that was reported or logged by the + service. + + :type report_location: dict + :param report_location: The location in the source code where the + decision was made to report the error, usually the place + where it was logged. For a logged exception this would be the + source line where the exception is logged, usually close to + the place where it was caught. + + This should be a Python dict that contains the keys 'filePath', + 'lineNumber', and 'functionName' + + :type http_context: :class:`gcloud.error_reporting.HTTPContext` + :param http_context: The HTTP request which was processed when the + error was triggered. + + :type user: string + :param user: The user who caused or was affected by the crash. This can + be a user ID, an email address, or an arbitrary token that + uniquely identifies the user. When sending an error + report, leave this field empty if the user was not + logged in. In this case the Error Reporting system will + use other data, such as remote IP address, + to distinguish affected users. 
+ """ + payload = { + 'serviceContext': { + 'service': self.service, + }, + 'message': '{0}'.format(message) + } + + if self.version: + payload['serviceContext']['version'] = self.version + + if report_location or http_context or user: + payload['context'] = {} + + if report_location: + payload['context']['reportLocation'] = report_location + + if http_context: + http_context_dict = http_context.__dict__ + # strip out None values + # once py26 support is dropped this can use dict comprehension + payload['context']['httpContext'] = dict( + (k, v) for (k, v) in six.iteritems(http_context_dict) + if v is not None + ) + + if user: + payload['context']['user'] = user + + logger = self.logging_client.logger('errors') + logger.log_struct(payload) + +
[docs] def report(self, message, http_context=None, user=None): + """ Reports a message to Stackdriver Error Reporting + https://cloud.google.com/error-reporting/docs/formatting-error-messages + + :type message: str + :param message: A user-supplied message to report + + :type http_context: :class:`gcloud.error_reporting.HTTPContext` + :param http_context: The HTTP request which was processed when the + error was triggered. + + :type user: string + :param user: The user who caused or was affected by the crash. This + can be a user ID, an email address, or an arbitrary + token that uniquely identifies the user. When sending + an error report, leave this field empty if the user + was not logged in. In this case the Error Reporting + system will use other data, such as remote IP address, + to distinguish affected users. + + Example:: + + >>> client.report("Something went wrong!") + """ + stack = traceback.extract_stack() + last_call = stack[-2] + file_path = last_call[0] + line_number = last_call[1] + function_name = last_call[2] + report_location = { + 'filePath': file_path, + 'lineNumber': line_number, + 'functionName': function_name + } + + self._send_error_report(message, + http_context=http_context, + user=user, + report_location=report_location)
+ +
[docs] def report_exception(self, http_context=None, user=None): + """ Reports the details of the latest exception to Stackdriver Error + Reporting. + + :type http_context: :class:`gcloud.error_reporting.HTTPContext` + :param http_context: The HTTP request which was processed when the + error was triggered. + + :type user: string + :param user: The user who caused or was affected by the crash. This + can be a user ID, an email address, or an arbitrary + token that uniquely identifies the user. When sending an + error report, leave this field empty if the user was + not logged in. In this case the Error Reporting system + will use other data, such as remote IP address, + to distinguish affected users. + + Example:: + + >>> try: + ...     raise NameError + ... except Exception: + ...     client.report_exception() + """ + self._send_error_report(traceback.format_exc(), + http_context=http_context, + user=user)
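Combining the pieces, a hedged end-to-end sketch that reports a caught exception along with its HTTP context (the project ID, service metadata, request values, and ``charge_card`` are all placeholders)::

    from gcloud import error_reporting
    from gcloud.error_reporting import HTTPContext

    client = error_reporting.Client(project='my-project',
                                    service='checkout',
                                    version='1.0.1')
    http_context = HTTPContext(method='POST', url='/charge',
                               response_status_code=500)
    try:
        charge_card()                       # placeholder application code
    except Exception:
        # formats traceback.format_exc() into a structured log entry
        client.report_exception(http_context=http_context,
                                user='user@example.com')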
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/exceptions.html b/0.18.1/_modules/gcloud/exceptions.html
new file mode 100644
index 000000000000..bbc075462f83
--- /dev/null
+++ b/0.18.1/_modules/gcloud/exceptions.html
@@ -0,0 +1,457 @@
+gcloud.exceptions — gcloud 0.18.1 documentation

Source code for gcloud.exceptions

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Custom exceptions for :mod:`gcloud` package.
+
+See: https://cloud.google.com/storage/docs/json_api/v1/status-codes
+"""
+
+import copy
+import json
+import six
+
+_HTTP_CODE_TO_EXCEPTION = {}  # populated at end of module
+
+
+
[docs]class GCloudError(Exception): + """Base error class for gcloud errors (abstract). + + Each subclass represents a single type of HTTP error response. + """ + code = None + """HTTP status code. Concrete subclasses *must* define. + + See: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html + """ + + def __init__(self, message, errors=()): + super(GCloudError, self).__init__() + # suppress deprecation warning under 2.6.x + self.message = message + self._errors = errors + + def __str__(self): + return '%d %s' % (self.code, self.message) + + @property + def errors(self): + """Detailed error information. + + :rtype: list(dict) + :returns: a list of mappings describing each error. + """ + return [copy.deepcopy(error) for error in self._errors]
+ + +
[docs]class Redirection(GCloudError): + """Base for 3xx responses + + This class is abstract. + """
+ + +
[docs]class MovedPermanently(Redirection): + """Exception mapping a '301 Moved Permanently' response.""" + code = 301
+ + +
[docs]class NotModified(Redirection): + """Exception mapping a '304 Not Modified' response.""" + code = 304
+ + +
[docs]class TemporaryRedirect(Redirection): + """Exception mapping a '307 Temporary Redirect' response.""" + code = 307
+ + +
[docs]class ResumeIncomplete(Redirection): + """Exception mapping a '308 Resume Incomplete' response.""" + code = 308
+ + +
[docs]class ClientError(GCloudError): + """Base for 4xx responses + + This class is abstract + """
+ + +
[docs]class BadRequest(ClientError): + """Exception mapping a '400 Bad Request' response.""" + code = 400
+ + +
[docs]class Unauthorized(ClientError): + """Exception mapping a '401 Unauthorized' response.""" + code = 401
+ + +
[docs]class Forbidden(ClientError): + """Exception mapping a '403 Forbidden' response.""" + code = 403
+ + +
[docs]class NotFound(ClientError): + """Exception mapping a '404 Not Found' response.""" + code = 404
+ + +
[docs]class MethodNotAllowed(ClientError): + """Exception mapping a '405 Method Not Allowed' response.""" + code = 405
+ + +
[docs]class Conflict(ClientError): + """Exception mapping a '409 Conflict' response.""" + code = 409
+ + +
[docs]class LengthRequired(ClientError): + """Exception mapping a '411 Length Required' response.""" + code = 411
+ + +
[docs]class PreconditionFailed(ClientError): + """Exception mapping a '412 Precondition Failed' response.""" + code = 412
+ + +
[docs]class RequestRangeNotSatisfiable(ClientError): + """Exception mapping a '416 Request Range Not Satisfiable' response.""" + code = 416
+ + +
[docs]class TooManyRequests(ClientError): + """Exception mapping a '429 Too Many Requests' response.""" + code = 429
+ + +
[docs]class ServerError(GCloudError): + """Base for 5xx responses: (abstract)"""
+ + +
[docs]class InternalServerError(ServerError): + """Exception mapping a '500 Internal Server Error' response.""" + code = 500
+ + +
[docs]class MethodNotImplemented(ServerError): + """Exception mapping a '501 Not Implemented' response.""" + code = 501
+ + +
[docs]class ServiceUnavailable(ServerError): + """Exception mapping a '503 Service Unavailable' response.""" + code = 503
+ + +
[docs]def make_exception(response, content, error_info=None, use_json=True): + """Factory: create exception based on HTTP response code. + + :type response: :class:`httplib2.Response` or other HTTP response object + :param response: A response object that defines a status code as the + status attribute. + + :type content: string or dictionary + :param content: The body of the HTTP error response. + + :type error_info: string + :param error_info: Optional string giving extra information about the + failed request. + + :type use_json: bool + :param use_json: Flag indicating if ``content`` is expected to be JSON. + + :rtype: instance of :class:`GCloudError`, or a concrete subclass. + :returns: Exception specific to the error response. + """ + if isinstance(content, six.binary_type): + content = content.decode('utf-8') + + if isinstance(content, six.string_types): + payload = None + if use_json: + try: + payload = json.loads(content) + except ValueError: + # Expected JSON but received something else. + pass + if payload is None: + payload = {'error': {'message': content}} + else: + payload = content + + message = payload.get('error', {}).get('message', '') + errors = payload.get('error', {}).get('errors', ()) + + if error_info is not None: + message += ' (%s)' % (error_info,) + + try: + klass = _HTTP_CODE_TO_EXCEPTION[response.status] + except KeyError: + error = GCloudError(message, errors) + error.code = response.status + else: + error = klass(message, errors) + return error
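A minimal sketch of the factory in action; any object with a ``status`` attribute works in place of a real ``httplib2.Response``, and the payload values below are illustrative::

    from gcloud.exceptions import NotFound, make_exception

    class _FakeResponse(object):
        # stand-in for httplib2.Response
        def __init__(self, status):
            self.status = status

    content = '{"error": {"message": "bucket not found", "errors": []}}'
    error = make_exception(_FakeResponse(404), content,
                           error_info='GET /b/missing')
    assert isinstance(error, NotFound)      # looked up via the code mapping
    print(error)                            # 404 bucket not found (GET /b/missing)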
+ + +def _walk_subclasses(klass): + """Recursively walk subclass tree.""" + for sub in klass.__subclasses__(): + yield sub + for subsub in _walk_subclasses(sub): + yield subsub + + +# Build the code->exception class mapping. +for _eklass in _walk_subclasses(GCloudError): + code = getattr(_eklass, 'code', None) + if code is not None: + _HTTP_CODE_TO_EXCEPTION[code] = _eklass +
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/logging/client.html b/0.18.1/_modules/gcloud/logging/client.html
new file mode 100644
index 000000000000..f8f3eda58d3f
--- /dev/null
+++ b/0.18.1/_modules/gcloud/logging/client.html
@@ -0,0 +1,533 @@
+gcloud.logging.client — gcloud 0.18.1 documentation

Source code for gcloud.logging.client

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google Stackdriver Logging API."""
+
+import os
+
+try:
+    from google.cloud.logging.v2.config_service_v2_api import (
+        ConfigServiceV2Api as GeneratedSinksAPI)
+    from google.cloud.logging.v2.logging_service_v2_api import (
+        LoggingServiceV2Api as GeneratedLoggingAPI)
+    from google.cloud.logging.v2.metrics_service_v2_api import (
+        MetricsServiceV2Api as GeneratedMetricsAPI)
+    from gcloud.logging._gax import _LoggingAPI as GAXLoggingAPI
+    from gcloud.logging._gax import _MetricsAPI as GAXMetricsAPI
+    from gcloud.logging._gax import _SinksAPI as GAXSinksAPI
+except ImportError:  # pragma: NO COVER
+    _HAVE_GAX = False
+    GeneratedLoggingAPI = GAXLoggingAPI = None
+    GeneratedMetricsAPI = GAXMetricsAPI = None
+    GeneratedSinksAPI = GAXSinksAPI = None
+else:
+    _HAVE_GAX = True
+
+from gcloud.client import JSONClient
+from gcloud.logging.connection import Connection
+from gcloud.logging.connection import _LoggingAPI as JSONLoggingAPI
+from gcloud.logging.connection import _MetricsAPI as JSONMetricsAPI
+from gcloud.logging.connection import _SinksAPI as JSONSinksAPI
+from gcloud.logging.entries import ProtobufEntry
+from gcloud.logging.entries import StructEntry
+from gcloud.logging.entries import TextEntry
+from gcloud.logging.logger import Logger
+from gcloud.logging.metric import Metric
+from gcloud.logging.sink import Sink
+
+
+_USE_GAX = _HAVE_GAX and (os.environ.get('GCLOUD_ENABLE_GAX') is not None)
+
+
+
[docs]class Client(JSONClient): + """Client to bundle configuration needed for API requests. + + :type project: str + :param project: the project which the client acts on behalf of. + If not passed, falls back to the default inferred + from the environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + _logging_api = _sinks_api = _metrics_api = None + + @property + def logging_api(self): + """Helper for logging-related API calls. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs + """ + if self._logging_api is None: + if _USE_GAX: + generated = GeneratedLoggingAPI() + self._logging_api = GAXLoggingAPI(generated) + else: + self._logging_api = JSONLoggingAPI(self.connection) + return self._logging_api + + @property + def sinks_api(self): + """Helper for log sink-related API calls. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks + """ + if self._sinks_api is None: + if _USE_GAX: + generated = GeneratedSinksAPI() + self._sinks_api = GAXSinksAPI(generated) + else: + self._sinks_api = JSONSinksAPI(self.connection) + return self._sinks_api + + @property + def metrics_api(self): + """Helper for log metric-related API calls. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics + """ + if self._metrics_api is None: + if _USE_GAX: + generated = GeneratedMetricsAPI() + self._metrics_api = GAXMetricsAPI(generated) + else: + self._metrics_api = JSONMetricsAPI(self.connection) + return self._metrics_api + +
[docs] def logger(self, name): + """Creates a logger bound to the current client. + + :type name: str + :param name: the name of the logger to be constructed. + + :rtype: :class:`gcloud.logging.logger.Logger` + :returns: Logger created with the current client. + """ + return Logger(name, client=self)
+ + def _entry_from_resource(self, resource, loggers): + """Detect correct entry type from resource and instantiate. + + :type resource: dict + :param resource: one entry resource from API response + + :type loggers: dict or None + :param loggers: A mapping of logger fullnames -> loggers. If not + passed, the entry will have a newly-created logger. + + :rtype: One of: + :class:`gcloud.logging.entries.TextEntry`, + :class:`gcloud.logging.entries.StructEntry`, + :class:`gcloud.logging.entries.ProtobufEntry` + :returns: the entry instance, constructed via the resource + """ + if 'textPayload' in resource: + return TextEntry.from_api_repr(resource, self, loggers) + elif 'jsonPayload' in resource: + return StructEntry.from_api_repr(resource, self, loggers) + elif 'protoPayload' in resource: + return ProtobufEntry.from_api_repr(resource, self, loggers) + raise ValueError('Cannot parse log entry resource') + +
[docs] def list_entries(self, projects=None, filter_=None, order_by=None, + page_size=None, page_token=None): + """Return a page of log entries. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list + + :type projects: list of strings + :param projects: project IDs to include. If not passed, + defaults to the project bound to the client. + + :type filter_: str + :param filter_: a filter expression. See: + https://cloud.google.com/logging/docs/view/advanced_filters + + :type order_by: str + :param order_by: One of :data:`gcloud.logging.ASCENDING` or + :data:`gcloud.logging.DESCENDING`. + + :type page_size: int + :param page_size: maximum number of entries to return. If not passed, + defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of entries. If not + passed, the API will return the first page of + entries. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.logging.entries.TextEntry`, + :class:`gcloud.logging.entries.StructEntry`, or + :class:`gcloud.logging.entries.ProtobufEntry`, plus a + "next page token" string: if not None, indicates that + more entries can be retrieved with another call (pass that + value as ``page_token``). + """ + if projects is None: + projects = [self.project] + + resources, token = self.logging_api.list_entries( + projects=projects, filter_=filter_, order_by=order_by, + page_size=page_size, page_token=page_token) + loggers = {} + entries = [self._entry_from_resource(resource, loggers) + for resource in resources] + return entries, token
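For example, fetching the most recent ERROR-and-above entries (a sketch; the project ID is a placeholder and the filter syntax follows the advanced-filters page linked above)::

    from gcloud import logging

    client = logging.Client(project='my-project')
    entries, token = client.list_entries(
        filter_='severity>=ERROR',
        order_by=logging.DESCENDING,
        page_size=50)
    for entry in entries:
        print(entry.timestamp, entry.payload)
    # if token is not None, pass it back as page_token for the next page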
+ +
[docs] def sink(self, name, filter_=None, destination=None): + """Creates a sink bound to the current client. + + :type name: str + :param name: the name of the sink to be constructed. + + :type filter_: str + :param filter_: (optional) the advanced logs filter expression + defining the entries exported by the sink. If not + passed, the instance should already exist, to be + refreshed via :meth:`Sink.reload`. + + :type destination: str + :param destination: destination URI for the entries exported by + the sink. If not passed, the instance should + already exist, to be refreshed via + :meth:`Sink.reload`. + + :rtype: :class:`gcloud.logging.sink.Sink` + :returns: Sink created with the current client. + """ + return Sink(name, filter_, destination, client=self)
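A hedged sketch of defining and creating a sink that exports errors to a Cloud Storage bucket (the bucket name is a placeholder, and ``Sink.exists``/``Sink.create`` are assumed from ``gcloud.logging.sink``)::

    from gcloud import logging

    client = logging.Client(project='my-project')
    sink = client.sink(
        'error-export',
        filter_='severity>=ERROR',
        destination='storage.googleapis.com/my-export-bucket')
    if not sink.exists():
        sink.create()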
+ +
[docs] def list_sinks(self, page_size=None, page_token=None): + """List sinks for the project associated with this client. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list + + :type page_size: int + :param page_size: maximum number of sinks to return. If not passed, + defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of sinks. If not + passed, the API will return the first page of + sinks. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.logging.sink.Sink`, plus a + "next page token" string: if not None, indicates that + more sinks can be retrieved with another call (pass that + value as ``page_token``). + """ + resources, token = self.sinks_api.list_sinks( + self.project, page_size, page_token) + sinks = [Sink.from_api_repr(resource, self) + for resource in resources] + return sinks, token
+ +
[docs] def metric(self, name, filter_=None, description=''): + """Creates a metric bound to the current client. + + :type name: str + :param name: the name of the metric to be constructed. + + :type filter_: str + :param filter_: the advanced logs filter expression defining the + entries tracked by the metric. If not + passed, the instance should already exist, to be + refreshed via :meth:`Metric.reload`. + + :type description: str + :param description: the description of the metric to be constructed. + If not passed, the instance should already exist, + to be refreshed via :meth:`Metric.reload`. + + :rtype: :class:`gcloud.logging.metric.Metric` + :returns: Metric created with the current client. + """ + return Metric(name, filter_, client=self, description=description)
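And the analogous sketch for a logs-based metric (again assuming ``Metric.exists``/``Metric.create`` from ``gcloud.logging.metric``; names are placeholders)::

    metric = client.metric(
        'error-count',
        filter_='severity>=ERROR',
        description='Count of ERROR-severity entries')
    if not metric.exists():
        metric.create()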
+ +
[docs] def list_metrics(self, page_size=None, page_token=None): + """List metrics for the project associated with this client. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list + + :type page_size: int + :param page_size: maximum number of metrics to return. If not passed, + defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of metrics. If not + passed, the API will return the first page of + metrics. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.logging.metric.Metric`, plus a + "next page token" string: if not None, indicates that + more metrics can be retrieved with another call (pass that + value as ``page_token``). + """ + resources, token = self.metrics_api.list_metrics( + self.project, page_size, page_token) + metrics = [Metric.from_api_repr(resource, self) + for resource in resources] + return metrics, token
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/logging/connection.html b/0.18.1/_modules/gcloud/logging/connection.html
new file mode 100644
index 000000000000..de7a089fc81a
--- /dev/null
+++ b/0.18.1/_modules/gcloud/logging/connection.html
@@ -0,0 +1,674 @@
+gcloud.logging.connection — gcloud 0.18.1 documentation

Source code for gcloud.logging.connection

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with Stackdriver Logging connections."""
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Stackdriver Logging via the JSON REST API. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + connection. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: (Optional) HTTP object to make requests. + + :type api_base_url: string + :param api_base_url: The base of the API call URL. Defaults to the value + :attr:`Connection.API_BASE_URL`. + """ + + API_BASE_URL = 'https://logging.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v2beta1' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/logging.read', + 'https://www.googleapis.com/auth/logging.write', + 'https://www.googleapis.com/auth/logging.admin', + 'https://www.googleapis.com/auth/cloud-platform') + """The scopes required for authenticating as a Logging consumer."""
+ + +class _LoggingAPI(object): + """Helper mapping logging-related APIs. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs + + :type connection: :class:`gcloud.logging.connection.Connection` + :param connection: the connection used to make API requests. + """ + def __init__(self, connection): + self._connection = connection + + def list_entries(self, projects, filter_=None, order_by=None, + page_size=None, page_token=None): + """Return a page of log entry resources. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list + + :type projects: list of strings + :param projects: project IDs to include. If not passed, + defaults to the project bound to the client. + + :type filter_: str + :param filter_: a filter expression. See: + https://cloud.google.com/logging/docs/view/advanced_filters + + :type order_by: str + :param order_by: One of :data:`gcloud.logging.ASCENDING` or + :data:`gcloud.logging.DESCENDING`. + + :type page_size: int + :param page_size: maximum number of entries to return. If not passed, + defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of entries. If not + passed, the API will return the first page of + entries. + + :rtype: tuple, (list, str) + :returns: list of mappings, plus a "next page token" string: + if not None, indicates that more entries can be retrieved + with another call (pass that value as ``page_token``). + """ + params = {'projectIds': projects} + + if filter_ is not None: + params['filter'] = filter_ + + if order_by is not None: + params['orderBy'] = order_by + + if page_size is not None: + params['pageSize'] = page_size + + if page_token is not None: + params['pageToken'] = page_token + + resp = self._connection.api_request( + method='POST', path='/entries:list', data=params) + + return resp.get('entries', ()), resp.get('nextPageToken') + + def write_entries(self, entries, logger_name=None, resource=None, + labels=None): + """API call: log an entry resource via a POST request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write + + :type entries: sequence of mapping + :param entries: the log entry resources to log. + + :type logger_name: string + :param logger_name: name of default logger to which to log the entries; + individual entries may override. + + :type resource: mapping + :param resource: default resource to associate with entries; + individual entries may override. + + :type labels: mapping + :param labels: default labels to associate with entries; + individual entries may override. 
+ """ + data = {'entries': list(entries)} + + if logger_name is not None: + data['logName'] = logger_name + + if resource is not None: + data['resource'] = resource + + if labels is not None: + data['labels'] = labels + + self._connection.api_request(method='POST', path='/entries:write', + data=data) + + def logger_delete(self, project, logger_name): + """API call: delete all entries in a logger via a DELETE request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs/delete + + :type project: string + :param project: ID of project containing the log entries to delete + + :type logger_name: string + :param logger_name: name of logger containing the log entries to delete + """ + path = '/projects/%s/logs/%s' % (project, logger_name) + self._connection.api_request(method='DELETE', path=path) + + +class _SinksAPI(object): + """Helper mapping sink-related APIs. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks + + :type connection: :class:`gcloud.logging.connection.Connection` + :param connection: the connection used to make API requests. + """ + def __init__(self, connection): + self._connection = connection + + def list_sinks(self, project, page_size=None, page_token=None): + """List sinks for the project associated with this client. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/list + + :type project: string + :param project: ID of the project whose sinks are to be listed. + + :type page_size: int + :param page_size: maximum number of sinks to return, If not passed, + defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of sinks. If not + passed, the API will return the first page of + sinks. + + :rtype: tuple, (list, str) + :returns: list of mappings, plus a "next page token" string: + if not None, indicates that more sinks can be retrieved + with another call (pass that value as ``page_token``). + """ + params = {} + + if page_size is not None: + params['pageSize'] = page_size + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/sinks' % (project,) + resp = self._connection.api_request( + method='GET', path=path, query_params=params) + sinks = resp.get('sinks', ()) + return sinks, resp.get('nextPageToken') + + def sink_create(self, project, sink_name, filter_, destination): + """API call: create a sink resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/create + + :type project: string + :param project: ID of the project in which to create the sink. + + :type sink_name: string + :param sink_name: the name of the sink + + :type filter_: string + :param filter_: the advanced logs filter expression defining the + entries exported by the sink. + + :type destination: string + :param destination: destination URI for the entries exported by + the sink. + """ + target = '/projects/%s/sinks' % (project,) + data = { + 'name': sink_name, + 'filter': filter_, + 'destination': destination, + } + self._connection.api_request(method='POST', path=target, data=data) + + def sink_get(self, project, sink_name): + """API call: retrieve a sink resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/get + + :type project: string + :param project: ID of the project containing the sink. 
+ + :type sink_name: string + :param sink_name: the name of the sink + + :rtype: dict + :returns: The JSON sink object returned from the API. + """ + target = '/projects/%s/sinks/%s' % (project, sink_name) + return self._connection.api_request(method='GET', path=target) + + def sink_update(self, project, sink_name, filter_, destination): + """API call: update a sink resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/update + + :type project: string + :param project: ID of the project containing the sink. + + :type sink_name: string + :param sink_name: the name of the sink + + :type filter_: string + :param filter_: the advanced logs filter expression defining the + entries exported by the sink. + + :type destination: string + :param destination: destination URI for the entries exported by + the sink. + """ + target = '/projects/%s/sinks/%s' % (project, sink_name) + data = { + 'name': sink_name, + 'filter': filter_, + 'destination': destination, + } + self._connection.api_request(method='PUT', path=target, data=data) + + def sink_delete(self, project, sink_name): + """API call: delete a sink resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/delete + + :type project: string + :param project: ID of the project containing the sink. + + :type sink_name: string + :param sink_name: the name of the sink + """ + target = '/projects/%s/sinks/%s' % (project, sink_name) + self._connection.api_request(method='DELETE', path=target) + + +class _MetricsAPI(object): + """Helper mapping metric-related APIs. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics + + :type connection: :class:`gcloud.logging.connection.Connection` + :param connection: the connection used to make API requests. + """ + def __init__(self, connection): + self._connection = connection + + def list_metrics(self, project, page_size=None, page_token=None): + """List metrics for the project associated with this client. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/list + + :type project: string + :param project: ID of the project whose metrics are to be listed. + + :type page_size: int + :param page_size: maximum number of metrics to return. If not passed, + defaults to a value set by the API. + + :type page_token: str + :param page_token: opaque marker for the next "page" of metrics. If not + passed, the API will return the first page of + metrics. + + :rtype: tuple, (list, str) + :returns: list of mappings, plus a "next page token" string: + if not None, indicates that more metrics can be retrieved + with another call (pass that value as ``page_token``). + """ + params = {} + + if page_size is not None: + params['pageSize'] = page_size + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/metrics' % (project,) + resp = self._connection.api_request( + method='GET', path=path, query_params=params) + metrics = resp.get('metrics', ()) + return metrics, resp.get('nextPageToken') + + def metric_create(self, project, metric_name, filter_, description=None): + """API call: create a metric resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/create + + :type project: string + :param project: ID of the project in which to create the metric. 
+ + :type metric_name: string + :param metric_name: the name of the metric + + :type filter_: string + :param filter_: the advanced logs filter expression defining the + entries tracked by the metric. + + :type description: string + :param description: description of the metric. + """ + target = '/projects/%s/metrics' % (project,) + data = { + 'name': metric_name, + 'filter': filter_, + 'description': description, + } + self._connection.api_request(method='POST', path=target, data=data) + + def metric_get(self, project, metric_name): + """API call: retrieve a metric resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/get + + :type project: string + :param project: ID of the project containing the metric. + + :type metric_name: string + :param metric_name: the name of the metric + + :rtype: dict + :returns: The JSON metric object returned from the API. + """ + target = '/projects/%s/metrics/%s' % (project, metric_name) + return self._connection.api_request(method='GET', path=target) + + def metric_update(self, project, metric_name, filter_, description): + """API call: update a metric resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/update + + :type project: string + :param project: ID of the project containing the metric. + + :type metric_name: string + :param metric_name: the name of the metric + + :type filter_: string + :param filter_: the advanced logs filter expression defining the + entries tracked by the metric. + + :type description: string + :param description: description of the metric. + """ + target = '/projects/%s/metrics/%s' % (project, metric_name) + data = { + 'name': metric_name, + 'filter': filter_, + 'description': description, + } + self._connection.api_request(method='PUT', path=target, data=data) + + def metric_delete(self, project, metric_name): + """API call: delete a metric resource. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/delete + + :type project: string + :param project: ID of the project containing the metric. + + :type metric_name: string + :param metric_name: the name of the metric + """ + target = '/projects/%s/metrics/%s' % (project, metric_name) + self._connection.api_request(method='DELETE', path=target) +
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/logging/entries.html b/0.18.1/_modules/gcloud/logging/entries.html
new file mode 100644
index 000000000000..cb2a50c0a905
--- /dev/null
+++ b/0.18.1/_modules/gcloud/logging/entries.html
@@ -0,0 +1,390 @@
+gcloud.logging.entries — gcloud 0.18.1 documentation

Source code for gcloud.logging.entries

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Log entries within the Google Stackdriver Logging API."""
+
+import json
+import re
+
+from google.protobuf.json_format import Parse
+
+from gcloud._helpers import _name_from_project_path
+from gcloud._helpers import _rfc3339_nanos_to_datetime
+
+
+_LOGGER_TEMPLATE = re.compile(r"""
+    projects/            # static prefix
+    (?P<project>[^/]+)   # initial letter, wordchars + hyphen
+    /logs/               # static midfix
+    (?P<name>[^/]+)      # initial letter, wordchars + allowed punc
+""", re.VERBOSE)
+
+
+
[docs]def logger_name_from_path(path): + """Validate a logger URI path and get the logger name. + + :type path: str + :param path: URI path for a logger API request. + + :rtype: str + :returns: Logger name parsed from ``path``. + :raises: :class:`ValueError` if the ``path`` is ill-formed or if + the project from the ``path`` does not agree with the + ``project`` passed in. + """ + return _name_from_project_path(path, None, _LOGGER_TEMPLATE)
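For example (the path below is illustrative; the helper only does pattern matching)::

    from gcloud.logging.entries import logger_name_from_path

    print(logger_name_from_path('projects/my-project/logs/syslog'))
    # 'syslog'
    # logger_name_from_path('projects/my-project/bogus/syslog')
    # would raise ValueError: the path does not match _LOGGER_TEMPLATE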
+ + +class _BaseEntry(object): + """Base class for TextEntry, StructEntry. + + :type payload: text or dict + :param payload: The payload passed as ``textPayload``, ``jsonPayload``, + or ``protoPayload``. + + :type logger: :class:`gcloud.logging.logger.Logger` + :param logger: the logger used to write the entry. + + :type insert_id: text, or :class:`NoneType` + :param insert_id: (optional) the ID used to identify an entry uniquely. + + :type timestamp: :class:`datetime.datetime`, or :class:`NoneType` + :param timestamp: (optional) timestamp for the entry + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry + """ + def __init__(self, payload, logger, insert_id=None, timestamp=None, + labels=None, severity=None, http_request=None): + self.payload = payload + self.logger = logger + self.insert_id = insert_id + self.timestamp = timestamp + self.labels = labels + self.severity = severity + self.http_request = http_request + + @classmethod + def from_api_repr(cls, resource, client, loggers=None): + """Factory: construct an entry given its API representation + + :type resource: dict + :param resource: text entry resource representation returned from + the API + + :type client: :class:`gcloud.logging.client.Client` + :param client: Client which holds credentials and project + configuration. + + :type loggers: dict or None + :param loggers: A mapping of logger fullnames -> loggers. If not + passed, the entry will have a newly-created logger. + + :rtype: :class:`gcloud.logging.entries.TextEntry` + :returns: Text entry parsed from ``resource``. + """ + if loggers is None: + loggers = {} + logger_fullname = resource['logName'] + logger = loggers.get(logger_fullname) + if logger is None: + logger_name = logger_name_from_path(logger_fullname) + logger = loggers[logger_fullname] = client.logger(logger_name) + payload = resource[cls._PAYLOAD_KEY] + insert_id = resource.get('insertId') + timestamp = resource.get('timestamp') + if timestamp is not None: + timestamp = _rfc3339_nanos_to_datetime(timestamp) + labels = resource.get('labels') + severity = resource.get('severity') + http_request = resource.get('httpRequest') + return cls(payload, logger, insert_id=insert_id, timestamp=timestamp, + labels=labels, severity=severity, http_request=http_request) + + +
[docs]class TextEntry(_BaseEntry): + """Entry created with ``textPayload``. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/LogEntry + """ + _PAYLOAD_KEY = 'textPayload'
+ + +
[docs]class StructEntry(_BaseEntry): + """Entry created with ``jsonPayload``. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/LogEntry + """ + _PAYLOAD_KEY = 'jsonPayload'
+ + +
[docs]class ProtobufEntry(_BaseEntry): + """Entry created with ``protoPayload``. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/LogEntry + """ + _PAYLOAD_KEY = 'protoPayload' + +
[docs] def parse_message(self, message): + """Parse payload into a protobuf message. + + Mutates the passed-in ``message`` in place. + + :type message: Protobuf message + :param message: the message instance to be populated from the + entry's payload + """ + Parse(json.dumps(self.payload), message)
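A hedged sketch of recovering a typed message from the payload dict; ``Struct`` is used here only because its JSON form is a plain object, and ``entry`` is assumed to be a ``ProtobufEntry`` (obtained, say, from ``Client.list_entries``) whose payload maps cleanly onto the message type::

    from google.protobuf.struct_pb2 import Struct

    message = Struct()
    entry.parse_message(message)     # fills `message` in place
    print(message.fields['status'].string_value)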
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/logging/logger.html b/0.18.1/_modules/gcloud/logging/logger.html
new file mode 100644
index 000000000000..fe5fa9c600c5
--- /dev/null
+++ b/0.18.1/_modules/gcloud/logging/logger.html
@@ -0,0 +1,679 @@
+gcloud.logging.logger — gcloud 0.18.1 documentation

Source code for gcloud.logging.logger

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Loggers."""
+
+import json
+
+from google.protobuf.json_format import MessageToJson
+
+
+
[docs]class Logger(object): + """Loggers represent named targets for log entries. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs + + :type name: string + :param name: the name of the logger + + :type client: :class:`gcloud.logging.client.Client` + :param client: A client which holds credentials and project configuration + for the logger (which requires a project). + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of default labels for entries written + via this logger. + """ + def __init__(self, name, client, labels=None): + self.name = name + self._client = client + self.labels = labels + + @property + def client(self): + """Client bound to the logger.""" + return self._client + + @property + def project(self): + """Project bound to the logger.""" + return self._client.project + + @property + def full_name(self): + """Fully-qualified name used in logging APIs""" + return 'projects/%s/logs/%s' % (self.project, self.name) + + @property + def path(self): + """URI path for use in logging APIs""" + return '/%s' % (self.full_name,) + + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current logger. + + :rtype: :class:`gcloud.logging.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + 
[docs] def batch(self, client=None): + """Return a batch to use as a context manager. + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current logger. + + :rtype: :class:`Batch` + :returns: A batch to use as a context manager. + """ + client = self._require_client(client) + return Batch(self, client)
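A minimal context-manager sketch (the log name is hypothetical; ``client`` is assumed to be an authenticated ``gcloud.logging`` client)::

    from gcloud import logging

    client = logging.Client()
    logger = client.logger('my-log')

    with logger.batch() as batch:       # commits automatically on clean exit
        batch.log_text('backfill started')
        batch.log_struct({'records_processed': 42})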
+ + def _make_entry_resource(self, text=None, info=None, message=None, + labels=None, insert_id=None, severity=None, + http_request=None): + """Return a log entry resource of the appropriate type. + + Helper for :meth:`log_text`, :meth:`log_struct`, and :meth:`log_proto`. + + Only one of ``text``, ``info``, or ``message`` should be passed. + + :type text: string or :class:`NoneType` + :param text: text payload + + :type info: dict or :class:`NoneType` + :param info: struct payload + + :type message: Protobuf message or :class:`NoneType` + :param message: protobuf payload + + :type labels: dict or :class:`NoneType` + :param labels: labels passed in to calling method. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry + + :rtype: dict + :returns: The JSON resource created. + """ + resource = { + 'logName': self.full_name, + 'resource': {'type': 'global'}, + } + + if text is not None: + resource['textPayload'] = text + + if info is not None: + resource['jsonPayload'] = info + + if message is not None: + as_json_str = MessageToJson(message) + as_json = json.loads(as_json_str) + resource['protoPayload'] = as_json + + if labels is None: + labels = self.labels + + if labels is not None: + resource['labels'] = labels + + if insert_id is not None: + resource['insertId'] = insert_id + + if severity is not None: + resource['severity'] = severity + + if http_request is not None: + resource['httpRequest'] = http_request + + return resource + +
[docs] def log_text(self, text, client=None, labels=None, insert_id=None, + severity=None, http_request=None): + """API call: log a text message via a POST request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write + + :type text: string + :param text: the log message. + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current logger. + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry + """ + client = self._require_client(client) + entry_resource = self._make_entry_resource( + text=text, labels=labels, insert_id=insert_id, severity=severity, + http_request=http_request) + client.logging_api.write_entries([entry_resource])
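For example (a sketch; the log name and label values are made up)::

    logger = client.logger('app-events')
    logger.log_text('user signed in', severity='INFO',
                    labels={'component': 'auth'})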
+ +
[docs] def log_struct(self, info, client=None, labels=None, insert_id=None, + severity=None, http_request=None): + """API call: log a structured message via a POST request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write + + :type info: dict + :param info: the log entry information + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current logger. + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry + """ + client = self._require_client(client) + entry_resource = self._make_entry_resource( + info=info, labels=labels, insert_id=insert_id, severity=severity, + http_request=http_request) + client.logging_api.write_entries([entry_resource])
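The struct payload is any JSON-serializable mapping; continuing the sketch above::

    logger.log_struct({'event': 'purchase', 'amount_usd': 9.99},
                      severity='NOTICE')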
+ +
[docs] def log_proto(self, message, client=None, labels=None, insert_id=None, + severity=None, http_request=None): + """API call: log a protobuf message via a POST request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/write + + :type message: Protobuf message + :param message: the message to be logged + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current logger. + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry + """ + client = self._require_client(client) + entry_resource = self._make_entry_resource( + message=message, labels=labels, insert_id=insert_id, + severity=severity, http_request=http_request) + client.logging_api.write_entries([entry_resource])
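Any protobuf message can be passed; a ``Struct`` makes a self-contained sketch (field values are made up)::

    from google.protobuf.struct_pb2 import Struct, Value

    message = Struct(fields={'job_id': Value(string_value='j-123')})
    logger.log_proto(message, severity='DEBUG')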
+ +
[docs] def delete(self, client=None): + """API call: delete all entries in a logger via a DELETE request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.logs/delete + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current logger. + """ + client = self._require_client(client) + client.logging_api.logger_delete(self.project, self.name)
+ +
[docs] def list_entries(self, projects=None, filter_=None, order_by=None, + page_size=None, page_token=None): + """Return a page of log entries. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/entries/list + + :type projects: list of strings + :param projects: project IDs to include. If not passed, + defaults to the project bound to the client. + + :type filter_: string + :param filter_: a filter expression. See: + https://cloud.google.com/logging/docs/view/advanced_filters + + :type order_by: string + :param order_by: One of :data:`gcloud.logging.ASCENDING` or + :data:`gcloud.logging.DESCENDING`. + + :type page_size: int + :param page_size: maximum number of entries to return. If not passed, + defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of entries. If not + passed, the API will return the first page of + entries. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.logging.entry.TextEntry`, plus a + "next page token" string: if not None, indicates that + more entries can be retrieved with another call (pass that + value as ``page_token``). + """ + log_filter = 'logName=%s' % (self.full_name,) + if filter_ is not None: + filter_ = '%s AND %s' % (filter_, log_filter) + else: + filter_ = log_filter + return self.client.list_entries( + projects=projects, filter_=filter_, order_by=order_by, + page_size=page_size, page_token=page_token)
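Since results are paged, callers typically loop on the returned token; a minimal sketch::

    entries, token = logger.list_entries(page_size=100)
    while True:
        for entry in entries:
            print(entry.payload)
        if token is None:
            break
        entries, token = logger.list_entries(page_size=100, page_token=token)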
+ + +
[docs]class Batch(object): + """Context manager: collect entries to log via a single API call. + + Helper returned by :meth:`Logger.batch` + + :type logger: :class:`gcloud.logging.logger.Logger` + :param logger: the logger to which entries will be logged. + + :type client: :class:`gcloud.logging.client.Client` + :param client: The client to use. + """ + def __init__(self, logger, client): + self.logger = logger + self.entries = [] + self.client = client + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is None: + self.commit() + +
[docs] def log_text(self, text, labels=None, insert_id=None, severity=None, + http_request=None): + """Add a text entry to be logged during :meth:`commit`. + + :type text: string + :param text: the text entry + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry. + """ + self.entries.append( + ('text', text, labels, insert_id, severity, http_request))
+ +
[docs] def log_struct(self, info, labels=None, insert_id=None, severity=None, + http_request=None): + """Add a struct entry to be logged during :meth:`commit`. + + :type info: dict + :param info: the struct entry + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry. + """ + self.entries.append( + ('struct', info, labels, insert_id, severity, http_request))
+ +
[docs] def log_proto(self, message, labels=None, insert_id=None, severity=None, + http_request=None): + """Add a protobuf entry to be logged during :meth:`commit`. + + :type message: protobuf message + :param message: the protobuf entry + + :type labels: dict or :class:`NoneType` + :param labels: (optional) mapping of labels for the entry. + + :type insert_id: string or :class:`NoneType` + :param insert_id: (optional) unique ID for log entry. + + :type severity: string or :class:`NoneType` + :param severity: (optional) severity of event being logged. + + :type http_request: dict or :class:`NoneType` + :param http_request: (optional) info about HTTP request associated with + the entry. + """ + self.entries.append( + ('proto', message, labels, insert_id, severity, http_request))
+ +
[docs] def commit(self, client=None): + """Send saved log entries as a single API call. + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current batch. + """ + if client is None: + client = self.client + + kwargs = { + 'logger_name': self.logger.path, + 'resource': {'type': 'global'}, + } + if self.logger.labels is not None: + kwargs['labels'] = self.logger.labels + + entries = [] + for entry_type, entry, labels, iid, severity, http_req in self.entries: + if entry_type == 'text': + info = {'textPayload': entry} + elif entry_type == 'struct': + info = {'jsonPayload': entry} + elif entry_type == 'proto': + as_json_str = MessageToJson(entry) + as_json = json.loads(as_json_str) + info = {'protoPayload': as_json} + else: + raise ValueError('Unknown entry type: %s' % (entry_type,)) + if labels is not None: + info['labels'] = labels + if iid is not None: + info['insertId'] = iid + if severity is not None: + info['severity'] = severity + if http_req is not None: + info['httpRequest'] = http_req + entries.append(info) + + client.logging_api.write_entries(entries, **kwargs) + del self.entries[:]
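Explicit ``commit`` is only needed when not using the ``with`` form; the following sketch issues a single ``write_entries`` call covering both entries::

    batch = logger.batch()
    batch.log_text('step 1 done')
    batch.log_text('step 2 done')
    batch.commit()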
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/logging/metric.html b/0.18.1/_modules/gcloud/logging/metric.html
new file mode 100644
index 000000000000..4bcc811464ff
--- /dev/null
+++ b/0.18.1/_modules/gcloud/logging/metric.html
Source code for gcloud.logging.metric

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define Stackdriver Logging API Metrics."""
+
+from gcloud.exceptions import NotFound
+
+
+
[docs]class Metric(object): + """Metrics represent named filters for log entries. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics + + :type name: string + :param name: the name of the metric + + :type filter_: string + :param filter_: the advanced logs filter expression defining the entries + tracked by the metric. If not passed, the instance should + already exist, to be refreshed via :meth:`reload`. + + :type client: :class:`gcloud.logging.client.Client` + :param client: A client which holds credentials and project configuration + for the metric (which requires a project). + + :type description: string + :param description: an optional description of the metric. + """ + def __init__(self, name, filter_=None, client=None, description=''): + self.name = name + self._client = client + self.filter_ = filter_ + self.description = description + + @property + def client(self): + """Client bound to the metric.""" + return self._client + + @property + def project(self): + """Project bound to the metric.""" + return self._client.project + + @property + def full_name(self): + """Fully-qualified name used in metric APIs""" + return 'projects/%s/metrics/%s' % (self.project, self.name) + + @property + def path(self): + """URL path for the metric's APIs""" + return '/%s' % (self.full_name,) + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a metric given its API representation + + :type resource: dict + :param resource: metric resource representation returned from the API + + :type client: :class:`gcloud.logging.client.Client` + :param client: Client which holds credentials and project + configuration for the metric. + + :rtype: :class:`gcloud.logging.metric.Metric` + :returns: Metric parsed from ``resource``. + """ + metric_name = resource['name'] + filter_ = resource['filter'] + description = resource.get('description', '') + return cls(metric_name, filter_, client=client, + description=description)
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current metric. + + :rtype: :class:`gcloud.logging.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + +
[docs] def create(self, client=None): + """API call: create the metric via a PUT request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/create + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current metric. + """ + client = self._require_client(client) + client.metrics_api.metric_create( + self.project, self.name, self.filter_, self.description)
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the metric via a GET request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/get + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current metric. + + :rtype: bool + :returns: Boolean indicating existence of the metric. + """ + client = self._require_client(client) + + try: + client.metrics_api.metric_get(self.project, self.name) + except NotFound: + return False + else: + return True
+ +
[docs] def reload(self, client=None): + """API call: sync local metric configuration via a GET request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/get + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current metric. + """ + client = self._require_client(client) + data = client.metrics_api.metric_get(self.project, self.name) + self.description = data.get('description', '') + self.filter_ = data['filter']
+ +
[docs] def update(self, client=None): + """API call: update metric configuration via a PUT request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/update + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current metric. + """ + client = self._require_client(client) + client.metrics_api.metric_update( + self.project, self.name, self.filter_, self.description)
+ +
[docs] def delete(self, client=None): + """API call: delete a metric via a DELETE request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.metrics/delete + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current metric. + """ + client = self._require_client(client) + client.metrics_api.metric_delete(self.project, self.name)
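Putting the metric operations together, a get-or-create lifecycle sketch (the metric name, filter, and descriptions are made up; ``client.metric`` is assumed to be the factory on the logging client)::

    metric = client.metric('error-count', filter_='severity>=ERROR',
                           description='Count of error entries.')
    if not metric.exists():
        metric.create()
    metric.reload()                      # sync local config from the API
    metric.description = 'Errors across all services.'
    metric.update()
    # metric.delete()                    # when the metric is no longer needed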
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/logging/sink.html b/0.18.1/_modules/gcloud/logging/sink.html
new file mode 100644
index 000000000000..50e7e34aebd6
--- /dev/null
+++ b/0.18.1/_modules/gcloud/logging/sink.html
Source code for gcloud.logging.sink

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define Stackdriver Logging API Sinks."""
+
+from gcloud.exceptions import NotFound
+
+
+
[docs]class Sink(object): + """Sinks represent filtered exports for log entries. + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks + + :type name: string + :param name: the name of the sink + + :type filter_: string + :param filter_: the advanced logs filter expression defining the entries + exported by the sink. If not passed, the instance should + already exist, to be refreshed via :meth:`reload`. + + :type destination: string + :param destination: destination URI for the entries exported by the sink. + If not passed, the instance should already exist, to + be refreshed via :meth:`reload`. + + :type client: :class:`gcloud.logging.client.Client` + :param client: A client which holds credentials and project configuration + for the sink (which requires a project). + """ + def __init__(self, name, filter_=None, destination=None, client=None): + self.name = name + self.filter_ = filter_ + self.destination = destination + self._client = client + + @property + def client(self): + """Client bound to the sink.""" + return self._client + + @property + def project(self): + """Project bound to the sink.""" + return self._client.project + + @property + def full_name(self): + """Fully-qualified name used in sink APIs""" + return 'projects/%s/sinks/%s' % (self.project, self.name) + + @property + def path(self): + """URL path for the sink's APIs""" + return '/%s' % (self.full_name,) + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a sink given its API representation + + :type resource: dict + :param resource: sink resource representation returned from the API + + :type client: :class:`gcloud.logging.client.Client` + :param client: Client which holds credentials and project + configuration for the sink. + + :rtype: :class:`gcloud.logging.sink.Sink` + :returns: Sink parsed from ``resource``. + """ + sink_name = resource['name'] + filter_ = resource['filter'] + destination = resource['destination'] + return cls(sink_name, filter_, destination, client=client)
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current sink. + + :rtype: :class:`gcloud.logging.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + +
[docs] def create(self, client=None): + """API call: create the sink via a PUT request + + See: + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/create + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current sink. + """ + client = self._require_client(client) + client.sinks_api.sink_create( + self.project, self.name, self.filter_, self.destination)
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the sink via a GET request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/get + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current sink. + + :rtype: bool + :returns: Boolean indicating existence of the sink. + """ + client = self._require_client(client) + + try: + client.sinks_api.sink_get(self.project, self.name) + except NotFound: + return False + else: + return True
+ +
[docs] def reload(self, client=None): + """API call: sync local sink configuration via a GET request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/get + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current sink. + """ + client = self._require_client(client) + data = client.sinks_api.sink_get(self.project, self.name) + self.filter_ = data['filter'] + self.destination = data['destination']
+ +
[docs] def update(self, client=None): + """API call: update sink configuration via a PUT request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/update + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current sink. + """ + client = self._require_client(client) + client.sinks_api.sink_update( + self.project, self.name, self.filter_, self.destination)
+ +
[docs] def delete(self, client=None): + """API call: delete a sink via a DELETE request + + See + https://cloud.google.com/logging/docs/api/ref_v2beta1/rest/v2beta1/projects.sinks/delete + + :type client: :class:`gcloud.logging.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current sink. + """ + client = self._require_client(client) + client.sinks_api.sink_delete(self.project, self.name)
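Sinks follow the same lifecycle; a sketch exporting errors to a hypothetical Cloud Storage bucket (``client.sink`` is assumed to be the factory on the logging client)::

    sink = client.sink('errors-to-gcs', filter_='severity>=ERROR',
                       destination='storage.googleapis.com/my-bucket')
    if not sink.exists():
        sink.create()
    sink.reload()
    sink.destination = 'storage.googleapis.com/my-other-bucket'
    sink.update()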
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/monitoring/client.html b/0.18.1/_modules/gcloud/monitoring/client.html
new file mode 100644
index 000000000000..632e83c644fb
--- /dev/null
+++ b/0.18.1/_modules/gcloud/monitoring/client.html
Source code for gcloud.monitoring.client

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the `Google Stackdriver Monitoring API (V3)`_.
+
+Example::
+
+    >>> from gcloud import monitoring
+    >>> client = monitoring.Client()
+    >>> query = client.query(minutes=5)
+    >>> print(query.as_dataframe())  # Requires pandas.
+
+At present, the client supports querying of time series, metric descriptors,
+and monitored resource descriptors.
+
+.. _Google Stackdriver Monitoring API (V3):
+    https://cloud.google.com/monitoring/api/v3/
+"""
+
+from gcloud.client import JSONClient
+from gcloud.monitoring.connection import Connection
+from gcloud.monitoring.metric import MetricDescriptor
+from gcloud.monitoring.metric import MetricKind
+from gcloud.monitoring.metric import ValueType
+from gcloud.monitoring.query import Query
+from gcloud.monitoring.resource import ResourceDescriptor
+
+
+
[docs]class Client(JSONClient): + """Client to bundle configuration needed for API requests. + + :type project: string + :param project: The target project. If not passed, falls back to the + default inferred from the environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()`` + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + +
[docs] def query(self, + metric_type=Query.DEFAULT_METRIC_TYPE, + end_time=None, + days=0, hours=0, minutes=0): + """Construct a query object for retrieving metric data. + + Example:: + + >>> query = client.query(minutes=5) + >>> print(query.as_dataframe()) # Requires pandas. + + :type metric_type: string + :param metric_type: The metric type name. The default value is + :data:`Query.DEFAULT_METRIC_TYPE + <gcloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`, + but please note that this default value is provided only for + demonstration purposes and is subject to change. See the + `supported metrics`_. + + :type end_time: :class:`datetime.datetime` or None + :param end_time: The end time (inclusive) of the time interval + for which results should be returned, as a datetime object. + The default is the start of the current minute. + + The start time (exclusive) is determined by combining the + values of ``days``, ``hours``, and ``minutes``, and + subtracting the resulting duration from the end time. + + It is also allowed to omit the end time and duration here, + in which case + :meth:`~gcloud.monitoring.query.Query.select_interval` + must be called before the query is executed. + + :type days: integer + :param days: The number of days in the time interval. + + :type hours: integer + :param hours: The number of hours in the time interval. + + :type minutes: integer + :param minutes: The number of minutes in the time interval. + + :rtype: :class:`~gcloud.monitoring.query.Query` + :returns: The query object. + + :raises: :exc:`ValueError` if ``end_time`` is specified but + ``days``, ``hours``, and ``minutes`` are all zero. + If you really want to specify a point in time, use + :meth:`~gcloud.monitoring.query.Query.select_interval`. + + .. _supported metrics: https://cloud.google.com/monitoring/api/metrics + """ + return Query(self, metric_type, + end_time=end_time, + days=days, hours=hours, minutes=minutes)
+ +
[docs] def metric_descriptor(self, type_, + metric_kind=MetricKind.METRIC_KIND_UNSPECIFIED, + value_type=ValueType.VALUE_TYPE_UNSPECIFIED, + labels=(), unit='', description='', display_name=''): + """Construct a metric descriptor object. + + Metric descriptors specify the schema for a particular metric type. + + This factory method is used most often in conjunction with the metric + descriptor :meth:`~gcloud.monitoring.metric.MetricDescriptor.create` + method to define custom metrics:: + + >>> descriptor = client.metric_descriptor( + ... 'custom.googleapis.com/my_metric', + ... metric_kind=MetricKind.GAUGE, + ... value_type=ValueType.DOUBLE, + ... description='This is a simple example of a custom metric.') + >>> descriptor.create() + + Here is an example where the custom metric is parameterized by a + metric label:: + + >>> label = LabelDescriptor('response_code', LabelValueType.INT64, + ... description='HTTP status code') + >>> descriptor = client.metric_descriptor( + ... 'custom.googleapis.com/my_app/response_count', + ... metric_kind=MetricKind.CUMULATIVE, + ... value_type=ValueType.INT64, + ... labels=[label], + ... description='Cumulative count of HTTP responses.') + >>> descriptor.create() + + :type type_: string + :param type_: + The metric type including a DNS name prefix. For example: + ``"custom.googleapis.com/my_metric"`` + + :type metric_kind: string + :param metric_kind: + The kind of measurement. It must be one of + :data:`MetricKind.GAUGE`, :data:`MetricKind.DELTA`, + or :data:`MetricKind.CUMULATIVE`. + See :class:`~gcloud.monitoring.metric.MetricKind`. + + :type value_type: string + :param value_type: + The value type of the metric. It must be one of + :data:`ValueType.BOOL`, :data:`ValueType.INT64`, + :data:`ValueType.DOUBLE`, :data:`ValueType.STRING`, + or :data:`ValueType.DISTRIBUTION`. + See :class:`ValueType`. + + :type labels: list of :class:`~gcloud.monitoring.label.LabelDescriptor` + :param labels: + A sequence of zero or more label descriptors specifying the labels + used to identify a specific instance of this metric. + + :type unit: string + :param unit: An optional unit in which the metric value is reported. + + :type description: string + :param description: An optional detailed description of the metric. + + :type display_name: string + :param display_name: An optional concise name for the metric. + + :rtype: :class:`MetricDescriptor` + :returns: The metric descriptor created with the passed-in arguments. + """ + return MetricDescriptor( + self, type_, + metric_kind=metric_kind, + value_type=value_type, + labels=labels, + unit=unit, + description=description, + display_name=display_name, + )
+ +
[docs] def fetch_metric_descriptor(self, metric_type): + """Look up a metric descriptor by type. + + Example:: + + >>> METRIC = 'compute.googleapis.com/instance/cpu/utilization' + >>> print(client.fetch_metric_descriptor(METRIC)) + + :type metric_type: string + :param metric_type: The metric type name. + + :rtype: :class:`~gcloud.monitoring.metric.MetricDescriptor` + :returns: The metric descriptor instance. + + :raises: :class:`gcloud.exceptions.NotFound` if the metric descriptor + is not found. + """ + return MetricDescriptor._fetch(self, metric_type)
+ +
[docs] def list_metric_descriptors(self, filter_string=None, type_prefix=None): + """List all metric descriptors for the project. + + Examples:: + + >>> for descriptor in client.list_metric_descriptors(): + ... print(descriptor.type) + + >>> for descriptor in client.list_metric_descriptors( + ... type_prefix='custom.'): + ... print(descriptor.type) + + :type filter_string: string or None + :param filter_string: + An optional filter expression describing the metric descriptors + to be returned. See the `filter documentation`_. + + :type type_prefix: string or None + :param type_prefix: An optional prefix constraining the selected + metric types. This adds ``metric.type = starts_with("<prefix>")`` + to the filter. + + :rtype: list of :class:`~gcloud.monitoring.metric.MetricDescriptor` + :returns: A list of metric descriptor instances. + + .. _filter documentation: + https://cloud.google.com/monitoring/api/v3/filters + """ + return MetricDescriptor._list(self, filter_string, + type_prefix=type_prefix)
+ +
[docs] def fetch_resource_descriptor(self, resource_type): + """Look up a monitored resource descriptor by type. + + Example:: + + >>> print(client.fetch_resource_descriptor('gce_instance')) + + :type resource_type: string + :param resource_type: The resource type name. + + :rtype: :class:`~gcloud.monitoring.resource.ResourceDescriptor` + :returns: The resource descriptor instance. + + :raises: :class:`gcloud.exceptions.NotFound` if the resource descriptor + is not found. + """ + return ResourceDescriptor._fetch(self, resource_type)
+ +
[docs] def list_resource_descriptors(self, filter_string=None): + """List all monitored resource descriptors for the project. + + Example:: + + >>> for descriptor in client.list_resource_descriptors(): + ... print(descriptor.type) + + :type filter_string: string or None + :param filter_string: + An optional filter expression describing the resource descriptors + to be returned. See the `filter documentation`_. + + :rtype: list of :class:`~gcloud.monitoring.resource.ResourceDescriptor` + :returns: A list of resource descriptor instances. + + .. _filter documentation: + https://cloud.google.com/monitoring/api/v3/filters + """ + return ResourceDescriptor._list(self, filter_string)
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/monitoring/connection.html b/0.18.1/_modules/gcloud/monitoring/connection.html
new file mode 100644
index 000000000000..260763c70caa
--- /dev/null
+++ b/0.18.1/_modules/gcloud/monitoring/connection.html
Source code for gcloud.monitoring.connection

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with Stackdriver Monitoring connections."""
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Stackdriver Monitoring via the JSON REST API. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + connection. + + :type http: :class:`httplib2.Http` or class that defines ``request()`` + :param http: (Optional) HTTP object to make requests. + + :type api_base_url: string + :param api_base_url: The base of the API call URL. Defaults to the value + :attr:`Connection.API_BASE_URL`. + """ + + API_BASE_URL = 'https://monitoring.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v3' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/monitoring.read', + 'https://www.googleapis.com/auth/monitoring', + 'https://www.googleapis.com/auth/cloud-platform') + """The scopes required for authenticating as a Monitoring consumer."""
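These class attributes compose the request URL via ``JSONConnection``; roughly as follows (a standalone sketch of the composition, not the library's internal code; the path is made up)::

    API_BASE_URL = 'https://monitoring.googleapis.com'
    API_VERSION = 'v3'
    path = '/projects/my-project/timeSeries'

    url = '{api_base_url}/{api_version}{path}'.format(
        api_base_url=API_BASE_URL, api_version=API_VERSION, path=path)
    assert url == ('https://monitoring.googleapis.com/v3'
                   '/projects/my-project/timeSeries')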
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/monitoring/label.html b/0.18.1/_modules/gcloud/monitoring/label.html
new file mode 100644
index 000000000000..30666420f289
--- /dev/null
+++ b/0.18.1/_modules/gcloud/monitoring/label.html
Source code for gcloud.monitoring.label

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Label Descriptors for the `Google Monitoring API (V3)`_.
+
+.. _Google Monitoring API (V3):
+    https://cloud.google.com/monitoring/api/ref_v3/rest/v3/LabelDescriptor
+"""
+
+
+
[docs]class LabelValueType(object): + """Allowed values for the `type of a label`_. + + .. _type of a label: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ + LabelDescriptor#ValueType + """ + + STRING = 'STRING' + BOOL = 'BOOL' + INT64 = 'INT64'
+ + +
[docs]class LabelDescriptor(object): + """Schema specification and documentation for a single label. + + :type key: string + :param key: The name of the label. + + :type value_type: string + :param value_type: + The type of the label. It must be one of :data:`LabelValueType.STRING`, + :data:`LabelValueType.BOOL`, or :data:`LabelValueType.INT64`. + See :class:`LabelValueType`. + + :type description: string + :param description: A human-readable description for the label. + """ + + def __init__(self, key, value_type=LabelValueType.STRING, description=''): + self.key = key + self.value_type = value_type + self.description = description + + @classmethod + def _from_dict(cls, info): + """Construct a label descriptor from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`LabelDescriptor` + :returns: A label descriptor. + """ + return cls( + info['key'], + info.get('valueType', LabelValueType.STRING), + info.get('description', ''), + ) + + def _to_dict(self): + """Build a dictionary ready to be serialized to the JSON wire format. + + :rtype: dict + :returns: A dictionary. + """ + info = { + 'key': self.key, + 'valueType': self.value_type, + } + + if self.description: + info['description'] = self.description + + return info + + def __eq__(self, other): + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + return self.__dict__ != other.__dict__ + + def __repr__(self): + return ( + 'LabelDescriptor(key={key!r}, value_type={value_type!r},' + ' description={description!r})' + ).format(**self.__dict__)
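A quick round-trip sketch of the wire format (the key and description are made up; the underscore-prefixed helpers are private and used here only for illustration)::

    label = LabelDescriptor('response_code', LabelValueType.INT64,
                            description='HTTP status code')
    info = label._to_dict()
    # {'key': 'response_code', 'valueType': 'INT64',
    #  'description': 'HTTP status code'}
    assert LabelDescriptor._from_dict(info) == label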
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/monitoring/metric.html b/0.18.1/_modules/gcloud/monitoring/metric.html
new file mode 100644
index 000000000000..de7a4644d6ba
--- /dev/null
+++ b/0.18.1/_modules/gcloud/monitoring/metric.html
Source code for gcloud.monitoring.metric

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Metric Descriptors for the `Google Stackdriver Monitoring API (V3)`_.
+
+.. _Google Stackdriver Monitoring API (V3):
+    https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\
+    projects.metricDescriptors
+"""
+
+import collections
+
+from gcloud.monitoring.label import LabelDescriptor
+
+
+
[docs]class MetricKind(object): + """Choices for the `kind of measurement`_. + + .. _kind of measurement: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ + projects.metricDescriptors#MetricKind + """ + + METRIC_KIND_UNSPECIFIED = 'METRIC_KIND_UNSPECIFIED' + """.. note:: An unspecified kind is not allowed in metric descriptors.""" + + GAUGE = 'GAUGE' + DELTA = 'DELTA' + CUMULATIVE = 'CUMULATIVE'
+ + +
[docs]class ValueType(object): + """Choices for the `metric value type`_. + + .. _metric value type: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ + projects.metricDescriptors#ValueType + """ + + VALUE_TYPE_UNSPECIFIED = 'VALUE_TYPE_UNSPECIFIED' + """.. note:: An unspecified type is not allowed in metric descriptors.""" + + BOOL = 'BOOL' + INT64 = 'INT64' + DOUBLE = 'DOUBLE' + STRING = 'STRING' + DISTRIBUTION = 'DISTRIBUTION'
+ + +
[docs]class MetricDescriptor(object): + """Specification of a metric type and its schema. + + The preferred way to construct a metric descriptor object is using the + :meth:`~gcloud.monitoring.client.Client.metric_descriptor` factory method + of the :class:`~gcloud.monitoring.client.Client` class. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: A client for operating on the metric descriptor. + + :type type_: string + :param type_: + The metric type including a DNS name prefix. For example: + ``"compute.googleapis.com/instance/cpu/utilization"`` + + :type metric_kind: string + :param metric_kind: + The kind of measurement. It must be one of + :data:`MetricKind.GAUGE`, :data:`MetricKind.DELTA`, + or :data:`MetricKind.CUMULATIVE`. See :class:`MetricKind`. + + :type value_type: string + :param value_type: + The value type of the metric. It must be one of + :data:`ValueType.BOOL`, :data:`ValueType.INT64`, + :data:`ValueType.DOUBLE`, :data:`ValueType.STRING`, + or :data:`ValueType.DISTRIBUTION`. + See :class:`ValueType`. + + :type labels: list of :class:`~gcloud.monitoring.label.LabelDescriptor` + :param labels: + A sequence of zero or more label descriptors specifying the labels + used to identify a specific instance of this metric. + + :type unit: string + :param unit: An optional unit in which the metric value is reported. + + :type description: string + :param description: An optional detailed description of the metric. + + :type display_name: string + :param display_name: An optional concise name for the metric. + + :type name: string or None + :param name: + The "resource name" of the metric descriptor. For example: + ``"projects/<project_id>/metricDescriptors/<type>"``. As + retrieved from the service, this will always be specified. + You can and should omit it when constructing an instance for + the purpose of creating a new metric descriptor. + """ + + def __init__(self, client, type_, + metric_kind=MetricKind.METRIC_KIND_UNSPECIFIED, + value_type=ValueType.VALUE_TYPE_UNSPECIFIED, + labels=(), + unit='', description='', display_name='', + name=None): + self.client = client + self.name = name + self.type = type_ + self.labels = labels + self.metric_kind = metric_kind + self.value_type = value_type + self.unit = unit + self.description = description + self.display_name = display_name + +
[docs] def create(self): + """Create a new metric descriptor based on this object. + + Example:: + + >>> descriptor = client.metric_descriptor( + ... 'custom.googleapis.com/my_metric', + ... metric_kind=MetricKind.GAUGE, + ... value_type=ValueType.DOUBLE, + ... description='This is a simple example of a custom metric.') + >>> descriptor.create() + + The metric kind must not be :data:`MetricKind.METRIC_KIND_UNSPECIFIED`, + and the value type must not be + :data:`ValueType.VALUE_TYPE_UNSPECIFIED`. + + The ``name`` attribute is ignored in preparing the creation request. + All attributes are overwritten by the values received in the response + (normally affecting only ``name``). + """ + path = '/projects/{project}/metricDescriptors/'.format( + project=self.client.project) + response = self.client.connection.api_request(method='POST', path=path, + data=self._to_dict()) + self._init_from_dict(response)
+ +
[docs] def delete(self): + """Delete the metric descriptor identified by this object. + + Example:: + + >>> descriptor = client.metric_descriptor( + ... 'custom.googleapis.com/my_metric') + >>> descriptor.delete() + + Only the ``client`` and ``type`` attributes are used. + """ + path = '/projects/{project}/metricDescriptors/{type}'.format( + project=self.client.project, + type=self.type) + self.client.connection.api_request(method='DELETE', path=path)
+ + @classmethod + def _fetch(cls, client, metric_type): + """Look up a metric descriptor by type. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: The client to use. + + :type metric_type: string + :param metric_type: The metric type name. + + :rtype: :class:`MetricDescriptor` + :returns: The metric descriptor instance. + + :raises: :class:`gcloud.exceptions.NotFound` if the metric descriptor + is not found. + """ + path = '/projects/{project}/metricDescriptors/{type}'.format( + project=client.project, + type=metric_type) + info = client.connection.api_request(method='GET', path=path) + return cls._from_dict(client, info) + + @classmethod + def _list(cls, client, filter_string=None, type_prefix=None): + """List all metric descriptors for the project. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: The client to use. + + :type filter_string: string or None + :param filter_string: + An optional filter expression describing the metric descriptors + to be returned. See the `filter documentation`_. + + :type type_prefix: string or None + :param type_prefix: An optional prefix constraining the selected + metric types. This adds ``metric.type = starts_with("<prefix>")`` + to the filter. + + :rtype: list of :class:`MetricDescriptor` + :returns: A list of metric descriptor instances. + + .. _filter documentation: + https://cloud.google.com/monitoring/api/v3/filters + """ + path = '/projects/{project}/metricDescriptors/'.format( + project=client.project) + + filters = [] + if filter_string is not None: + filters.append(filter_string) + + if type_prefix is not None: + filters.append('metric.type = starts_with("{prefix}")'.format( + prefix=type_prefix)) + + descriptors = [] + page_token = None + while True: + params = {} + + if filters: + params['filter'] = ' AND '.join(filters) + + if page_token is not None: + params['pageToken'] = page_token + + response = client.connection.api_request( + method='GET', path=path, query_params=params) + for info in response.get('metricDescriptors', ()): + descriptors.append(cls._from_dict(client, info)) + + page_token = response.get('nextPageToken') + if not page_token: + break + + return descriptors + + @classmethod + def _from_dict(cls, client, info): + """Construct a metric descriptor from the parsed JSON representation. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: A client to be included in the returned object. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`MetricDescriptor` + :returns: A metric descriptor. + """ + descriptor = cls(client, None) + descriptor._init_from_dict(info) + return descriptor + + def _init_from_dict(self, info): + """Initialize attributes from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + """ + self.name = info['name'] + self.type = info['type'] + self.labels = tuple(LabelDescriptor._from_dict(label) + for label in info.get('labels', [])) + self.metric_kind = info['metricKind'] + self.value_type = info['valueType'] + self.unit = info.get('unit', '') + self.description = info.get('description', '') + self.display_name = info.get('displayName', '') + + def _to_dict(self): + """Build a dictionary ready to be serialized to the JSON wire format. + + :rtype: dict + :returns: A dictionary. 
+ """ + info = { + 'type': self.type, + 'metricKind': self.metric_kind, + 'valueType': self.value_type, + } + + if self.labels: + info['labels'] = [label._to_dict() for label in self.labels] + if self.unit: + info['unit'] = self.unit + if self.description: + info['description'] = self.description + if self.display_name: + info['displayName'] = self.display_name + + return info + + def __repr__(self): + return ( + '<MetricDescriptor:\n' + ' name={name!r},\n' + ' type={type!r},\n' + ' metric_kind={metric_kind!r}, value_type={value_type!r},\n' + ' labels={labels!r},\n' + ' display_name={display_name!r}, unit={unit!r},\n' + ' description={description!r}>' + ).format(**self.__dict__)
+ + +
[docs]class Metric(collections.namedtuple('Metric', 'type labels')): + """A specific metric identified by specifying values for all labels. + + :type type: string + :param type: The metric type name. + + :type labels: dict + :param labels: A mapping from label names to values for all labels + enumerated in the associated :class:`MetricDescriptor`. + """ + __slots__ = () + + @classmethod + def _from_dict(cls, info): + """Construct a metric object from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`Metric` + :returns: A metric object. + """ + return cls( + type=info['type'], + labels=info.get('labels', {}), + )
+
+ +
+ +
\ No newline at end of file
diff --git a/0.18.1/_modules/gcloud/monitoring/query.html b/0.18.1/_modules/gcloud/monitoring/query.html
new file mode 100644
index 000000000000..2519d79ea129
--- /dev/null
+++ b/0.18.1/_modules/gcloud/monitoring/query.html
Source code for gcloud.monitoring.query

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Time series query for the `Google Stackdriver Monitoring API (V3)`_.
+
+.. _Google Stackdriver Monitoring API (V3):
+    https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\
+    projects.timeSeries/list
+"""
+
+import copy
+import datetime
+import itertools
+
+import six
+
+from gcloud._helpers import _datetime_to_rfc3339
+from gcloud.monitoring._dataframe import _build_dataframe
+from gcloud.monitoring.timeseries import TimeSeries
+
+_UTCNOW = datetime.datetime.utcnow  # To be replaced by tests.
+
+
+
[docs]class Aligner(object): + """Allowed values for the `supported aligners`_.""" + + ALIGN_NONE = 'ALIGN_NONE' + ALIGN_DELTA = 'ALIGN_DELTA' + ALIGN_RATE = 'ALIGN_RATE' + ALIGN_INTERPOLATE = 'ALIGN_INTERPOLATE' + ALIGN_NEXT_OLDER = 'ALIGN_NEXT_OLDER' + ALIGN_MIN = 'ALIGN_MIN' + ALIGN_MAX = 'ALIGN_MAX' + ALIGN_MEAN = 'ALIGN_MEAN' + ALIGN_COUNT = 'ALIGN_COUNT' + ALIGN_SUM = 'ALIGN_SUM' + ALIGN_STDDEV = 'ALIGN_STDDEV' + ALIGN_COUNT_TRUE = 'ALIGN_COUNT_TRUE' + ALIGN_FRACTION_TRUE = 'ALIGN_FRACTION_TRUE'
+ + +
[docs]class Reducer(object): + """Allowed values for the `supported reducers`_.""" + + REDUCE_NONE = 'REDUCE_NONE' + REDUCE_MEAN = 'REDUCE_MEAN' + REDUCE_MIN = 'REDUCE_MIN' + REDUCE_MAX = 'REDUCE_MAX' + REDUCE_SUM = 'REDUCE_SUM' + REDUCE_STDDEV = 'REDUCE_STDDEV' + REDUCE_COUNT = 'REDUCE_COUNT' + REDUCE_COUNT_TRUE = 'REDUCE_COUNT_TRUE' + REDUCE_FRACTION_TRUE = 'REDUCE_FRACTION_TRUE' + REDUCE_PERCENTILE_99 = 'REDUCE_PERCENTILE_99' + REDUCE_PERCENTILE_95 = 'REDUCE_PERCENTILE_95' + REDUCE_PERCENTILE_50 = 'REDUCE_PERCENTILE_50' + REDUCE_PERCENTILE_05 = 'REDUCE_PERCENTILE_05'
+ + +
[docs]class Query(object): + """Query object for retrieving metric data. + + The preferred way to construct a query object is using the + :meth:`~gcloud.monitoring.client.Client.query` method + of the :class:`~gcloud.monitoring.client.Client` class. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: The client to use. + + :type metric_type: string + :param metric_type: The metric type name. The default value is + :data:`Query.DEFAULT_METRIC_TYPE + <gcloud.monitoring.query.Query.DEFAULT_METRIC_TYPE>`, + but please note that this default value is provided only for + demonstration purposes and is subject to change. See the + `supported metrics`_. + + :type end_time: :class:`datetime.datetime` or None + :param end_time: The end time (inclusive) of the time interval + for which results should be returned, as a datetime object. + The default is the start of the current minute. + + The start time (exclusive) is determined by combining the + values of ``days``, ``hours``, and ``minutes``, and + subtracting the resulting duration from the end time. + + It is also allowed to omit the end time and duration here, + in which case + :meth:`~gcloud.monitoring.query.Query.select_interval` + must be called before the query is executed. + + :type days: integer + :param days: The number of days in the time interval. + + :type hours: integer + :param hours: The number of hours in the time interval. + + :type minutes: integer + :param minutes: The number of minutes in the time interval. + + :raises: :exc:`ValueError` if ``end_time`` is specified but + ``days``, ``hours``, and ``minutes`` are all zero. + If you really want to specify a point in time, use + :meth:`~gcloud.monitoring.query.Query.select_interval`. + + .. _supported metrics: https://cloud.google.com/monitoring/api/metrics + """ + + DEFAULT_METRIC_TYPE = 'compute.googleapis.com/instance/cpu/utilization' + + def __init__(self, client, + metric_type=DEFAULT_METRIC_TYPE, + end_time=None, days=0, hours=0, minutes=0): + start_time = None + if days or hours or minutes: + if end_time is None: + end_time = _UTCNOW().replace(second=0, microsecond=0) + start_time = end_time - datetime.timedelta(days=days, + hours=hours, + minutes=minutes) + elif end_time is not None: + raise ValueError('Non-zero duration required for time interval.') + + self._client = client + self._end_time = end_time + self._start_time = start_time + self._filter = _Filter(metric_type) + + self._per_series_aligner = None + self._alignment_period_seconds = None + self._cross_series_reducer = None + self._group_by_fields = () + + def __iter__(self): + return self.iter() + + @property + def metric_type(self): + """The metric type name.""" + return self._filter.metric_type + + @property + def filter(self): + """The filter string. + + This is constructed from the metric type, the resource type, and + selectors for the group ID, monitored projects, resource labels, + and metric labels. + """ + return str(self._filter) + +
[docs] def select_interval(self, end_time, start_time=None): + """Copy the query and set the query time interval. + + Example:: + + import datetime + + now = datetime.datetime.utcnow() + query = query.select_interval( + end_time=now, + start_time=now - datetime.timedelta(minutes=5)) + + As a convenience, you can alternatively specify the end time and + an interval duration when you create the query initially. + + :type end_time: :class:`datetime.datetime` + :param end_time: The end time (inclusive) of the time interval + for which results should be returned, as a datetime object. + + :type start_time: :class:`datetime.datetime` or None + :param start_time: The start time (exclusive) of the time interval + for which results should be returned, as a datetime object. + If not specified, the interval is a point in time. + + :rtype: :class:`Query` + :returns: The new query object. + """ + new_query = self.copy() + new_query._end_time = end_time + new_query._start_time = start_time + return new_query
+ +
[docs] def select_group(self, group_id): + """Copy the query and add filtering by group. + + Example:: + + query = query.select_group('1234567') + + :type group_id: string + :param group_id: The ID of a group to filter by. + + :rtype: :class:`Query` + :returns: The new query object. + """ + new_query = self.copy() + new_query._filter.group_id = group_id + return new_query
+ +
[docs] def select_projects(self, *args): + """Copy the query and add filtering by monitored projects. + + This is only useful if the target project represents a Stackdriver + account containing the specified monitored projects. + + Examples:: + + query = query.select_projects('project-1') + query = query.select_projects('project-1', 'project-2') + + :type args: tuple + :param args: Project IDs limiting the resources to be included + in the query. + + :rtype: :class:`Query` + :returns: The new query object. + """ + new_query = self.copy() + new_query._filter.projects = args + return new_query
+ +
[docs] def select_resources(self, *args, **kwargs): + """Copy the query and add filtering by resource labels. + + Examples:: + + query = query.select_resources(zone='us-central1-a') + query = query.select_resources(zone_prefix='europe-') + query = query.select_resources(resource_type='gce_instance') + + A keyword argument ``<label>=<value>`` ordinarily generates a filter + expression of the form:: + + resource.label.<label> = "<value>" + + However, by adding ``"_prefix"`` or ``"_suffix"`` to the keyword, + you can specify a partial match. + + ``<label>_prefix=<value>`` generates:: + + resource.label.<label> = starts_with("<value>") + + ``<label>_suffix=<value>`` generates:: + + resource.label.<label> = ends_with("<value>") + + As a special case, ``"resource_type"`` is treated as a special + pseudo-label corresponding to the filter object ``resource.type``. + For example, ``resource_type=<value>`` generates:: + + resource.type = "<value>" + + See the `defined resource types`_. + + .. note:: + + The label ``"instance_name"`` is a metric label, + not a resource label. You would filter on it using + ``select_metrics(instance_name=...)``. + + :type args: tuple + :param args: Raw filter expression strings to include in the + conjunction. If just one is provided and no keyword arguments + are provided, it can be a disjunction. + + :type kwargs: dict + :param kwargs: Label filters to include in the conjunction as + described above. + + :rtype: :class:`Query` + :returns: The new query object. + + .. _defined resource types: + https://cloud.google.com/monitoring/api/v3/monitored-resources + """ + new_query = self.copy() + new_query._filter.select_resources(*args, **kwargs) + return new_query
+ +
[docs] def select_metrics(self, *args, **kwargs): + """Copy the query and add filtering by metric labels. + + Examples:: + + query = query.select_metrics(instance_name='myinstance') + query = query.select_metrics(instance_name_prefix='mycluster-') + + A keyword argument ``<label>=<value>`` ordinarily generates a filter + expression of the form:: + + metric.label.<label> = "<value>" + + However, by adding ``"_prefix"`` or ``"_suffix"`` to the keyword, + you can specify a partial match. + + ``<label>_prefix=<value>`` generates:: + + metric.label.<label> = starts_with("<value>") + + ``<label>_suffix=<value>`` generates:: + + metric.label.<label> = ends_with("<value>") + + :type args: tuple + :param args: Raw filter expression strings to include in the + conjunction. If just one is provided and no keyword arguments + are provided, it can be a disjunction. + + :type kwargs: dict + :param kwargs: Label filters to include in the conjunction as + described above. + + :rtype: :class:`Query` + :returns: The new query object. + """ + new_query = self.copy() + new_query._filter.select_metrics(*args, **kwargs) + return new_query
+ +
[docs] def align(self, per_series_aligner, seconds=0, minutes=0, hours=0): + """Copy the query and add temporal alignment. + + If ``per_series_aligner`` is not :data:`Aligner.ALIGN_NONE`, each time + series will contain data points only on the period boundaries. + + Example:: + + query = query.align(Aligner.ALIGN_MEAN, minutes=5) + + It is also possible to specify the aligner as a literal string:: + + query = query.align('ALIGN_MEAN', minutes=5) + + :type per_series_aligner: string + :param per_series_aligner: The approach to be used to align + individual time series. For example: :data:`Aligner.ALIGN_MEAN`. + See :class:`Aligner` and the descriptions of the `supported + aligners`_. + + :type seconds: integer + :param seconds: The number of seconds in the alignment period. + + :type minutes: integer + :param minutes: The number of minutes in the alignment period. + + :type hours: integer + :param hours: The number of hours in the alignment period. + + :rtype: :class:`Query` + :returns: The new query object. + + .. _supported aligners: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ + projects.timeSeries/list#Aligner + """ + new_query = self.copy() + new_query._per_series_aligner = per_series_aligner + new_query._alignment_period_seconds = seconds + 60 * (minutes + + 60 * hours) + return new_query
+ +
[docs] def reduce(self, cross_series_reducer, *group_by_fields): + """Copy the query and add cross-series reduction. + + Cross-series reduction combines time series by aggregating their + data points. + + For example, you could request an aggregated time series for each + combination of project and zone as follows:: + + query = query.reduce(Reducer.REDUCE_MEAN, + 'resource.project_id', 'resource.zone') + + :type cross_series_reducer: string + :param cross_series_reducer: + The approach to be used to combine time series. For example: + :data:`Reducer.REDUCE_MEAN`. See :class:`Reducer` and the + descriptions of the `supported reducers`_. + + :type group_by_fields: strings + :param group_by_fields: + Fields to be preserved by the reduction. For example, specifying + just ``"resource.zone"`` will result in one time series per zone. + The default is to aggregate all of the time series into just one. + + :rtype: :class:`Query` + :returns: The new query object. + + .. _supported reducers: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ + projects.timeSeries/list#Reducer + """ + new_query = self.copy() + new_query._cross_series_reducer = cross_series_reducer + new_query._group_by_fields = group_by_fields + return new_query
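Alignment and reduction compose; a sketch of a typical aggregation chain, using the literal-string forms so no extra imports are needed (interval and fields illustrative)::

    query = (client.query(hours=1)
             .align('ALIGN_MEAN', minutes=5)
             .reduce('REDUCE_MEAN', 'resource.zone'))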
+ +
[docs] def iter(self, headers_only=False, page_size=None): + """Yield all time series objects selected by the query. + + The generator returned iterates over + :class:`~gcloud.monitoring.timeseries.TimeSeries` objects + containing points ordered from oldest to newest. + + Note that the :class:`Query` object itself is an iterable, such that + the following are equivalent:: + + for timeseries in query: + ... + + for timeseries in query.iter(): + ... + + :type headers_only: boolean + :param headers_only: + Whether to omit the point data from the time series objects. + + :type page_size: integer or None + :param page_size: + An optional positive number specifying the maximum number of + points to return per page. This can be used to control how far + the iterator reads ahead. + + :raises: :exc:`ValueError` if the query time interval has not been + specified. + """ + # The following use of groupby() relies on equality comparison + # of time series as (named) tuples. + for timeseries, fragments in itertools.groupby( + self._iter_fragments(headers_only, page_size), + lambda fragment: fragment.header()): + points = list(itertools.chain.from_iterable( + fragment.points for fragment in fragments)) + points.reverse() # Order from oldest to newest. + yield timeseries.header(points=points)
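A minimal iteration sketch (assumes the time interval was set when the query was created)::

    query = client.query(minutes=30)
    for timeseries in query:
        print(timeseries.labels.get('instance_name'))
        for point in timeseries.points:
            print(point.end_time, point.value)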
+ + def _iter_fragments(self, headers_only=False, page_size=None): + """Yield all time series fragments selected by the query. + + There may be multiple fragments per time series. These will be + contiguous. + + The parameters and return value are as for :meth:`Query.iter`. + """ + if self._end_time is None: + raise ValueError('Query time interval not specified.') + + path = '/projects/{project}/timeSeries/'.format( + project=self._client.project) + + page_token = None + while True: + params = list(self._build_query_params( + headers_only=headers_only, + page_size=page_size, + page_token=page_token, + )) + response = self._client.connection.api_request( + method='GET', + path=path, + query_params=params, + ) + for info in response.get('timeSeries', ()): + yield TimeSeries._from_dict(info) + + page_token = response.get('nextPageToken') + if not page_token: + break + + def _build_query_params(self, headers_only=False, + page_size=None, page_token=None): + """Yield key-value pairs for the URL query string. + + We use a series of key-value pairs (suitable for passing to + ``urlencode``) instead of a ``dict`` to allow for repeated fields. + + :type headers_only: boolean + :param headers_only: + Whether to omit the point data from the + :class:`~gcloud.monitoring.timeseries.TimeSeries` objects. + + :type page_size: integer or None + :param page_size: A limit on the number of points to return per page. + + :type page_token: string or None + :param page_token: A token to continue the retrieval. + """ + yield 'filter', self.filter + + yield 'interval.endTime', _datetime_to_rfc3339( + self._end_time, ignore_zone=False) + + if self._start_time is not None: + yield 'interval.startTime', _datetime_to_rfc3339( + self._start_time, ignore_zone=False) + + if self._per_series_aligner is not None: + yield 'aggregation.perSeriesAligner', self._per_series_aligner + + if self._alignment_period_seconds is not None: + alignment_period = '{period}s'.format( + period=self._alignment_period_seconds) + yield 'aggregation.alignmentPeriod', alignment_period + + if self._cross_series_reducer is not None: + yield ('aggregation.crossSeriesReducer', + self._cross_series_reducer) + + for field in self._group_by_fields: + yield 'aggregation.groupByFields', field + + if headers_only: + yield 'view', 'HEADERS' + + if page_size is not None: + yield 'pageSize', page_size + + if page_token is not None: + yield 'pageToken', page_token + +
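The pairs yielded by ``_build_query_params`` are suitable for ``urlencode``; a small sketch of why a list of pairs is used instead of a ``dict`` (repeated ``groupByFields`` keys survive the encoding)::

    from six.moves.urllib.parse import urlencode

    params = [('aggregation.groupByFields', 'resource.zone'),
              ('aggregation.groupByFields', 'resource.project_id')]
    print(urlencode(params))
    # aggregation.groupByFields=resource.zone&aggregation.groupByFields=resource.project_id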
[docs] def as_dataframe(self, label=None, labels=None): + """Return all the selected time series as a :mod:`pandas` dataframe. + + .. note:: + + Use of this method requires that you have :mod:`pandas` installed. + + Examples:: + + # Generate a dataframe with a multi-level column header including + # the resource type and all available resource and metric labels. + # This can be useful for seeing what labels are available. + dataframe = query.as_dataframe() + + # Generate a dataframe using a particular label for the column + # names. + dataframe = query.as_dataframe(label='instance_name') + + # Generate a dataframe with a multi-level column header. + dataframe = query.as_dataframe(labels=['zone', 'instance_name']) + + # Generate a dataframe with a multi-level column header, assuming + # the metric is issued by more than one type of resource. + dataframe = query.as_dataframe( + labels=['resource_type', 'instance_id']) + + :type label: string or None + :param label: The label name to use for the dataframe header. + This can be the name of a resource label or metric label + (e.g., ``"instance_name"``), or the string ``"resource_type"``. + + :type labels: list of strings, or None + :param labels: A list or tuple of label names to use for the dataframe + header. If more than one label name is provided, the resulting + dataframe will have a multi-level column header. Providing values + for both ``label`` and ``labels`` is an error. + + :rtype: :class:`pandas.DataFrame` + :returns: A dataframe where each column represents one time series. + """ + return _build_dataframe(self, label, labels) # pragma: NO COVER
+ +
[docs] def copy(self): + """Copy the query object. + + :rtype: :class:`Query` + :returns: The new query object. + """ + # Using copy.deepcopy() would be appropriate, except that we want + # to copy self._client only as a reference. + new_query = copy.copy(self) + new_query._filter = copy.copy(self._filter) + return new_query
+ + +class _Filter(object): + """Helper for assembling a filter string.""" + + def __init__(self, metric_type): + self.metric_type = metric_type + self.group_id = None + self.projects = () + self.resource_label_filter = None + self.metric_label_filter = None + + def select_resources(self, *args, **kwargs): + """Select by resource labels. + + See :meth:`Query.select_resources`. + """ + self.resource_label_filter = _build_label_filter('resource', + *args, **kwargs) + + def select_metrics(self, *args, **kwargs): + """Select by metric labels. + + See :meth:`Query.select_metrics`. + """ + self.metric_label_filter = _build_label_filter('metric', + *args, **kwargs) + + def __str__(self): + filters = ['metric.type = "{type}"'.format(type=self.metric_type)] + if self.group_id is not None: + filters.append('group.id = "{id}"'.format(id=self.group_id)) + if self.projects: + filters.append( + ' OR '.join('project = "{project}"'.format(project=project) + for project in self.projects)) + if self.resource_label_filter: + filters.append(self.resource_label_filter) + if self.metric_label_filter: + filters.append(self.metric_label_filter) + + # Parentheses are never actually required, because OR binds more + # tightly than AND in the Monitoring API's filter syntax. + return ' AND '.join(filters) + + +def _build_label_filter(category, *args, **kwargs): + """Construct a filter string to filter on metric or resource labels.""" + terms = list(args) + for key, value in six.iteritems(kwargs): + if value is None: + continue + + suffix = None + if key.endswith('_prefix') or key.endswith('_suffix'): + key, suffix = key.rsplit('_', 1) + + if category == 'resource' and key == 'resource_type': + key = 'resource.type' + else: + key = '.'.join((category, 'label', key)) + + if suffix == 'prefix': + term = '{key} = starts_with("{value}")' + elif suffix == 'suffix': + term = '{key} = ends_with("{value}")' + else: + term = '{key} = "{value}"' + + terms.append(term.format(key=key, value=value)) + + return ' AND '.join(sorted(terms)) +
+ +
+ +
\ No newline at end of file diff --git a/0.18.1/_modules/gcloud/monitoring/resource.html b/0.18.1/_modules/gcloud/monitoring/resource.html new file mode 100644 index 000000000000..4e9b0a7fee78 --- /dev/null +++ b/0.18.1/_modules/gcloud/monitoring/resource.html @@ -0,0 +1,417 @@ + gcloud.monitoring.resource — gcloud 0.18.1 documentation

Source code for gcloud.monitoring.resource

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Monitored Resource Descriptors for the
+`Google Stackdriver Monitoring API (V3)`_.
+
+.. _Google Stackdriver Monitoring API (V3):
+    https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\
+    projects.monitoredResourceDescriptors
+"""
+
+import collections
+
+from gcloud.monitoring.label import LabelDescriptor
+
+
+
[docs]class ResourceDescriptor(object): + """Specification of a monitored resource type and its schema. + + :type name: string + :param name: + The "resource name" of the monitored resource descriptor: + ``"projects/<project_id>/monitoredResourceDescriptors/<type>"`` + + :type type_: string + :param type_: + The monitored resource type. For example: ``"gce_instance"`` + + :type display_name: string + :param display_name: + A concise name that might be displayed in user interfaces. + + :type description: string + :param description: + A detailed description that might be used in documentation. + + :type labels: list of :class:`~gcloud.monitoring.label.LabelDescriptor` + :param labels: + A sequence of label descriptors specifying the labels used + to identify a specific instance of this monitored resource. + """ + + def __init__(self, name, type_, display_name, description, labels): + self.name = name + self.type = type_ + self.display_name = display_name + self.description = description + self.labels = labels + + @classmethod + def _fetch(cls, client, resource_type): + """Look up a monitored resource descriptor by type. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: The client to use. + + :type resource_type: string + :param resource_type: The resource type name. + + :rtype: :class:`ResourceDescriptor` + :returns: The resource descriptor instance. + + :raises: :class:`gcloud.exceptions.NotFound` if the resource descriptor + is not found. + """ + path = ('/projects/{project}/monitoredResourceDescriptors/{type}' + .format(project=client.project, + type=resource_type)) + info = client.connection.api_request(method='GET', path=path) + return cls._from_dict(info) + + @classmethod + def _list(cls, client, filter_string=None): + """List all monitored resource descriptors for the project. + + :type client: :class:`gcloud.monitoring.client.Client` + :param client: The client to use. + + :type filter_string: string or None + :param filter_string: + An optional filter expression describing the resource descriptors + to be returned. See the `filter documentation`_. + + :rtype: list of :class:`ResourceDescriptor` + :returns: A list of resource descriptor instances. + + .. _filter documentation: + https://cloud.google.com/monitoring/api/v3/filters + """ + path = '/projects/{project}/monitoredResourceDescriptors/'.format( + project=client.project) + + descriptors = [] + + page_token = None + while True: + params = {} + + if filter_string is not None: + params['filter'] = filter_string + + if page_token is not None: + params['pageToken'] = page_token + + response = client.connection.api_request( + method='GET', path=path, query_params=params) + for info in response.get('resourceDescriptors', ()): + descriptors.append(cls._from_dict(info)) + + page_token = response.get('nextPageToken') + if not page_token: + break + + return descriptors + + @classmethod + def _from_dict(cls, info): + """Construct a resource descriptor from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`ResourceDescriptor` + :returns: A resource descriptor. 
+ """ + return cls( + name=info['name'], + type_=info['type'], + display_name=info.get('displayName', ''), + description=info.get('description', ''), + labels=tuple(LabelDescriptor._from_dict(label) + for label in info.get('labels', ())), + ) + + def __repr__(self): + return ( + '<ResourceDescriptor:\n' + ' name={name!r},\n' + ' type={type!r},\n' + ' labels={labels!r},\n' + ' display_name={display_name!r},\n' + ' description={description!r}>' + ).format(**self.__dict__)
+ + +
[docs]class Resource(collections.namedtuple('Resource', 'type labels')): + """A monitored resource identified by specifying values for all labels. + + :type type: string + :param type: The resource type name. + + :type labels: dict + :param labels: A mapping from label names to values for all labels + enumerated in the associated :class:`ResourceDescriptor`. + """ + __slots__ = () + + @classmethod + def _from_dict(cls, info): + """Construct a resource object from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`Resource` + :returns: A resource object. + """ + return cls( + type=info['type'], + labels=info.get('labels', {}), + )
+
+ +
+ +
\ No newline at end of file diff --git a/0.18.1/_modules/gcloud/monitoring/timeseries.html b/0.18.1/_modules/gcloud/monitoring/timeseries.html new file mode 100644 index 000000000000..2ef33d5ca63d --- /dev/null +++ b/0.18.1/_modules/gcloud/monitoring/timeseries.html @@ -0,0 +1,385 @@ + gcloud.monitoring.timeseries — gcloud 0.18.1 documentation

Source code for gcloud.monitoring.timeseries

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Time series for the `Google Stackdriver Monitoring API (V3)`_.
+
+Features intentionally omitted from this first version of the client library:
+  * Writing time series.
+  * Natural representation of distribution values.
+
+.. _Google Stackdriver Monitoring API (V3):
+    https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries
+"""
+
+import collections
+
+from gcloud.monitoring.metric import Metric
+from gcloud.monitoring.resource import Resource
+
+
+
[docs]class TimeSeries(collections.namedtuple( + 'TimeSeries', 'metric resource metric_kind value_type points')): + """A single time series of metric values. + + :type metric: :class:`~gcloud.monitoring.metric.Metric` + :param metric: A metric object. + + :type resource: :class:`~gcloud.monitoring.resource.Resource` + :param resource: A resource object. + + :type metric_kind: string + :param metric_kind: + The kind of measurement: :data:`MetricKind.GAUGE`, + :data:`MetricKind.DELTA`, or :data:`MetricKind.CUMULATIVE`. + See :class:`~gcloud.monitoring.metric.MetricKind`. + + :type value_type: string + :param value_type: + The value type of the metric: :data:`ValueType.BOOL`, + :data:`ValueType.INT64`, :data:`ValueType.DOUBLE`, + :data:`ValueType.STRING`, or :data:`ValueType.DISTRIBUTION`. + See :class:`~gcloud.monitoring.metric.ValueType`. + + :type points: list of :class:`Point` + :param points: A list of point objects. + """ + + _labels = None + + @property + def labels(self): + """A single dictionary with values for all the labels. + + This combines ``resource.labels`` and ``metric.labels`` and also + adds ``"resource_type"``. + """ + if self._labels is None: + labels = {'resource_type': self.resource.type} + labels.update(self.resource.labels) + labels.update(self.metric.labels) + self._labels = labels + + return self._labels + +
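A sketch of the merged ``labels`` mapping, assuming ``Metric`` and ``Resource`` are the ``(type, labels)`` named tuples from this package (values hypothetical)::

    from gcloud.monitoring.metric import Metric
    from gcloud.monitoring.resource import Resource
    from gcloud.monitoring.timeseries import TimeSeries

    series = TimeSeries(
        metric=Metric(type='compute.googleapis.com/instance/cpu/utilization',
                      labels={'instance_name': 'my-instance'}),
        resource=Resource(type='gce_instance',
                          labels={'zone': 'us-central1-a'}),
        metric_kind='GAUGE', value_type='DOUBLE', points=[])
    print(series.labels)
    # {'resource_type': 'gce_instance', 'zone': 'us-central1-a',
    #  'instance_name': 'my-instance'}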
[docs] def header(self, points=None): + """Copy everything but the point data. + + :type points: list of :class:`Point`, or None + :param points: An optional point list. + + :rtype: :class:`TimeSeries` + :returns: The new time series object. + """ + points = list(points) if points else [] + return self._replace(points=points)
+ + @classmethod + def _from_dict(cls, info): + """Construct a time series from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`TimeSeries` + :returns: A time series object. + """ + metric = Metric._from_dict(info['metric']) + resource = Resource._from_dict(info['resource']) + metric_kind = info['metricKind'] + value_type = info['valueType'] + points = [Point._from_dict(p) for p in info.get('points', ())] + return cls(metric, resource, metric_kind, value_type, points) + + def __repr__(self): + """Return a representation string with the points elided.""" + return ( + '<TimeSeries with {num} points:\n' + ' metric={metric!r},\n' + ' resource={resource!r},\n' + ' metric_kind={kind!r}, value_type={type!r}>' + ).format( + num=len(self.points), + metric=self.metric, + resource=self.resource, + kind=self.metric_kind, + type=self.value_type, + )
+ + +
[docs]class Point(collections.namedtuple('Point', 'end_time start_time value')): + """A single point in a time series. + + :type end_time: string + :param end_time: The end time in RFC3339 UTC "Zulu" format. + + :type start_time: string or None + :param start_time: An optional start time in RFC3339 UTC "Zulu" format. + + :type value: object + :param value: The metric value. This can be a scalar or a distribution. + """ + __slots__ = () + + @classmethod + def _from_dict(cls, info): + """Construct a Point from the parsed JSON representation. + + :type info: dict + :param info: + A ``dict`` parsed from the JSON wire-format representation. + + :rtype: :class:`Point` + :returns: A point object. + """ + end_time = info['interval']['endTime'] + start_time = info['interval'].get('startTime') + (value_type, value), = info['value'].items() + if value_type == 'int64Value': + value = int(value) # Convert from string. + + return cls(end_time, start_time, value)
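For example, parsing the wire format shown above (note that an int64 value arrives as a string)::

    info = {
        'interval': {'startTime': '2016-07-01T00:00:00Z',
                     'endTime': '2016-07-01T00:01:00Z'},
        'value': {'int64Value': '42'},
    }
    point = Point._from_dict(info)
    print(point.value)      # 42 (converted to int)
    print(point.end_time)   # '2016-07-01T00:01:00Z'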
+
+ +
+ +
\ No newline at end of file diff --git a/0.18.1/_modules/gcloud/pubsub/client.html b/0.18.1/_modules/gcloud/pubsub/client.html new file mode 100644 index 000000000000..d5451d2c10ab --- /dev/null +++ b/0.18.1/_modules/gcloud/pubsub/client.html @@ -0,0 +1,421 @@ + gcloud.pubsub.client — gcloud 0.18.1 documentation

Source code for gcloud.pubsub.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google Cloud Pub/Sub API."""
+
+import os
+
+from gcloud.client import JSONClient
+from gcloud.pubsub.connection import Connection
+from gcloud.pubsub.connection import _PublisherAPI as JSONPublisherAPI
+from gcloud.pubsub.connection import _SubscriberAPI as JSONSubscriberAPI
+from gcloud.pubsub.connection import _IAMPolicyAPI
+from gcloud.pubsub.subscription import Subscription
+from gcloud.pubsub.topic import Topic
+
+# pylint: disable=ungrouped-imports
+try:
+    from google.cloud.pubsub.v1.publisher_api import (
+        PublisherApi as GeneratedPublisherAPI)
+    from google.cloud.pubsub.v1.subscriber_api import (
+        SubscriberApi as GeneratedSubscriberAPI)
+    from gcloud.pubsub._gax import _PublisherAPI as GAXPublisherAPI
+    from gcloud.pubsub._gax import _SubscriberAPI as GAXSubscriberAPI
+except ImportError:  # pragma: NO COVER
+    _HAVE_GAX = False
+    GeneratedPublisherAPI = GAXPublisherAPI = None
+    GeneratedSubscriberAPI = GAXSubscriberAPI = None
+else:
+    _HAVE_GAX = True
+# pylint: enable=ungrouped-imports
+
+
+_USE_GAX = _HAVE_GAX and (os.environ.get('GCLOUD_ENABLE_GAX') is not None)
+
+
+
[docs]class Client(JSONClient): + """Client to bundle configuration needed for API requests. + + :type project: string + :param project: the project which the client acts on behalf of. Will be + passed when creating a topic. If not passed, + falls back to the default inferred from the environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + _publisher_api = _subscriber_api = _iam_policy_api = None + + @property + def publisher_api(self): + """Helper for publisher-related API calls.""" + if self._publisher_api is None: + if _USE_GAX: + generated = GeneratedPublisherAPI() + self._publisher_api = GAXPublisherAPI(generated) + else: + self._publisher_api = JSONPublisherAPI(self.connection) + return self._publisher_api + + @property + def subscriber_api(self): + """Helper for subscriber-related API calls.""" + if self._subscriber_api is None: + if _USE_GAX: + generated = GeneratedSubscriberAPI() + self._subscriber_api = GAXSubscriberAPI(generated) + else: + self._subscriber_api = JSONSubscriberAPI(self.connection) + return self._subscriber_api + + @property + def iam_policy_api(self): + """Helper for IAM policy-related API calls.""" + if self._iam_policy_api is None: + self._iam_policy_api = _IAMPolicyAPI(self.connection) + return self._iam_policy_api + +
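A minimal construction sketch (project ID hypothetical; note that ``GCLOUD_ENABLE_GAX`` is read when this module is imported, so it must be set beforehand)::

    import os
    os.environ['GCLOUD_ENABLE_GAX'] = '1'  # opt in to the GAX path, if installed

    from gcloud import pubsub

    client = pubsub.Client(project='your-project-id')
    api = client.publisher_api            # created lazily on first access
    assert api is client.publisher_api    # and cached thereafter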
[docs] def list_topics(self, page_size=None, page_token=None): + """List topics for the project associated with this client. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START client_list_topics] + :end-before: [END client_list_topics] + + :type page_size: int + :param page_size: maximum number of topics to return. If not passed, + defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of topics. If not + passed, the API will return the first page of + topics. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.pubsub.topic.Topic`, plus a + "next page token" string: if not None, indicates that + more topics can be retrieved with another call (pass that + value as ``page_token``). + """ + api = self.publisher_api + resources, next_token = api.list_topics( + self.project, page_size, page_token) + topics = [Topic.from_api_repr(resource, self) + for resource in resources] + return topics, next_token
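A pagination sketch using the returned token::

    topics, token = client.list_topics(page_size=100)
    while True:
        for topic in topics:
            print(topic.name)
        if token is None:
            break
        topics, token = client.list_topics(page_size=100, page_token=token)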
+ +
[docs] def list_subscriptions(self, page_size=None, page_token=None): + """List subscriptions for the project associated with this client. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/list + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START client_list_subscriptions] + :end-before: [END client_list_subscriptions] + + :type page_size: int + :param page_size: maximum number of subscriptions to return. If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of subscriptions. + If not passed, the API will return the first page of + subscriptions. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.pubsub.subscription.Subscription`, + plus a "next page token" string: if not None, indicates that + more subscriptions can be retrieved with another call (pass + that value as ``page_token``). + """ + api = self.subscriber_api + resources, next_token = api.list_subscriptions( + self.project, page_size, page_token) + topics = {} + subscriptions = [Subscription.from_api_repr(resource, self, + topics=topics) + for resource in resources] + return subscriptions, next_token
+ +
[docs] def topic(self, name, timestamp_messages=False): + """Creates a topic bound to the current client. + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START client_topic] + :end-before: [END client_topic] + + :type name: string + :param name: the name of the topic to be constructed. + + :type timestamp_messages: boolean + :param timestamp_messages: To be passed to ``Topic`` constructor. + + :rtype: :class:`gcloud.pubsub.topic.Topic` + :returns: Topic created with the current client. + """ + return Topic(name, client=self, timestamp_messages=timestamp_messages)
+
+ +
+ +
\ No newline at end of file diff --git a/0.18.1/_modules/gcloud/pubsub/connection.html b/0.18.1/_modules/gcloud/pubsub/connection.html new file mode 100644 index 000000000000..54d61c289198 --- /dev/null +++ b/0.18.1/_modules/gcloud/pubsub/connection.html @@ -0,0 +1,772 @@ + gcloud.pubsub.connection — gcloud 0.18.1 documentation

Source code for gcloud.pubsub.connection

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud pubsub connections."""
+
+import os
+
+from gcloud import connection as base_connection
+from gcloud.environment_vars import PUBSUB_EMULATOR
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Cloud Pubsub via the JSON REST API. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + connection. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: (Optional) HTTP object to make requests. + + :type api_base_url: string + :param api_base_url: The base of the API call URL. Defaults to the value + :attr:`Connection.API_BASE_URL`. + """ + + API_BASE_URL = 'https://pubsub.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v1' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/pubsub', + 'https://www.googleapis.com/auth/cloud-platform') + """The scopes required for authenticating as a Cloud Pub/Sub consumer.""" + + def __init__(self, credentials=None, http=None, api_base_url=None): + super(Connection, self).__init__(credentials=credentials, http=http) + if api_base_url is None: + emulator_host = os.getenv(PUBSUB_EMULATOR) + if emulator_host is None: + api_base_url = self.__class__.API_BASE_URL + else: + api_base_url = 'http://' + emulator_host + self.api_base_url = api_base_url + +
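For example, pointing the connection at a local emulator (address hypothetical; ``PUBSUB_EMULATOR`` is assumed to name the ``PUBSUB_EMULATOR_HOST`` variable)::

    import os
    os.environ['PUBSUB_EMULATOR_HOST'] = 'localhost:8085'

    from gcloud.pubsub.connection import Connection

    conn = Connection()
    print(conn.api_base_url)   # 'http://localhost:8085'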
[docs] def build_api_url(self, path, query_params=None, + api_base_url=None, api_version=None): + """Construct an API url given a few components, some optional. + + Typically, you shouldn't need to use this method. + + :type path: string + :param path: The path to the resource. + + :type query_params: dict or list + :param query_params: A dictionary of keys and values (or list of + key-value pairs) to insert into the query + string of the URL. + + :type api_base_url: string + :param api_base_url: The base URL for the API endpoint. + Typically you won't have to provide this. + + :type api_version: string + :param api_version: The version of the API to call. + Typically you shouldn't provide this and instead + use the default for the library. + + :rtype: string + :returns: The URL assembled from the pieces provided. + """ + if api_base_url is None: + api_base_url = self.api_base_url + return super(Connection, self.__class__).build_api_url( + path, query_params=query_params, + api_base_url=api_base_url, api_version=api_version)
+ + +class _PublisherAPI(object): + """Helper mapping publisher-related APIs. + + :type connection: :class:`Connection` + :param connection: the connection used to make API requests. + """ + + def __init__(self, connection): + self._connection = connection + + def list_topics(self, project, page_size=None, page_token=None): + """API call: list topics for a given project + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list + + :type project: string + :param project: project ID + + :type page_size: int + :param page_size: maximum number of topics to return, If not passed, + defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of topics. If not + passed, the API will return the first page of + topics. + + :rtype: tuple, (list, str) + :returns: list of ``Topic`` resource dicts, plus a + "next page token" string: if not None, indicates that + more topics can be retrieved with another call (pass that + value as ``page_token``). + """ + conn = self._connection + params = {} + + if page_size is not None: + params['pageSize'] = page_size + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/topics' % (project,) + resp = conn.api_request(method='GET', path=path, query_params=params) + return resp.get('topics', ()), resp.get('nextPageToken') + + def topic_create(self, topic_path): + """API call: create a topic + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/create + + :type topic_path: string + :param topic_path: the fully-qualified path of the new topic, in format + ``projects/<PROJECT>/topics/<TOPIC_NAME>``. + + :rtype: dict + :returns: ``Topic`` resource returned from the API. + """ + conn = self._connection + return conn.api_request(method='PUT', path='/%s' % (topic_path,)) + + def topic_get(self, topic_path): + """API call: retrieve a topic + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/get + + :type topic_path: string + :param topic_path: the fully-qualified path of the topic, in format + ``projects/<PROJECT>/topics/<TOPIC_NAME>``. + + :rtype: dict + :returns: ``Topic`` resource returned from the API. + """ + conn = self._connection + return conn.api_request(method='GET', path='/%s' % (topic_path,)) + + def topic_delete(self, topic_path): + """API call: delete a topic + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/delete + + :type topic_path: string + :param topic_path: the fully-qualified path of the topic, in format + ``projects/<PROJECT>/topics/<TOPIC_NAME>``. + """ + conn = self._connection + conn.api_request(method='DELETE', path='/%s' % (topic_path,)) + + def topic_publish(self, topic_path, messages): + """API call: publish one or more messages to a topic + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/publish + + :type topic_path: string + :param topic_path: the fully-qualified path of the topic, in format + ``projects/<PROJECT>/topics/<TOPIC_NAME>``. + + :type messages: list of dict + :param messages: messages to be published. + + :rtype: list of string + :returns: list of opaque IDs for published messages. 
+ """ + conn = self._connection + data = {'messages': messages} + response = conn.api_request( + method='POST', path='/%s:publish' % (topic_path,), data=data) + return response['messageIds'] + + def topic_list_subscriptions(self, topic_path, page_size=None, + page_token=None): + """API call: list subscriptions bound to a topic + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list + + :type topic_path: string + :param topic_path: the fully-qualified path of the topic, in format + ``projects/<PROJECT>/topics/<TOPIC_NAME>``. + + :type page_size: int + :param page_size: maximum number of subscriptions to return, If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of topics. If not + passed, the API will return the first page of + topics. + + :rtype: list of strings + :returns: fully-qualified names of subscriptions for the supplied + topic. + """ + conn = self._connection + params = {} + + if page_size is not None: + params['pageSize'] = page_size + + if page_token is not None: + params['pageToken'] = page_token + + path = '/%s/subscriptions' % (topic_path,) + resp = conn.api_request(method='GET', path=path, query_params=params) + return resp.get('subscriptions', ()), resp.get('nextPageToken') + + +class _SubscriberAPI(object): + """Helper mapping subscriber-related APIs. + + :type connection: :class:`Connection` + :param connection: the connection used to make API requests. + """ + + def __init__(self, connection): + self._connection = connection + + def list_subscriptions(self, project, page_size=None, page_token=None): + """API call: list subscriptions for a given project + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/list + + :type project: string + :param project: project ID + + :type page_size: int + :param page_size: maximum number of subscriptions to return, If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of subscriptions. + If not passed, the API will return the first page + of subscriptions. + + :rtype: tuple, (list, str) + :returns: list of ``Subscription`` resource dicts, plus a + "next page token" string: if not None, indicates that + more subscriptions can be retrieved with another call (pass + that value as ``page_token``). + """ + conn = self._connection + params = {} + + if page_size is not None: + params['pageSize'] = page_size + + if page_token is not None: + params['pageToken'] = page_token + + path = '/projects/%s/subscriptions' % (project,) + resp = conn.api_request(method='GET', path=path, query_params=params) + return resp.get('subscriptions', ()), resp.get('nextPageToken') + + def subscription_create(self, subscription_path, topic_path, + ack_deadline=None, push_endpoint=None): + """API call: create a subscription + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/create + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the new + subscription, in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + + :type topic_path: string + :param topic_path: the fully-qualified path of the topic being + subscribed, in format + ``projects/<PROJECT>/topics/<TOPIC_NAME>``. + + :type ack_deadline: int, or ``NoneType`` + :param ack_deadline: the deadline (in seconds) by which messages pulled + from the back-end must be acknowledged. 
+ + :type push_endpoint: string, or ``NoneType`` + :param push_endpoint: URL to which messages will be pushed by the + back-end. If not set, the application must pull + messages. + + :rtype: dict + :returns: ``Subscription`` resource returned from the API. + """ + conn = self._connection + path = '/%s' % (subscription_path,) + resource = {'topic': topic_path} + + if ack_deadline is not None: + resource['ackDeadlineSeconds'] = ack_deadline + + if push_endpoint is not None: + resource['pushConfig'] = {'pushEndpoint': push_endpoint} + + return conn.api_request(method='PUT', path=path, data=resource) + + def subscription_get(self, subscription_path): + """API call: retrieve a subscription + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/get + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the subscription, + in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + + :rtype: dict + :returns: ``Subscription`` resource returned from the API. + """ + conn = self._connection + path = '/%s' % (subscription_path,) + return conn.api_request(method='GET', path=path) + + def subscription_delete(self, subscription_path): + """API call: delete a subscription + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/delete + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the subscription, + in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + """ + conn = self._connection + path = '/%s' % (subscription_path,) + conn.api_request(method='DELETE', path=path) + + def subscription_modify_push_config(self, subscription_path, + push_endpoint): + """API call: update push config of a subscription + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the + subscription, in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + + :type push_endpoint: string, or ``NoneType`` + :param push_endpoint: URL to which messages will be pushed by the + back-end. If not set, the application must pull + messages. + """ + conn = self._connection + path = '/%s:modifyPushConfig' % (subscription_path,) + resource = {'pushConfig': {'pushEndpoint': push_endpoint}} + conn.api_request(method='POST', path=path, data=resource) + + def subscription_pull(self, subscription_path, return_immediately=False, + max_messages=1): + """API call: retrieve messages for a subscription + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/pull + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the + subscription, in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + + :type return_immediately: boolean + :param return_immediately: if True, the back-end returns even if no + messages are available; if False, the API + call blocks until one or more messages are + available. + + :type max_messages: int + :param max_messages: the maximum number of messages to return. + + :rtype: list of dict + :returns: the ``receivedMessages`` element of the response. 
+ """ + conn = self._connection + path = '/%s:pull' % (subscription_path,) + data = { + 'returnImmediately': return_immediately, + 'maxMessages': max_messages, + } + response = conn.api_request(method='POST', path=path, data=data) + return response.get('receivedMessages', ()) + + def subscription_acknowledge(self, subscription_path, ack_ids): + """API call: acknowledge retrieved messages + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the new + subscription, in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + + :type ack_ids: list of string + :param ack_ids: ack IDs of messages being acknowledged + """ + conn = self._connection + path = '/%s:acknowledge' % (subscription_path,) + data = { + 'ackIds': ack_ids, + } + conn.api_request(method='POST', path=path, data=data) + + def subscription_modify_ack_deadline(self, subscription_path, ack_ids, + ack_deadline): + """API call: update ack deadline for retrieved messages + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyAckDeadline + + :type subscription_path: string + :param subscription_path: the fully-qualified path of the new + subscription, in format + ``projects/<PROJECT>/subscriptions/<SUB_NAME>``. + + :type ack_ids: list of string + :param ack_ids: ack IDs of messages being acknowledged + + :type ack_deadline: int + :param ack_deadline: the deadline (in seconds) by which messages pulled + from the back-end must be acknowledged. + """ + conn = self._connection + path = '/%s:modifyAckDeadline' % (subscription_path,) + data = { + 'ackIds': ack_ids, + 'ackDeadlineSeconds': ack_deadline, + } + conn.api_request(method='POST', path=path, data=data) + + +class _IAMPolicyAPI(object): + """Helper mapping IAM policy-related APIs. + + :type connection: :class:`Connection` + :param connection: the connection used to make API requests. + """ + + def __init__(self, connection): + self._connection = connection + + def get_iam_policy(self, target_path): + """API call: fetch the IAM policy for the target + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/getIamPolicy + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/getIamPolicy + + :type target_path: string + :param target_path: the path of the target object. + + :rtype: dict + :returns: the resource returned by the ``getIamPolicy`` API request. + """ + conn = self._connection + path = '/%s:getIamPolicy' % (target_path,) + return conn.api_request(method='GET', path=path) + + def set_iam_policy(self, target_path, policy): + """API call: update the IAM policy for the target + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/setIamPolicy + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/setIamPolicy + + :type target_path: string + :param target_path: the path of the target object. + + :type policy: dict + :param policy: the new policy resource. + + :rtype: dict + :returns: the resource returned by the ``setIamPolicy`` API request. 
+ """ + conn = self._connection + wrapped = {'policy': policy} + path = '/%s:setIamPolicy' % (target_path,) + return conn.api_request(method='POST', path=path, data=wrapped) + + def test_iam_permissions(self, target_path, permissions): + """API call: test permissions + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/testIamPermissions + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/testIamPermissions + + :type target_path: string + :param target_path: the path of the target object. + + :type permissions: list of string + :param permissions: the permissions to check + + :rtype: dict + :returns: the resource returned by the ``getIamPolicy`` API request. + """ + conn = self._connection + wrapped = {'permissions': permissions} + path = '/%s:testIamPermissions' % (target_path,) + resp = conn.api_request(method='POST', path=path, data=wrapped) + return resp.get('permissions', []) +
+ +
+ +
\ No newline at end of file diff --git a/0.18.1/_modules/gcloud/pubsub/iam.html b/0.18.1/_modules/gcloud/pubsub/iam.html new file mode 100644 index 000000000000..5c2518a82a08 --- /dev/null +++ b/0.18.1/_modules/gcloud/pubsub/iam.html @@ -0,0 +1,492 @@ + gcloud.pubsub.iam — gcloud 0.18.1 documentation

Source code for gcloud.pubsub.iam

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PubSub API IAM policy definitions
+
+For allowed roles / permissions, see:
+https://cloud.google.com/pubsub/access_control#permissions
+"""
+
+# Generic IAM roles
+
+OWNER_ROLE = 'roles/owner'
+"""Generic role implying all rights to an object."""
+
+EDITOR_ROLE = 'roles/editor'
+"""Generic role implying rights to modify an object."""
+
+VIEWER_ROLE = 'roles/viewer'
+"""Generic role implying rights to access an object."""
+
+# Pubsub-specific IAM roles
+
+PUBSUB_ADMIN_ROLE = 'roles/pubsub.admin'
+"""Role implying all rights to an object."""
+
+PUBSUB_EDITOR_ROLE = 'roles/pubsub.editor'
+"""Role implying rights to modify an object."""
+
+PUBSUB_VIEWER_ROLE = 'roles/pubsub.viewer'
+"""Role implying rights to access an object."""
+
+PUBSUB_PUBLISHER_ROLE = 'roles/pubsub.publisher'
+"""Role implying rights to publish to a topic."""
+
+PUBSUB_SUBSCRIBER_ROLE = 'roles/pubsub.subscriber'
+"""Role implying rights to subscribe to a topic."""
+
+
+# Pubsub-specific permissions
+
+PUBSUB_TOPICS_CONSUME = 'pubsub.topics.consume'
+"""Permission: consume events from a subscription."""
+
+PUBSUB_TOPICS_CREATE = 'pubsub.topics.create'
+"""Permission: create topics."""
+
+PUBSUB_TOPICS_DELETE = 'pubsub.topics.delete'
+"""Permission: delete topics."""
+
+PUBSUB_TOPICS_GET = 'pubsub.topics.get'
+"""Permission: retrieve topics."""
+
+PUBSUB_TOPICS_GET_IAM_POLICY = 'pubsub.topics.getIamPolicy'
+"""Permission: retrieve subscription IAM policies."""
+
+PUBSUB_TOPICS_LIST = 'pubsub.topics.list'
+"""Permission: list topics."""
+
+PUBSUB_TOPICS_SET_IAM_POLICY = 'pubsub.topics.setIamPolicy'
+"""Permission: update subscription IAM policies."""
+
+PUBSUB_SUBSCRIPTIONS_CONSUME = 'pubsub.subscriptions.consume'
+"""Permission: consume events from a subscription."""
+
+PUBSUB_SUBSCRIPTIONS_CREATE = 'pubsub.subscriptions.create'
+"""Permission: create subscriptions."""
+
+PUBSUB_SUBSCRIPTIONS_DELETE = 'pubsub.subscriptions.delete'
+"""Permission: delete subscriptions."""
+
+PUBSUB_SUBSCRIPTIONS_GET = 'pubsub.subscriptions.get'
+"""Permission: retrieve subscriptions."""
+
+PUBSUB_SUBSCRIPTIONS_GET_IAM_POLICY = 'pubsub.subscriptions.getIamPolicy'
+"""Permission: retrieve subscription IAM policies."""
+
+PUBSUB_SUBSCRIPTIONS_LIST = 'pubsub.subscriptions.list'
+"""Permission: list subscriptions."""
+
+PUBSUB_SUBSCRIPTIONS_SET_IAM_POLICY = 'pubsub.subscriptions.setIamPolicy'
+"""Permission: update subscription IAM policies."""
+
+PUBSUB_SUBSCRIPTIONS_UPDATE = 'pubsub.subscriptions.update'
+"""Permission: update subscriptions."""
+
+
+
[docs]class Policy(object): + """Combined IAM Policy / Bindings. + + See: + https://cloud.google.com/pubsub/reference/rest/Shared.Types/Policy + https://cloud.google.com/pubsub/reference/rest/Shared.Types/Binding + + :type etag: string + :param etag: ETag used to identify a unique version of the policy + + :type version: int + :param version: unique version of the policy + """ + def __init__(self, etag=None, version=None): + self.etag = etag + self.version = version + self.owners = set() + self.editors = set() + self.viewers = set() + self.publishers = set() + self.subscribers = set() + + @staticmethod +
[docs] def user(email): + """Factory method for a user member. + + :type email: string + :param email: E-mail for this particular user. + + :rtype: string + :returns: A member string corresponding to the given user. + """ + return 'user:%s' % (email,)
+ + @staticmethod +
[docs] def service_account(email): + """Factory method for a service account member. + + :type email: string + :param email: E-mail for this particular service account. + + :rtype: string + :returns: A member string corresponding to the given service account. + """ + return 'serviceAccount:%s' % (email,)
+ + @staticmethod +
[docs] def group(email): + """Factory method for a group member. + + :type email: string + :param email: An id or e-mail for this particular group. + + :rtype: string + :returns: A member string corresponding to the given group. + """ + return 'group:%s' % (email,)
+ + @staticmethod +
[docs] def domain(domain): + """Factory method for a domain member. + + :type domain: string + :param domain: The domain for this member. + + :rtype: string + :returns: A member string corresponding to the given domain. + """ + return 'domain:%s' % (domain,)
+ + @staticmethod +
[docs] def all_users(): + """Factory method for a member representing all users. + + :rtype: string + :returns: A member string representing all users. + """ + return 'allUsers'
+ + @staticmethod +
[docs] def authenticated_users(): + """Factory method for a member representing all authenticated users. + + :rtype: string + :returns: A member string representing all authenticated users. + """ + return 'allAuthenticatedUsers'
+ + @classmethod +
[docs] def from_api_repr(cls, resource): + """Create a policy from the resource returned from the API. + + :type resource: dict + :param resource: resource returned from the ``getIamPolicy`` API. + + :rtype: :class:`Policy` + :returns: the parsed policy + """ + version = resource.get('version') + etag = resource.get('etag') + policy = cls(etag, version) + for binding in resource.get('bindings', ()): + role = binding['role'] + members = set(binding['members']) + if role in (OWNER_ROLE, PUBSUB_ADMIN_ROLE): + policy.owners |= members + elif role in (EDITOR_ROLE, PUBSUB_EDITOR_ROLE): + policy.editors |= members + elif role in (VIEWER_ROLE, PUBSUB_VIEWER_ROLE): + policy.viewers |= members + elif role == PUBSUB_PUBLISHER_ROLE: + policy.publishers |= members + elif role == PUBSUB_SUBSCRIBER_ROLE: + policy.subscribers |= members + else: + raise ValueError('Unknown role: %s' % (role,)) + return policy
+ +
[docs] def to_api_repr(self): + """Construct a Policy resource. + + :rtype: dict + :returns: a resource to be passed to the ``setIamPolicy`` API. + """ + resource = {} + + if self.etag is not None: + resource['etag'] = self.etag + + if self.version is not None: + resource['version'] = self.version + + bindings = [] + + if self.owners: + bindings.append( + {'role': PUBSUB_ADMIN_ROLE, + 'members': sorted(self.owners)}) + + if self.editors: + bindings.append( + {'role': PUBSUB_EDITOR_ROLE, + 'members': sorted(self.editors)}) + + if self.viewers: + bindings.append( + {'role': PUBSUB_VIEWER_ROLE, + 'members': sorted(self.viewers)}) + + if self.publishers: + bindings.append( + {'role': PUBSUB_PUBLISHER_ROLE, + 'members': sorted(self.publishers)}) + + if self.subscribers: + bindings.append( + {'role': PUBSUB_SUBSCRIBER_ROLE, + 'members': sorted(self.subscribers)}) + + if bindings: + resource['bindings'] = bindings + + return resource
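+A minimal round-trip sketch for ``from_api_repr`` / ``to_api_repr`` (purely
+local: the e-mail address below is illustrative and no API request is made)::
+
+    >>> from gcloud.pubsub.iam import Policy
+    >>> policy = Policy(etag='DEADBEEF', version=1)
+    >>> policy.viewers.add(Policy.user('alice@example.com'))
+    >>> resource = policy.to_api_repr()
+    >>> resource['etag'], resource['version']
+    ('DEADBEEF', 1)
+    >>> 'user:alice@example.com' in Policy.from_api_repr(resource).viewers
+    True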
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/pubsub/message.html b/0.18.1/_modules/gcloud/pubsub/message.html new file mode 100644 index 000000000000..b20a18876600 --- /dev/null +++ b/0.18.1/_modules/gcloud/pubsub/message.html @@ -0,0 +1,326 @@ + + + + + + + + gcloud.pubsub.message — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.pubsub.message

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Topics."""
+
+import base64
+
+from gcloud._helpers import _rfc3339_to_datetime
+
+
+
[docs]class Message(object): + """Messages can be published to a topic and received by subscribers. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/PubsubMessage + + :type data: bytes + :param data: the payload of the message + + :type message_id: string + :param message_id: An ID assigned to the message by the API. + + :type attributes: dict or None + :param attributes: Extra metadata associated by the publisher with the + message. + """ + _service_timestamp = None + + def __init__(self, data, message_id, attributes=None): + self.data = data + self.message_id = message_id + self._attributes = attributes + + @property + def attributes(self): + """Lazily-constructed attribute dictionary""" + if self._attributes is None: + self._attributes = {} + return self._attributes + + @property + def timestamp(self): + """Return sortable timestamp from attributes, if passed. + + Allows sorting messages in publication order (assuming consistent + clocks across all publishers). + + :rtype: :class:`datetime.datetime` + :returns: timestamp (in UTC timezone) parsed from RFC 3339 timestamp + :raises: ValueError if timestamp not in ``attributes``, or if it does + not match the RFC 3339 format. + """ + stamp = self.attributes.get('timestamp') + if stamp is None: + raise ValueError('No timestamp') + return _rfc3339_to_datetime(stamp) + + @property + def service_timestamp(self): + """Return server-set timestamp. + + :rtype: string + :returns: timestamp (in UTC timezone) in RFC 3339 format + """ + return self._service_timestamp + + @classmethod +
[docs] def from_api_repr(cls, api_repr): + """Factory: construct message from API representation. + + :type api_repr: dict or None + :param api_repr: The API representation of the message + + :rtype: :class:`Message` + :returns: The message created from the response. + """ + data = base64.b64decode(api_repr.get('data', b'')) + instance = cls( + data=data, message_id=api_repr['messageId'], + attributes=api_repr.get('attributes')) + instance._service_timestamp = api_repr.get('publishTimestamp') + return instance
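+A minimal sketch of the factory (the payload, message ID, and attribute are
+illustrative)::
+
+    >>> import base64
+    >>> from gcloud.pubsub.message import Message
+    >>> api_repr = {
+    ...     'data': base64.b64encode(b'hello'),
+    ...     'messageId': '12345',
+    ...     'attributes': {'color': 'blue'},
+    ... }
+    >>> message = Message.from_api_repr(api_repr)
+    >>> message.data == b'hello'
+    True
+    >>> message.attributes['color']
+    'blue'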
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/pubsub/subscription.html b/0.18.1/_modules/gcloud/pubsub/subscription.html new file mode 100644 index 000000000000..83523e59c0cf --- /dev/null +++ b/0.18.1/_modules/gcloud/pubsub/subscription.html @@ -0,0 +1,726 @@ + + + + + + + + gcloud.pubsub.subscription — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.pubsub.subscription

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Subscriptions."""
+
+from gcloud.exceptions import NotFound
+from gcloud.pubsub._helpers import topic_name_from_path
+from gcloud.pubsub.iam import Policy
+from gcloud.pubsub.message import Message
+
+
+
[docs]class Subscription(object): + """Subscriptions receive messages published to their topics. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions + + :type name: string + :param name: the name of the subscription + + :type topic: :class:`gcloud.pubsub.topic.Topic` or ``NoneType`` + :param topic: the topic to which the subscription belongs; if ``None``, + the subscription's topic has been deleted. + + :type ack_deadline: int + :param ack_deadline: the deadline (in seconds) by which messages pulled + from the back-end must be acknowledged. + + :type push_endpoint: string + :param push_endpoint: URL to which messages will be pushed by the back-end. + If not set, the application must pull messages. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the topic. + """ + + _DELETED_TOPIC_PATH = '_deleted-topic_' + """Value of ``projects.subscriptions.topic`` when topic has been deleted. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions#Subscription.FIELDS.topic + """ + + def __init__(self, name, topic=None, ack_deadline=None, push_endpoint=None, + client=None): + + if client is None and topic is None: + raise TypeError("Pass one of 'topic' or 'client'.") + + if client is not None and topic is not None: + raise TypeError("Pass only one of 'topic' or 'client'.") + + self.name = name + self.topic = topic + self._client = client or topic._client + self._project = self._client.project + self.ack_deadline = ack_deadline + self.push_endpoint = push_endpoint + + @classmethod +
[docs] def from_api_repr(cls, resource, client, topics=None): + """Factory: construct a subscription given its API representation + + :type resource: dict + :param resource: subscription resource representation returned from the API + + :type client: :class:`gcloud.pubsub.client.Client` + :param client: Client which holds credentials and project + configuration for the subscription. + + :type topics: dict or None + :param topics: A mapping of topic names -> topics. If not passed, + the subscription will have a newly-created topic. + + :rtype: :class:`gcloud.pubsub.subscription.Subscription` + :returns: Subscription parsed from ``resource``. + """ + if topics is None: + topics = {} + topic_path = resource['topic'] + if topic_path == cls._DELETED_TOPIC_PATH: + topic = None + else: + topic = topics.get(topic_path) + if topic is None: + # NOTE: This duplicates behavior from Topic.from_api_repr to + # avoid an import cycle. + topic_name = topic_name_from_path(topic_path, client.project) + topic = topics[topic_path] = client.topic(topic_name) + _, _, _, name = resource['name'].split('/') + ack_deadline = resource.get('ackDeadlineSeconds') + push_config = resource.get('pushConfig', {}) + push_endpoint = push_config.get('pushEndpoint') + if topic is None: + return cls(name, ack_deadline=ack_deadline, + push_endpoint=push_endpoint, client=client) + return cls(name, topic, ack_deadline, push_endpoint)
+ + @property + def project(self): + """Project bound to the subscription.""" + return self._client.project + + @property + def full_name(self): + """Fully-qualified name used in subscription APIs""" + return 'projects/%s/subscriptions/%s' % (self.project, self.name) + + @property + def path(self): + """URL path for the subscription's APIs""" + return '/%s' % (self.full_name,) + +
[docs] def auto_ack(self, return_immediately=False, max_messages=1, client=None): + """:class:`AutoAck` factory + + :type return_immediately: boolean + :param return_immediately: passed through to :meth:`Subscription.pull` + + :type max_messages: int + :param max_messages: passed through to :meth:`Subscription.pull` + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: passed through to :meth:`Subscription.pull` and + :meth:`Subscription.acknowledge`. + + :rtype: :class:`AutoAck` + :returns: the instance created for the given ``ack_id`` and ``message`` + """ + return AutoAck(self, return_immediately, max_messages, client)
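+A usage sketch, assuming an existing ``subscription`` and a hypothetical
+``process`` callable; items are snapshotted with ``list()`` so entries can
+be deleted during iteration, and any message whose processing fails is
+removed from the mapping so it is *not* acknowledged on exit::
+
+    >>> with subscription.auto_ack(return_immediately=True,
+    ...                            max_messages=10) as ack:
+    ...     for ack_id, message in list(ack.items()):
+    ...         try:
+    ...             process(message)
+    ...         except Exception:
+    ...             del ack[ack_id]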
+ + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the topic of the + current subscription. + + :rtype: :class:`gcloud.pubsub.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + +
[docs] def create(self, client=None): + """API call: create the subscription via a PUT request + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/create + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_create] + :end-before: [END subscription_create] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + """ + client = self._require_client(client) + api = client.subscriber_api + api.subscription_create( + self.full_name, self.topic.full_name, self.ack_deadline, + self.push_endpoint)
+ +
[docs] def exists(self, client=None): + """API call: test existence of the subscription via a GET request + + See + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/get + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_exists] + :end-before: [END subscription_exists] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + + :rtype: bool + :returns: Boolean indicating existence of the subscription. + """ + client = self._require_client(client) + api = client.subscriber_api + try: + api.subscription_get(self.full_name) + except NotFound: + return False + else: + return True
+ +
[docs] def reload(self, client=None): + """API call: sync local subscription configuration via a GET request + + See + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/get + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_reload] + :end-before: [END subscription_reload] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + """ + client = self._require_client(client) + api = client.subscriber_api + data = api.subscription_get(self.full_name) + self.ack_deadline = data.get('ackDeadlineSeconds') + push_config = data.get('pushConfig', {}) + self.push_endpoint = push_config.get('pushEndpoint')
+ +
[docs] def delete(self, client=None): + """API call: delete the subscription via a DELETE request. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/delete + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_delete] + :end-before: [END subscription_delete] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + """ + client = self._require_client(client) + api = client.subscriber_api + api.subscription_delete(self.full_name)
+ +
[docs] def modify_push_configuration(self, push_endpoint, client=None): + """API call: update the push endpoint for the subscription. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyPushConfig + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_push_pull] + :end-before: [END subscription_push_pull] + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_pull_push] + :end-before: [END subscription_pull_push] + + :type push_endpoint: string + :param push_endpoint: URL to which messages will be pushed by the + back-end. If None, the application must pull + messages. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + """ + client = self._require_client(client) + api = client.subscriber_api + api.subscription_modify_push_config(self.full_name, push_endpoint) + self.push_endpoint = push_endpoint
+ +
[docs] def pull(self, return_immediately=False, max_messages=1, client=None): + """API call: retrieve messages for the subscription. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/pull + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_pull] + :end-before: [END subscription_pull] + + :type return_immediately: boolean + :param return_immediately: if True, the back-end returns even if no + messages are available; if False, the API + call blocks until one or more messages are + available. + + :type max_messages: int + :param max_messages: the maximum number of messages to return. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + + :rtype: list of (ack_id, message) tuples + :returns: sequence of tuples: ``ack_id`` is the ID to be used in a + subsequent call to :meth:`acknowledge`, and ``message`` + is an instance of :class:`gcloud.pubsub.message.Message`. + """ + client = self._require_client(client) + api = client.subscriber_api + response = api.subscription_pull( + self.full_name, return_immediately, max_messages) + return [(info['ackId'], Message.from_api_repr(info['message'])) + for info in response]
+ +
[docs] def acknowledge(self, ack_ids, client=None): + """API call: acknowledge retrieved messages for the subscription. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/acknowledge + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_acknowledge] + :end-before: [END subscription_acknowledge] + + :type ack_ids: list of string + :param ack_ids: ack IDs of messages being acknowledged + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + """ + client = self._require_client(client) + api = client.subscriber_api + api.subscription_acknowledge(self.full_name, ack_ids)
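+A pull-then-acknowledge sketch, assuming an existing ``subscription`` and a
+hypothetical ``handle`` callable; only the messages handled without raising
+are acknowledged::
+
+    >>> handled = []
+    >>> for ack_id, message in subscription.pull(return_immediately=True,
+    ...                                          max_messages=10):
+    ...     handle(message)
+    ...     handled.append(ack_id)
+    >>> if handled:
+    ...     subscription.acknowledge(handled)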
+ +
[docs] def modify_ack_deadline(self, ack_ids, ack_deadline, client=None): + """API call: update acknowledgement deadline for a retrieved message. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/modifyAckDeadline + + :type ack_ids: list of string + :param ack_ids: ack IDs of messages being updated + + :type ack_deadline: int + :param ack_deadline: new deadline for the message, in seconds + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + """ + client = self._require_client(client) + api = client.subscriber_api + api.subscription_modify_ack_deadline( + self.full_name, ack_ids, ack_deadline)
+ +
[docs] def get_iam_policy(self, client=None): + """Fetch the IAM policy for the subscription. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/getIamPolicy + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_get_iam_policy] + :end-before: [END subscription_get_iam_policy] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + + :rtype: :class:`gcloud.pubsub.iam.Policy` + :returns: policy created from the resource returned by the + ``getIamPolicy`` API request. + """ + client = self._require_client(client) + api = client.iam_policy_api + resp = api.get_iam_policy(self.full_name) + return Policy.from_api_repr(resp)
+ +
[docs] def set_iam_policy(self, policy, client=None): + """Update the IAM policy for the subscription. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/setIamPolicy + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_set_iam_policy] + :end-before: [END subscription_set_iam_policy] + + :type policy: :class:`gcloud.pubsub.iam.Policy` + :param policy: the new policy, typically fetched via + :meth:`get_iam_policy` and updated in place. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + + :rtype: :class:`gcloud.pubsub.iam.Policy` + :returns: updated policy created from the resource returned by the + ``setIamPolicy`` API request. + """ + client = self._require_client(client) + api = client.iam_policy_api + resource = policy.to_api_repr() + resp = api.set_iam_policy(self.full_name, resource) + return Policy.from_api_repr(resp)
+ +
[docs] def check_iam_permissions(self, permissions, client=None): + """Verify permissions allowed for the current user. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions/testIamPermissions + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_check_iam_permissions] + :end-before: [END subscription_check_iam_permissions] + + :type permissions: list of string + :param permissions: list of permissions to be tested + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current subscription's topic. + + :rtype: sequence of string + :returns: subset of ``permissions`` allowed by current IAM policy. + """ + client = self._require_client(client) + api = client.iam_policy_api + return api.test_iam_permissions( + self.full_name, list(permissions))
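+A sketch using the permission constants defined in ``gcloud.pubsub.iam``
+(assumes an existing ``subscription``); the call returns the subset of the
+requested permissions the caller actually holds::
+
+    >>> from gcloud.pubsub.iam import (
+    ...     PUBSUB_SUBSCRIPTIONS_GET_IAM_POLICY,
+    ...     PUBSUB_SUBSCRIPTIONS_UPDATE)
+    >>> to_check = [PUBSUB_SUBSCRIPTIONS_GET_IAM_POLICY,
+    ...             PUBSUB_SUBSCRIPTIONS_UPDATE]
+    >>> allowed = subscription.check_iam_permissions(to_check)
+    >>> set(allowed) <= set(to_check)
+    True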
+ + +
[docs]class AutoAck(dict): + """Wrapper for :meth:`Subscription.pull` results. + + Mapping, tracks messages still-to-be-acknowledged. + + When used as a context manager, acknowledges all messages still in the + mapping on ``__exit__``. When processing the pulled messages, application + code MUST delete messages from the :class:`AutoAck` mapping which are not + successfully processed, e.g.: + + .. code-block:: python + + with AutoAck(subscription) as ack: # calls ``subscription.pull`` + for ack_id, message in ack.items(): + try: + do_something_with(message) + except Exception: + del ack[ack_id] + + :type subscription: :class:`Subscription` + :param subscription: subscription to be pulled. + + :type return_immediately: boolean + :param return_immediately: passed through to :meth:`Subscription.pull` + + :type max_messages: int + :param max_messages: passed through to :meth:`Subscription.pull` + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: passed through to :meth:`Subscription.pull` and + :meth:`Subscription.acknowledge`. + """ + def __init__(self, subscription, + return_immediately=False, max_messages=1, client=None): + super(AutoAck, self).__init__() + self._subscription = subscription + self._return_immediately = return_immediately + self._max_messages = max_messages + self._client = client + + def __enter__(self): + items = self._subscription.pull( + self._return_immediately, self._max_messages, self._client) + self.update(items) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self._subscription.acknowledge(list(self), self._client)
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/pubsub/topic.html b/0.18.1/_modules/gcloud/pubsub/topic.html new file mode 100644 index 000000000000..764b4867ff19 --- /dev/null +++ b/0.18.1/_modules/gcloud/pubsub/topic.html @@ -0,0 +1,693 @@ + + + + + + + + gcloud.pubsub.topic — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.pubsub.topic

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Define API Topics."""
+
+import base64
+
+from gcloud._helpers import _datetime_to_rfc3339
+from gcloud._helpers import _NOW
+from gcloud.exceptions import NotFound
+from gcloud.pubsub._helpers import subscription_name_from_path
+from gcloud.pubsub._helpers import topic_name_from_path
+from gcloud.pubsub.iam import Policy
+from gcloud.pubsub.subscription import Subscription
+
+
+
[docs]class Topic(object): + """Topics are targets to which messages can be published. + + Subscribers then receive those messages. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics + + :type name: string + :param name: the name of the topic + + :type client: :class:`gcloud.pubsub.client.Client` + :param client: A client which holds credentials and project configuration + for the topic (which requires a project). + + :type timestamp_messages: boolean + :param timestamp_messages: If true, the topic will add a ``timestamp`` key + to the attributes of each published message: + the value will be an RFC 3339 timestamp. + """ + def __init__(self, name, client, timestamp_messages=False): + self.name = name + self._client = client + self.timestamp_messages = timestamp_messages + +
[docs] def subscription(self, name, ack_deadline=None, push_endpoint=None): + """Creates a subscription bound to the current topic. + + Example: pull-mode subscription, default parameter values + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_subscription_defaults] + :end-before: [END topic_subscription_defaults] + + Example: pull-mode subscription, override ``ack_deadline`` default + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_subscription_ack90] + :end-before: [END topic_subscription_ack90] + + Example: push-mode subscription + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_subscription_push] + :end-before: [END topic_subscription_push] + + :type name: string + :param name: the name of the subscription + + :type ack_deadline: int + :param ack_deadline: the deadline (in seconds) by which messages pulled + from the back-end must be acknowledged. + + :type push_endpoint: string + :param push_endpoint: URL to which messages will be pushed by the + back-end. If not set, the application must pull + messages. + + :rtype: :class:`Subscription` + :returns: The subscription created with the passed in arguments. + """ + return Subscription(name, self, ack_deadline=ack_deadline, + push_endpoint=push_endpoint)
+ + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a topic given its API representation + + :type resource: dict + :param resource: topic resource representation returned from the API + + :type client: :class:`gcloud.pubsub.client.Client` + :param client: Client which holds credentials and project + configuration for the topic. + + :rtype: :class:`gcloud.pubsub.topic.Topic` + :returns: Topic parsed from ``resource``. + :raises: :class:`ValueError` if ``client`` is not ``None`` and the + project from the resource does not agree with the project + from the client. + """ + topic_name = topic_name_from_path(resource['name'], client.project) + return cls(topic_name, client=client)
+ + @property + def project(self): + """Project bound to the topic.""" + return self._client.project + + @property + def full_name(self): + """Fully-qualified name used in topic / subscription APIs""" + return 'projects/%s/topics/%s' % (self.project, self.name) + + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: :class:`gcloud.pubsub.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + +
[docs] def create(self, client=None): + """API call: create the topic via a PUT request + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/create + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_create] + :end-before: [END topic_create] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + """ + client = self._require_client(client) + api = client.publisher_api + api.topic_create(topic_path=self.full_name)
+ +
[docs] def exists(self, client=None): + """API call: test for the existence of the topic via a GET request + + See + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/get + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_exists] + :end-before: [END topic_exists] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: bool + :returns: Boolean indicating existence of the topic. + """ + client = self._require_client(client) + api = client.publisher_api + + try: + api.topic_get(topic_path=self.full_name) + except NotFound: + return False + else: + return True
+ +
[docs] def delete(self, client=None): + """API call: delete the topic via a DELETE request + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/delete + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_delete] + :end-before: [END topic_delete] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + """ + client = self._require_client(client) + api = client.publisher_api + api.topic_delete(topic_path=self.full_name)
+ + def _timestamp_message(self, attrs): + """Add a timestamp to ``attrs``, if the topic is so configured. + + If ``attrs`` already has the key, do nothing. + + Helper method for ``publish``/``Batch.publish``. + """ + if self.timestamp_messages and 'timestamp' not in attrs: + attrs['timestamp'] = _datetime_to_rfc3339(_NOW()) + +
[docs] def publish(self, message, client=None, **attrs): + """API call: publish a message to a topic via a POST request + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/publish + + Example without message attributes: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_publish_simple_message] + :end-before: [END topic_publish_simple_message] + + With message attributes: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_publish_message_with_attrs] + :end-before: [END topic_publish_message_with_attrs] + + :type message: bytes + :param message: the message payload + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :type attrs: dict (string -> string) + :param attrs: key-value pairs to send as message attributes + + :rtype: str + :returns: message ID assigned by the server to the published message + """ + client = self._require_client(client) + api = client.publisher_api + + self._timestamp_message(attrs) + message_b = base64.b64encode(message).decode('ascii') + message_data = {'data': message_b, 'attributes': attrs} + message_ids = api.topic_publish(self.full_name, [message_data]) + return message_ids[0]
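+A publishing sketch, assuming an existing ``topic``; the payload must be
+bytes, and extra keyword arguments become message attributes::
+
+    >>> message_id = topic.publish(b'deploy finished', stage='prod')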
+ +
[docs] def batch(self, client=None): + """Return a batch to use as a context manager. + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_batch] + :end-before: [END topic_batch] + + .. note:: + + The only API request happens during the ``__exit__()`` of the topic + used as a context manager, and only if the block exits without + raising an exception. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: :class:`Batch` + :returns: A batch to use as a context manager. + """ + client = self._require_client(client) + return Batch(self, client)
+ +
[docs] def list_subscriptions(self, page_size=None, page_token=None, client=None): + """List subscriptions bound to the current topic. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_list_subscriptions] + :end-before: [END topic_list_subscriptions] + + :type page_size: int + :param page_size: maximum number of subscriptions to return. If not + passed, defaults to a value set by the API. + + :type page_token: string + :param page_token: opaque marker for the next "page" of subscriptions. + If not passed, the API will return the first page of + subscriptions. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: tuple, (list, str) + :returns: list of :class:`gcloud.pubsub.subscription.Subscription`, + plus a "next page token" string: if not None, indicates that + more subscriptions can be retrieved with another call (pass + that value as ``page_token``). + """ + client = self._require_client(client) + api = client.publisher_api + sub_paths, next_token = api.topic_list_subscriptions( + self.full_name, page_size, page_token) + subscriptions = [] + for sub_path in sub_paths: + sub_name = subscription_name_from_path(sub_path, self.project) + subscriptions.append(Subscription(sub_name, self)) + return subscriptions, next_token
+ +
[docs] def get_iam_policy(self, client=None): + """Fetch the IAM policy for the topic. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/getIamPolicy + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_get_iam_policy] + :end-before: [END topic_get_iam_policy] + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: :class:`gcloud.pubsub.iam.Policy` + :returns: policy created from the resource returned by the + ``getIamPolicy`` API request. + """ + client = self._require_client(client) + api = client.iam_policy_api + resp = api.get_iam_policy(self.full_name) + return Policy.from_api_repr(resp)
+ +
[docs] def set_iam_policy(self, policy, client=None): + """Update the IAM policy for the topic. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/setIamPolicy + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_set_iam_policy] + :end-before: [END topic_set_iam_policy] + + :type policy: :class:`gcloud.pubsub.iam.Policy` + :param policy: the new policy, typically fetched via + :meth:`get_iam_policy` and updated in place. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: :class:`gcloud.pubsub.iam.Policy` + :returns: updated policy created from the resource returned by the + ``setIamPolicy`` API request. + """ + client = self._require_client(client) + api = client.iam_policy_api + resource = policy.to_api_repr() + resp = api.set_iam_policy(self.full_name, resource) + return Policy.from_api_repr(resp)
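+A read-modify-write sketch, assuming an existing ``topic``; the
+service-account address is illustrative::
+
+    >>> policy = topic.get_iam_policy()
+    >>> policy.publishers.add(
+    ...     policy.service_account('robot@example-project.iam.gserviceaccount.com'))
+    >>> updated = topic.set_iam_policy(policy)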
+ +
[docs] def check_iam_permissions(self, permissions, client=None): + """Verify permissions allowed for the current user. + + See: + https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/testIamPermissions + + Example: + + .. literalinclude:: pubsub_snippets.py + :start-after: [START topic_check_iam_permissions] + :end-before: [END topic_check_iam_permissions] + + :type permissions: list of string + :param permissions: list of permissions to be tested + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current topic. + + :rtype: sequence of string + :returns: subset of ``permissions`` allowed by current IAM policy. + """ + client = self._require_client(client) + api = client.iam_policy_api + return api.test_iam_permissions( + self.full_name, list(permissions))
+ + +
[docs]class Batch(object): + """Context manager: collect messages to publish via a single API call. + + Helper returned by :meth:`Topic.batch` + + :type topic: :class:`gcloud.pubsub.topic.Topic` + :param topic: the topic to which messages will be published + + :type client: :class:`gcloud.pubsub.client.Client` + :param client: The client to use. + """ + def __init__(self, topic, client): + self.topic = topic + self.messages = [] + self.message_ids = [] + self.client = client + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type is None: + self.commit() + + def __iter__(self): + return iter(self.message_ids) +
[docs] def publish(self, message, **attrs): + """Emulate publishing a message, but save it. + + :type message: bytes + :param message: the message payload + + :type attrs: dict (string -> string) + :param attrs: key-value pairs to send as message attributes + """ + self.topic._timestamp_message(attrs) + self.messages.append( + {'data': base64.b64encode(message).decode('ascii'), + 'attributes': attrs})
+ +
[docs] def commit(self, client=None): + """Send saved messages as a single API call. + + :type client: :class:`gcloud.pubsub.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current batch. + """ + if not self.messages: + return + + if client is None: + client = self.client + api = client.publisher_api + message_ids = api.topic_publish(self.topic.full_name, self.messages[:]) + self.message_ids.extend(message_ids) + del self.messages[:]
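+A sketch of the context-manager flow, assuming an existing ``topic``; both
+messages are sent in the single ``commit()`` triggered by a clean
+``__exit__()``, after which the batch yields the server-assigned IDs::
+
+    >>> with topic.batch() as batch:
+    ...     batch.publish(b'event-1')
+    ...     batch.publish(b'event-2', priority='high')
+    >>> message_ids = list(batch)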
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/resource_manager/client.html b/0.18.1/_modules/gcloud/resource_manager/client.html new file mode 100644 index 000000000000..25da2bfb2758 --- /dev/null +++ b/0.18.1/_modules/gcloud/resource_manager/client.html @@ -0,0 +1,418 @@ + + + + + + + + gcloud.resource_manager.client — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.resource_manager.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A Client for interacting with the Resource Manager API."""
+
+
+from gcloud.client import Client as BaseClient
+from gcloud.iterator import Iterator
+from gcloud.resource_manager.connection import Connection
+from gcloud.resource_manager.project import Project
+
+
+
[docs]class Client(BaseClient): + """Client to bundle configuration needed for API requests. + + See + https://cloud.google.com/resource-manager/reference/rest/ + for more information on this API. + + Automatically get credentials:: + + >>> from gcloud import resource_manager + >>> client = resource_manager.Client() + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + +
[docs] def new_project(self, project_id, name=None, labels=None): + """Creates a :class:`.Project` bound to the current client. + + Use :meth:`Project.reload() \ + <gcloud.resource_manager.project.Project.reload>` to retrieve + project metadata after creating a :class:`.Project` instance. + + .. note:: + + This does not make an API call. + + :type project_id: str + :param project_id: The ID for this project. + + :type name: string + :param name: The display name of the project. + + :type labels: dict + :param labels: A dictionary of labels associated with the project. + + :rtype: :class:`.Project` + :returns: A new instance of a :class:`.Project` **without** + any metadata loaded. + """ + return Project(project_id=project_id, + client=self, name=name, labels=labels)
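+A creation sketch (the project ID and labels are illustrative; the API
+request happens only in ``create()``)::
+
+    >>> from gcloud import resource_manager
+    >>> client = resource_manager.Client()
+    >>> project = client.new_project(
+    ...     'purple-spaceship-123', name='Purple Spaceship',
+    ...     labels={'color': 'purple'})
+    >>> project.create()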
+ +
[docs] def fetch_project(self, project_id): + """Fetch an existing project and its relevant metadata by ID. + + .. note:: + + If the project does not exist, this will raise a + :class:`NotFound <gcloud.exceptions.NotFound>` error. + + :type project_id: str + :param project_id: The ID for this project. + + :rtype: :class:`.Project` + :returns: A :class:`.Project` with metadata fetched from the API. + """ + project = self.new_project(project_id) + project.reload() + return project
+ +
[docs] def list_projects(self, filter_params=None, page_size=None): + """List the projects visible to this client. + + Example:: + + >>> from gcloud import resource_manager + >>> client = resource_manager.Client() + >>> for project in client.list_projects(): + ... print project.project_id + + List all projects with label ``'environment'`` set to ``'prod'`` + (filtering by labels):: + + >>> from gcloud import resource_manager + >>> client = resource_manager.Client() + >>> env_filter = {'labels.environment': 'prod'} + >>> for project in client.list_projects(env_filter): + ... print project.project_id + + See: + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/list + + Complete filtering example:: + + >>> project_filter = { # Return projects with... + ... 'name': 'My Project', # name set to 'My Project'. + ... 'id': 'my-project-id', # id set to 'my-project-id'. + ... 'labels.stage': 'prod', # the label 'stage' set to 'prod' + ... 'labels.color': '*' # a label 'color' set to anything. + ... } + >>> client.list_projects(project_filter) + + :type filter_params: dict + :param filter_params: (Optional) A dictionary of filter options where + each key is a property to filter on, and each + value is the (case-insensitive) value to check + (or the glob ``*`` to check for existence of the + property). See the example above for more + details. + + :type page_size: int + :param page_size: (Optional) Maximum number of projects to return in a + single page. If not passed, defaults to a value set + by the API. + + :rtype: :class:`_ProjectIterator` + :returns: A project iterator. The iterator will make multiple API + requests if you continue iterating and there are more + pages of results. Each item returned will be a + :class:`.Project`. + """ + extra_params = {} + + if page_size is not None: + extra_params['pageSize'] = page_size + + if filter_params is not None: + extra_params['filter'] = filter_params + + return _ProjectIterator(self, extra_params=extra_params)
+ + +class _ProjectIterator(Iterator): + """An iterator over a list of Project resources. + + You shouldn't have to use this directly, but instead should use the + helper methods on :class:`gcloud.resource_manager.client.Client` + objects. + + :type client: :class:`gcloud.resource_manager.client.Client` + :param client: The client to use for making connections. + + :type extra_params: dict + :param extra_params: (Optional) Extra query string parameters for + the API call. + """ + + def __init__(self, client, extra_params=None): + super(_ProjectIterator, self).__init__(client=client, path='/projects', + extra_params=extra_params) + + def get_items_from_response(self, response): + """Yield :class:`.Project` items from response. + + :type response: dict + :param response: The JSON API response for a page of projects. + """ + for resource in response.get('projects', []): + item = Project.from_api_repr(resource, client=self.client) + yield item +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/resource_manager/connection.html b/0.18.1/_modules/gcloud/resource_manager/connection.html new file mode 100644 index 000000000000..2592ef85af19 --- /dev/null +++ b/0.18.1/_modules/gcloud/resource_manager/connection.html @@ -0,0 +1,275 @@ + + + + + + + + gcloud.resource_manager.connection — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.resource_manager.connection

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud.resource_manager connections."""
+
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Cloud Resource Manager via the JSON REST API. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + connection. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: (Optional) HTTP object to make requests. + """ + + API_BASE_URL = 'https://cloudresourcemanager.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v1beta1' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/cloud-platform',) + """The scopes required for authenticating as a Resource Manager consumer."""
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/resource_manager/project.html b/0.18.1/_modules/gcloud/resource_manager/project.html new file mode 100644 index 000000000000..c94ea7e56f66 --- /dev/null +++ b/0.18.1/_modules/gcloud/resource_manager/project.html @@ -0,0 +1,503 @@ + + + + + + + + gcloud.resource_manager.project — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.resource_manager.project

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utility for managing projects via the Cloud Resource Manager API."""
+
+
+from gcloud.exceptions import NotFound
+
+
+
[docs]class Project(object): + """Projects are containers for your work on Google Cloud Platform. + + .. note:: + + A :class:`Project` can also be created via + :meth:`Client.new_project() \ + <gcloud.resource_manager.client.Client.new_project>` + + To manage labels on a :class:`Project`:: + + >>> from gcloud import resource_manager + >>> client = resource_manager.Client() + >>> project = client.new_project('purple-spaceship-123') + >>> project.labels = {'color': 'purple'} + >>> project.labels['environment'] = 'production' + >>> project.update() + + See: + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects + + :type project_id: string + :param project_id: The globally unique ID of the project. + + :type client: :class:`gcloud.resource_manager.client.Client` + :param client: The Client used with this project. + + :type name: string + :param name: The display name of the project. + + :type labels: dict + :param labels: A dictionary of labels associated with the project. + """ + def __init__(self, project_id, client, name=None, labels=None): + self._client = client + self.project_id = project_id + self.name = name + self.number = None + self.labels = labels or {} + self.status = None + + def __repr__(self): + return '<Project: %r (%r)>' % (self.name, self.project_id) + + @classmethod +
[docs] def from_api_repr(cls, resource, client): + """Factory: construct a project given its API representation. + + :type resource: dict + :param resource: project resource representation returned from the API + + :type client: :class:`gcloud.resource_manager.client.Client` + :param client: The Client used with this project. + + :rtype: :class:`gcloud.resource_manager.project.Project` + :returns: The project created. + """ + project = cls(project_id=resource['projectId'], client=client) + project.set_properties_from_api_repr(resource) + return project
+ +
[docs] def set_properties_from_api_repr(self, resource): + """Update specific properties from its API representation.""" + self.name = resource.get('name') + self.number = resource['projectNumber'] + self.labels = resource.get('labels', {}) + self.status = resource['lifecycleState']
+ + @property + def full_name(self): + """Fully-qualified name (ie, ``'projects/purple-spaceship-123'``).""" + if not self.project_id: + raise ValueError('Missing project ID.') + return 'projects/%s' % (self.project_id) + + @property + def path(self): + """URL for the project (ie, ``'/projects/purple-spaceship-123'``).""" + return '/%s' % (self.full_name) + + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.resource_manager.client.Client` or + ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current project. + + :rtype: :class:`gcloud.resource_manager.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self._client + return client + +
[docs] def create(self, client=None): + """API call: create the project via a ``POST`` request. + + See + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/create + + :type client: :class:`gcloud.resource_manager.client.Client` or + :data:`NoneType <types.NoneType>` + :param client: the client to use. If not passed, falls back to + the client stored on the current project. + """ + client = self._require_client(client) + + data = { + 'projectId': self.project_id, + 'name': self.name, + 'labels': self.labels, + } + resp = client.connection.api_request(method='POST', path='/projects', + data=data) + self.set_properties_from_api_repr(resource=resp)
+ +
[docs] def reload(self, client=None): + """API call: reload the project via a ``GET`` request. + + This method will reload the newest metadata for the project. If you've + created a new :class:`Project` instance via + :meth:`Client.new_project() \ + <gcloud.resource_manager.client.Client.new_project>`, + this method will retrieve project metadata. + + .. warning:: + + This will overwrite any local changes you've made and not saved + via :meth:`update`. + + See + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get + + :type client: :class:`gcloud.resource_manager.client.Client` or + :data:`NoneType <types.NoneType>` + :param client: the client to use. If not passed, falls back to + the client stored on the current project. + """ + client = self._require_client(client) + + # We assume the project exists. If it doesn't it will raise a NotFound + # exception. + resp = client.connection.api_request(method='GET', path=self.path) + self.set_properties_from_api_repr(resource=resp)
+ +
[docs] def exists(self, client=None): + """API call: test the existence of a project via a ``GET`` request. + + See + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/get + + :type client: :class:`gcloud.resource_manager.client.Client` or + :data:`NoneType <types.NoneType>` + :param client: the client to use. If not passed, falls back to + the client stored on the current project. + + :rtype: bool + :returns: Boolean indicating existence of the project. + """ + client = self._require_client(client) + + try: + # Note that we have to request the entire resource as the API + # doesn't provide a way to check for existence only. + client.connection.api_request(method='GET', path=self.path) + except NotFound: + return False + else: + return True
+ +
[docs] def update(self, client=None): + """API call: update the project via a ``PUT`` request. + + See + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/update + + :type client: :class:`gcloud.resource_manager.client.Client` or + :data:`NoneType <types.NoneType>` + :param client: the client to use. If not passed, falls back to + the client stored on the current project. + """ + client = self._require_client(client) + + data = {'name': self.name, 'labels': self.labels} + resp = client.connection.api_request(method='PUT', path=self.path, + data=data) + self.set_properties_from_api_repr(resp)
+ +
[docs] def delete(self, client=None, reload_data=False): + """API call: delete the project via a ``DELETE`` request. + + See: + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/delete + + This actually changes the status (``lifecycleState``) from ``ACTIVE`` + to ``DELETE_REQUESTED``. + Later (it's not specified when), the project will move into the + ``DELETE_IN_PROGRESS`` state, which means deletion has actually + begun. + + :type client: :class:`gcloud.resource_manager.client.Client` or + :data:`NoneType <types.NoneType>` + :param client: the client to use. If not passed, falls back to + the client stored on the current project. + + :type reload_data: bool + :param reload_data: Whether to reload the project with the latest + state. If you want to get the updated status, + you'll want this set to :data:`True` as the DELETE + method doesn't send back the updated project. + Default: :data:`False`. + """ + client = self._require_client(client) + client.connection.api_request(method='DELETE', path=self.path) + + # If the reload flag is set, reload the project. + if reload_data: + self.reload()
+ +
[docs] def undelete(self, client=None, reload_data=False): + """API call: undelete the project via a ``POST`` request. + + See + https://cloud.google.com/resource-manager/reference/rest/v1beta1/projects/undelete + + This actually changes the project status (``lifecycleState``) from + ``DELETE_REQUESTED`` to ``ACTIVE``. + If the project has already reached a status of ``DELETE_IN_PROGRESS``, + this request will fail and the project cannot be restored. + + :type client: :class:`gcloud.resource_manager.client.Client` or + :data:`NoneType <types.NoneType>` + :param client: the client to use. If not passed, falls back to + the client stored on the current project. + + :type reload_data: bool + :param reload_data: Whether to reload the project with the latest + state. If you want to get the updated status, + you'll want this set to :data:`True` as the undelete + method doesn't send back the updated project. + Default: :data:`False`. + """ + client = self._require_client(client) + client.connection.api_request(method='POST', + path=self.path + ':undelete') + + # If the reload flag is set, reload the project. + if reload_data: + self.reload()
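+A sketch of the lifecycle transitions described above (assumes an existing
+``project``; ``undelete()`` succeeds only while the status is still
+``DELETE_REQUESTED``)::
+
+    >>> project.delete(reload_data=True)
+    >>> project.status
+    'DELETE_REQUESTED'
+    >>> project.undelete(reload_data=True)
+    >>> project.status
+    'ACTIVE'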
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/storage/acl.html b/0.18.1/_modules/gcloud/storage/acl.html new file mode 100644 index 000000000000..1cc20903d987 --- /dev/null +++ b/0.18.1/_modules/gcloud/storage/acl.html @@ -0,0 +1,793 @@ + + + + + + + + gcloud.storage.acl — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.storage.acl

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Manipulate access control lists that Cloud Storage provides.
+
+:class:`gcloud.storage.bucket.Bucket` has a getter property that creates
+an ACL object under the hood, and you can interact with that using
+:func:`gcloud.storage.bucket.Bucket.acl`::
+
+  >>> from gcloud import storage
+  >>> client = storage.Client()
+  >>> bucket = client.get_bucket(bucket_name)
+  >>> acl = bucket.acl
+
+Adding and removing permissions can be done with the following methods
+(in increasing order of granularity):
+
+- :func:`ACL.all`
+  corresponds to access for all users.
+- :func:`ACL.all_authenticated` corresponds
+  to access for all users that are signed into a Google account.
+- :func:`ACL.domain` corresponds to access on a
+  per-Google-Apps-domain basis (i.e., ``example.com``).
+- :func:`ACL.group` corresponds to access on a
+  per-group basis (either by ID or e-mail address).
+- :func:`ACL.user` corresponds to access on a
+  per-user basis (either by ID or e-mail address).
+
+And you are able to ``grant`` and ``revoke`` the following roles:
+
+- **Reading**:
+  :func:`_ACLEntity.grant_read` and :func:`_ACLEntity.revoke_read`
+- **Writing**:
+  :func:`_ACLEntity.grant_write` and :func:`_ACLEntity.revoke_write`
+- **Owning**:
+  :func:`_ACLEntity.grant_owner` and :func:`_ACLEntity.revoke_owner`
+
+You can use any of these like any other factory method (these happen to
+be :class:`_ACLEntity` factories)::
+
+  >>> acl.user('me@example.org').grant_read()
+  >>> acl.all_authenticated().grant_write()
+
+Note that the ``grant_*`` and ``revoke_*`` methods return ``None``, so
+they cannot be chained; apply them one call at a time::
+
+  >>> entity = acl.all()
+  >>> entity.grant_read()
+  >>> entity.revoke_write()
+
+After that, you can save any changes you make with the
+:func:`gcloud.storage.acl.ACL.save` method::
+
+  >>> acl.save()
+
+You can alternatively save any existing :class:`gcloud.storage.acl.ACL`
+object (whether it was created by a factory method or not) from a
+:class:`gcloud.storage.bucket.Bucket`::
+
+  >>> bucket.acl.save(acl=acl)
+
+To get the list of ``entity`` and ``role`` for each unique pair,
+:class:`ACL` instances are iterable::
+
+  >>> print list(acl)
+  [{'role': 'OWNER', 'entity': 'allUsers'}, ...]
+
+This list of mappings can be used as the ``entity`` and ``role`` fields
+when sending metadata for ACLs to the API.
+"""
+
+
+class _ACLEntity(object):
+    """Class representing a set of roles for an entity.
+
+    This is a helper class that you likely won't ever construct
+    outside of the factory methods on the :class:`ACL` object.
+
+    :type entity_type: string
+    :param entity_type: The type of entity (i.e., 'group' or 'user').
+
+    :type identifier: string
+    :param identifier: The ID or e-mail of the entity. For the special
+                       entity types (like 'allUsers') this is optional.
+    """
+
+    READER_ROLE = 'READER'
+    WRITER_ROLE = 'WRITER'
+    OWNER_ROLE = 'OWNER'
+
+    def __init__(self, entity_type, identifier=None):
+        self.identifier = identifier
+        self.roles = set()
+        self.type = entity_type
+
+    def __str__(self):
+        if not self.identifier:
+            return str(self.type)
+        else:
+            return '{acl.type}-{acl.identifier}'.format(acl=self)
+
+    def __repr__(self):
+        return '<ACL Entity: {acl} ({roles})>'.format(
+            acl=self, roles=', '.join(self.roles))
+
+    def get_roles(self):
+        """Get the list of roles permitted by this entity.
+
+        :rtype: list of strings
+        :returns: The list of roles associated with this entity.
+        """
+        return self.roles
+
+    def grant(self, role):
+        """Add a role to the entity.
+
+        :type role: string
+        :param role: The role to add to the entity.
+        """
+        self.roles.add(role)
+
+    def revoke(self, role):
+        """Remove a role from the entity.
+
+        :type role: string
+        :param role: The role to remove from the entity.
+        """
+        if role in self.roles:
+            self.roles.remove(role)
+
+    def grant_read(self):
+        """Grant read access to the current entity."""
+        self.grant(_ACLEntity.READER_ROLE)
+
+    def grant_write(self):
+        """Grant write access to the current entity."""
+        self.grant(_ACLEntity.WRITER_ROLE)
+
+    def grant_owner(self):
+        """Grant owner access to the current entity."""
+        self.grant(_ACLEntity.OWNER_ROLE)
+
+    def revoke_read(self):
+        """Revoke read access from the current entity."""
+        self.revoke(_ACLEntity.READER_ROLE)
+
+    def revoke_write(self):
+        """Revoke write access from the current entity."""
+        self.revoke(_ACLEntity.WRITER_ROLE)
+
+    def revoke_owner(self):
+        """Revoke owner access from the current entity."""
+        self.revoke(_ACLEntity.OWNER_ROLE)
+
+
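+A short sketch of :class:`_ACLEntity` in isolation; in practice these
+objects come from the factory methods on :class:`ACL` below rather
+than direct construction::
+
+    entity = _ACLEntity('user', 'me@example.org')
+    entity.grant_read()
+    entity.grant_write()
+    entity.revoke_write()
+
+    str(entity)          # 'user-me@example.org'
+    entity.get_roles()   # set(['READER'])
+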
+
[docs]class ACL(object): + """Container class representing a list of access controls.""" + + _URL_PATH_ELEM = 'acl' + _PREDEFINED_QUERY_PARAM = 'predefinedAcl' + + PREDEFINED_XML_ACLS = { + # XML API name -> JSON API name + 'project-private': 'projectPrivate', + 'public-read': 'publicRead', + 'public-read-write': 'publicReadWrite', + 'authenticated-read': 'authenticatedRead', + 'bucket-owner-read': 'bucketOwnerRead', + 'bucket-owner-full-control': 'bucketOwnerFullControl', + } + + PREDEFINED_JSON_ACLS = frozenset([ + 'private', + 'projectPrivate', + 'publicRead', + 'publicReadWrite', + 'authenticatedRead', + 'bucketOwnerRead', + 'bucketOwnerFullControl', + ]) + """See: + https://cloud.google.com/storage/docs/access-control#predefined-acl + """ + + loaded = False + + # Subclasses must override to provide these attributes (typically, + # as properties). + reload_path = None + save_path = None + + def __init__(self): + self.entities = {} + + def _ensure_loaded(self): + """Load if not already loaded.""" + if not self.loaded: + self.reload() + +
[docs] def reset(self): + """Remove all entities from the ACL, and clear the ``loaded`` flag.""" + self.entities.clear() + self.loaded = False
+ + def __iter__(self): + self._ensure_loaded() + + for entity in self.entities.values(): + for role in entity.get_roles(): + if role: + yield {'entity': str(entity), 'role': role} + +
[docs] def entity_from_dict(self, entity_dict): + """Build an _ACLEntity object from a dictionary of data. + + An entity is a mutable object that represents a list of roles + belonging to either a user or group or the special types for all + users and all authenticated users. + + :type entity_dict: dict + :param entity_dict: Dictionary full of data from an ACL lookup. + + :rtype: :class:`_ACLEntity` + :returns: An Entity constructed from the dictionary. + """ + entity = entity_dict['entity'] + role = entity_dict['role'] + + if entity == 'allUsers': + entity = self.all() + + elif entity == 'allAuthenticatedUsers': + entity = self.all_authenticated() + + elif '-' in entity: + entity_type, identifier = entity.split('-', 1) + entity = self.entity(entity_type=entity_type, + identifier=identifier) + + if not isinstance(entity, _ACLEntity): + raise ValueError('Invalid dictionary: %s' % entity_dict) + + entity.grant(role) + return entity
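+
+For illustration, one API-style entry round-tripped through
+``entity_from_dict``; the ACL is marked ``loaded`` here only to skip
+the HTTP reload, and ``bucket`` is assumed to exist::
+
+    acl = BucketACL(bucket)
+    acl.loaded = True   # avoid an API call for this sketch
+    entity = acl.entity_from_dict(
+        {'entity': 'user-me@example.org', 'role': 'READER'})
+
+    str(entity)          # 'user-me@example.org'
+    entity.get_roles()   # set(['READER'])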
+ +
[docs] def has_entity(self, entity): + """Returns whether or not this ACL has any entries for an entity. + + :type entity: :class:`_ACLEntity` + :param entity: The entity to check for existence in this ACL. + + :rtype: boolean + :returns: True if the entity exists in the ACL. + """ + self._ensure_loaded() + return str(entity) in self.entities
+ +
[docs] def get_entity(self, entity, default=None): + """Gets an entity object from the ACL. + + :type entity: :class:`_ACLEntity` or string + :param entity: The entity to look up in the ACL. + + :type default: anything + :param default: This value will be returned if the entity + doesn't exist. + + :rtype: :class:`_ACLEntity` + :returns: The corresponding entity or the value provided + to ``default``. + """ + self._ensure_loaded() + return self.entities.get(str(entity), default)
+ +
[docs] def add_entity(self, entity): + """Add an entity to the ACL. + + :type entity: :class:`_ACLEntity` + :param entity: The entity to add to this ACL. + """ + self._ensure_loaded() + self.entities[str(entity)] = entity
+ +
[docs] def entity(self, entity_type, identifier=None): + """Factory method for creating an Entity. + + If an entity with the same type and identifier already exists, + this will return a reference to that entity. If not, it will + create a new one and add it to the list of known entities for + this ACL. + + :type entity_type: string + :param entity_type: The type of entity to create + (i.e., ``user``, ``group``, etc.) + + :type identifier: string + :param identifier: The ID of the entity (if applicable). + This can be either an ID or an e-mail address. + + :rtype: :class:`_ACLEntity` + :returns: A new Entity or a reference to an existing identical entity. + """ + entity = _ACLEntity(entity_type=entity_type, identifier=identifier) + if self.has_entity(entity): + entity = self.get_entity(entity) + else: + self.add_entity(entity) + return entity
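+
+Because the factory consults ``has_entity`` first, asking twice for the
+same type and identifier yields the single stored object, so role edits
+are visible through every reference (continuing the sketch above)::
+
+    first = acl.user('me@example.org')
+    second = acl.user('me@example.org')
+    assert first is second
+
+    first.grant_read()
+    assert 'READER' in second.get_roles()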
+ +
[docs] def user(self, identifier): + """Factory method for a user Entity. + + :type identifier: string + :param identifier: An id or e-mail for this particular user. + + :rtype: :class:`_ACLEntity` + :returns: An Entity corresponding to this user. + """ + return self.entity('user', identifier=identifier)
+ +
[docs] def group(self, identifier): + """Factory method for a group Entity. + + :type identifier: string + :param identifier: An id or e-mail for this particular group. + + :rtype: :class:`_ACLEntity` + :returns: An Entity corresponding to this group. + """ + return self.entity('group', identifier=identifier)
+ +
[docs] def domain(self, domain): + """Factory method for a domain Entity. + + :type domain: string + :param domain: The domain for this entity. + + :rtype: :class:`_ACLEntity` + :returns: An entity corresponding to this domain. + """ + return self.entity('domain', identifier=domain)
+ +
[docs] def all(self): + """Factory method for an Entity representing all users. + + :rtype: :class:`_ACLEntity` + :returns: An entity representing all users. + """ + return self.entity('allUsers')
+ +
[docs] def all_authenticated(self): + """Factory method for an Entity representing all authenticated users. + + :rtype: :class:`_ACLEntity` + :returns: An entity representing all authenticated users. + """ + return self.entity('allAuthenticatedUsers')
+ +
[docs] def get_entities(self): + """Get a list of all Entity objects. + + :rtype: list of :class:`_ACLEntity` objects + :returns: A list of all Entity objects. + """ + self._ensure_loaded() + return list(self.entities.values())
+ + @property + def client(self): + """Abstract getter for the object client.""" + raise NotImplementedError + + def _require_client(self, client): + """Check client or verify over-ride. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: the client to use. If not passed, falls back to the + ``client`` stored on the current ACL. + + :rtype: :class:`gcloud.storage.client.Client` + :returns: The client passed in or the currently bound client. + """ + if client is None: + client = self.client + return client + +
[docs] def reload(self, client=None): + """Reload the ACL data from Cloud Storage. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the ACL's parent. + """ + path = self.reload_path + client = self._require_client(client) + + self.entities.clear() + + found = client.connection.api_request(method='GET', path=path) + self.loaded = True + for entry in found.get('items', ()): + self.add_entity(self.entity_from_dict(entry))
+ + def _save(self, acl, predefined, client): + """Helper for :meth:`save` and :meth:`save_predefined`. + + :type acl: :class:`gcloud.storage.acl.ACL`, or a compatible list. + :param acl: The ACL object to save. If left blank, this will save + current entries. + + :type predefined: string or None + :param predefined: An identifier for a predefined ACL. Must be one + of the keys in :attr:`PREDEFINED_JSON_ACLS`. + If passed, ``acl`` must be None. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the ACL's parent. + """ + query_params = {'projection': 'full'} + if predefined is not None: + acl = [] + query_params[self._PREDEFINED_QUERY_PARAM] = predefined + + path = self.save_path + client = self._require_client(client) + result = client.connection.api_request( + method='PATCH', + path=path, + data={self._URL_PATH_ELEM: list(acl)}, + query_params=query_params) + self.entities.clear() + for entry in result.get(self._URL_PATH_ELEM, ()): + self.add_entity(self.entity_from_dict(entry)) + self.loaded = True + +
[docs] def save(self, acl=None, client=None): + """Save this ACL for the current bucket. + + :type acl: :class:`gcloud.storage.acl.ACL`, or a compatible list. + :param acl: The ACL object to save. If left blank, this will save + current entries. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the ACL's parent. + """ + if acl is None: + acl = self + save_to_backend = acl.loaded + else: + save_to_backend = True + + if save_to_backend: + self._save(acl, None, client)
+ +
[docs] def save_predefined(self, predefined, client=None): + """Save this ACL for the current bucket using a predefined ACL. + + :type predefined: string + :param predefined: An identifier for a predefined ACL. Must be one + of the keys in :attr:`PREDEFINED_JSON_ACLS` + or :attr:`PREDEFINED_XML_ACLS` (which will be + aliased to the corresponding JSON name). + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the ACL's parent. + """ + predefined = self.PREDEFINED_XML_ACLS.get(predefined, predefined) + + if predefined not in self.PREDEFINED_JSON_ACLS: + raise ValueError("Invalid predefined ACL: %s" % (predefined,)) + + self._save(None, predefined, client)
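+
+Both spellings below reach the same predefined ACL; the XML-style name
+is aliased to its JSON equivalent before the ``PATCH`` request is sent
+(``bucket`` is assumed to exist)::
+
+    bucket.acl.save_predefined('public-read')   # XML-style alias
+    bucket.acl.save_predefined('publicRead')    # JSON-style name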
+ +
[docs] def clear(self, client=None): + """Remove all ACL entries. + + Note that this won't actually remove *ALL* the rules, but it + will remove all the non-default rules. In short, you'll still + have access to a bucket that you created even after you clear + ACL rules with this method. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the ACL's parent. + """ + self.save([], client=client)
+ + +
[docs]class BucketACL(ACL): + """An ACL specifically for a bucket. + + :type bucket: :class:`gcloud.storage.bucket.Bucket` + :param bucket: The bucket to which this ACL relates. + """ + + def __init__(self, bucket): + super(BucketACL, self).__init__() + self.bucket = bucket + + @property + def client(self): + """The client bound to this ACL's bucket.""" + return self.bucket.client + + @property + def reload_path(self): + """Compute the path for GET API requests for this ACL.""" + return '%s/%s' % (self.bucket.path, self._URL_PATH_ELEM) + + @property + def save_path(self): + """Compute the path for PATCH API requests for this ACL.""" + return self.bucket.path
+ + +
[docs]class DefaultObjectACL(BucketACL): + """A class representing the default object ACL for a bucket.""" + + _URL_PATH_ELEM = 'defaultObjectAcl' + _PREDEFINED_QUERY_PARAM = 'predefinedDefaultObjectAcl'
+ + +
[docs]class ObjectACL(ACL): + """An ACL specifically for a Cloud Storage object / blob. + + :type blob: :class:`gcloud.storage.blob.Blob` + :param blob: The blob that this ACL corresponds to. + """ + + def __init__(self, blob): + super(ObjectACL, self).__init__() + self.blob = blob + + @property + def client(self): + """The client bound to this ACL's blob.""" + return self.blob.client + + @property + def reload_path(self): + """Compute the path for GET API requests for this ACL.""" + return '%s/acl' % self.blob.path + + @property + def save_path(self): + """Compute the path for PATCH API requests for this ACL.""" + return self.blob.path
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/storage/batch.html b/0.18.1/_modules/gcloud/storage/batch.html new file mode 100644 index 000000000000..fcd7ba8693ce --- /dev/null +++ b/0.18.1/_modules/gcloud/storage/batch.html @@ -0,0 +1,557 @@ + + + + + + + + gcloud.storage.batch — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+
+ +
+ +

Source code for gcloud.storage.batch

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Batch updates / deletes of storage buckets / blobs.
+
+See: https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch
+"""
+from email.encoders import encode_noop
+from email.generator import Generator
+from email.mime.application import MIMEApplication
+from email.mime.multipart import MIMEMultipart
+from email.parser import Parser
+import io
+import json
+
+import httplib2
+import six
+
+from gcloud.exceptions import make_exception
+from gcloud.storage.connection import Connection
+
+
+
[docs]class MIMEApplicationHTTP(MIMEApplication): + """MIME type for ``application/http``. + + Constructs payload from headers and body + + :type method: str + :param method: HTTP method + + :type uri: str + :param uri: URI for HTTP request + + :type headers: dict + :param headers: HTTP headers + + :type body: str or None + :param body: HTTP payload + + """ + def __init__(self, method, uri, headers, body): + if isinstance(body, dict): + body = json.dumps(body) + headers['Content-Type'] = 'application/json' + headers['Content-Length'] = len(body) + if body is None: + body = '' + lines = ['%s %s HTTP/1.1' % (method, uri)] + lines.extend(['%s: %s' % (key, value) + for key, value in sorted(headers.items())]) + lines.append('') + lines.append(body) + payload = '\r\n'.join(lines) + if six.PY2: + # email.message.Message is an old-style class, so we + # cannot use 'super()'. + MIMEApplication.__init__(self, payload, 'http', encode_noop) + else: # pragma: NO COVER Python3 + super_init = super(MIMEApplicationHTTP, self).__init__ + super_init(payload, 'http', encode_noop)
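+
+A sketch of the wire form this class produces for one deferred call;
+the URI here is illustrative only::
+
+    sub = MIMEApplicationHTTP(
+        'DELETE', '/storage/v1/b/my-bucket/o/stale.txt', {}, None)
+    sub.get_payload()
+    # 'DELETE /storage/v1/b/my-bucket/o/stale.txt HTTP/1.1\r\n\r\n'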
+ + +
[docs]class NoContent(object): + """Emulate an HTTP '204 No Content' response.""" + status = 204
+ + +class _FutureDict(object): + """Class to hold a future value for a deferred request. + + Used for requests that get sent in a :class:`Batch`. + """ + + @staticmethod + def get(key, default=None): + """Stand-in for dict.get. + + :type key: object + :param key: Hashable dictionary key. + + :type default: object + :param default: Fallback value to dict.get. + + :raises: :class:`KeyError` always since the future is intended to fail + as a dictionary. + """ + raise KeyError('Cannot get(%r, default=%r) on a future' % ( + key, default)) + + def __getitem__(self, key): + """Stand-in for dict[key]. + + :type key: object + :param key: Hashable dictionary key. + + :raises: :class:`KeyError` always since the future is intended to fail + as a dictionary. + """ + raise KeyError('Cannot get item %r from a future' % (key,)) + + def __setitem__(self, key, value): + """Stand-in for dict[key] = value. + + :type key: object + :param key: Hashable dictionary key. + + :type value: object + :param value: Dictionary value. + + :raises: :class:`KeyError` always since the future is intended to fail + as a dictionary. + """ + raise KeyError('Cannot set %r -> %r on a future' % (key, value)) + +
[docs]class Batch(Connection): + """Proxy an underlying connection, batching up change operations. + + :type client: :class:`gcloud.storage.client.Client` + :param client: The client to use for making connections. + """ + _MAX_BATCH_SIZE = 1000 + + def __init__(self, client): + super(Batch, self).__init__() + self._client = client + self._requests = [] + self._target_objects = [] + + def _do_request(self, method, url, headers, data, target_object): + """Override Connection: defer actual HTTP request. + + Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. + + :type method: str + :param method: The HTTP method to use in the request. + + :type url: str + :param url: The URL to send the request to. + + :type headers: dict + :param headers: A dictionary of HTTP headers to send with the request. + + :type data: str + :param data: The data to send as the body of the request. + + :type target_object: object or :class:`NoneType` + :param target_object: This allows us to enable custom behavior in our + batch connection. Here we defer an HTTP request + and complete initialization of the object at a + later time. + + :rtype: tuple of ``response`` (a dictionary of sorts) + and ``content`` (a string). + :returns: The HTTP response object and the content of the response. + """ + if len(self._requests) >= self._MAX_BATCH_SIZE: + raise ValueError("Too many deferred requests (max %d)" % + self._MAX_BATCH_SIZE) + self._requests.append((method, url, headers, data)) + result = _FutureDict() + self._target_objects.append(target_object) + if target_object is not None: + target_object._properties = result + return NoContent(), result + + def _prepare_batch_request(self): + """Prepares headers and body for a batch request. + + :rtype: tuple (dict, str) + :returns: The pair of headers and body of the batch request to be sent. + :raises: :class:`ValueError` if no requests have been deferred. + """ + if len(self._requests) == 0: + raise ValueError("No deferred requests") + + multi = MIMEMultipart() + + for method, uri, headers, body in self._requests: + subrequest = MIMEApplicationHTTP(method, uri, headers, body) + multi.attach(subrequest) + + # The `email` package expects to deal with "native" strings + if six.PY3: # pragma: NO COVER Python3 + buf = io.StringIO() + else: + buf = io.BytesIO() + generator = Generator(buf, False, 0) + generator.flatten(multi) + payload = buf.getvalue() + + # Strip off redundant header text + _, body = payload.split('\n\n', 1) + return dict(multi._headers), body + + def _finish_futures(self, responses): + """Apply all the batch responses to the futures created. + + :type responses: list of (headers, payload) tuples. + :param responses: List of headers and payloads from each response in + the batch. + + :raises: :class:`ValueError` if no requests have been deferred. + """ + # If a bad status occurs, we track it, but don't raise an exception + # until all futures have been populated. + exception_args = None + + if len(self._target_objects) != len(responses): + raise ValueError('Expected a response for every request.') + + for target_object, sub_response in zip(self._target_objects, + responses): + resp_headers, sub_payload = sub_response + if not 200 <= resp_headers.status < 300: + exception_args = exception_args or (resp_headers, + sub_payload) + elif target_object is not None: + target_object._properties = sub_payload + + if exception_args is not None: + raise make_exception(*exception_args) + +
[docs] def finish(self): + """Submit a single `multipart/mixed` request w/ deferred requests. + + :rtype: list of tuples + :returns: one ``(headers, payload)`` tuple per deferred request. + """ + headers, body = self._prepare_batch_request() + + url = '%s/batch' % self.API_BASE_URL + + # Use the private ``_connection`` rather than the public + # ``.connection``, since the public connection may be this + # current batch. + response, content = self._client._connection._make_request( + 'POST', url, data=body, headers=headers) + responses = list(_unpack_batch_response(response, content)) + self._finish_futures(responses) + return responses
+ +
[docs] def current(self): + """Return the topmost batch, or None.""" + return self._client.current_batch
+ + def __enter__(self): + self._client._push_batch(self) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + try: + if exc_type is None: + self.finish() + finally: + self._client._pop_batch()
+ + +def _generate_faux_mime_message(parser, response, content): + """Convert response, content -> (multipart) email.message. + + Helper for _unpack_batch_response. + """ + # We coerce to bytes to get consistent concat across + # Py2 and Py3. Percent formatting is insufficient since + # it includes the b in Py3. + if not isinstance(content, six.binary_type): + content = content.encode('utf-8') + content_type = response['content-type'] + if not isinstance(content_type, six.binary_type): + content_type = content_type.encode('utf-8') + faux_message = b''.join([ + b'Content-Type: ', + content_type, + b'\nMIME-Version: 1.0\n\n', + content, + ]) + + if six.PY2: + return parser.parsestr(faux_message) + else: # pragma: NO COVER Python3 + return parser.parsestr(faux_message.decode('utf-8')) + + +def _unpack_batch_response(response, content): + """Convert response, content -> [(headers, payload)]. + + Creates a generator of tuples emulating the responses to + :meth:`httplib2.Http.request` (a pair of headers and payload). + + :type response: :class:`httplib2.Response` + :param response: HTTP response / headers from a request. + + :type content: str + :param content: Response payload with a batch response. + """ + parser = Parser() + message = _generate_faux_mime_message(parser, response, content) + + if not isinstance(message._payload, list): + raise ValueError('Bad response: not multi-part') + + for subrequest in message._payload: + status_line, rest = subrequest._payload.split('\n', 1) + _, status, _ = status_line.split(' ', 2) + sub_message = parser.parsestr(rest) + payload = sub_message._payload + ctype = sub_message['Content-Type'] + msg_headers = dict(sub_message._headers) + msg_headers['status'] = status + headers = httplib2.Response(msg_headers) + if ctype and ctype.startswith('application/json'): + payload = json.loads(payload) + yield headers, payload +
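+
+A typical use of :class:`Batch` goes through the client's context
+manager; the bucket and blob names here are hypothetical. Both deletes
+are deferred and shipped as one ``multipart/mixed`` request when the
+``with`` block exits cleanly::
+
+    from gcloud import storage
+
+    client = storage.Client()
+    bucket = client.get_bucket('my-bucket')
+
+    with client.batch():
+        for name in ('stale-1.txt', 'stale-2.txt'):
+            bucket.delete_blob(name)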
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/storage/blob.html b/0.18.1/_modules/gcloud/storage/blob.html new file mode 100644 index 000000000000..b8efce859bd6 --- /dev/null +++ b/0.18.1/_modules/gcloud/storage/blob.html @@ -0,0 +1,1179 @@ + + + + + + + + gcloud.storage.blob — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+
+ +
+ +

Source code for gcloud.storage.blob

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with Google Cloud Storage blobs."""
+
+import base64
+import copy
+import hashlib
+from io import BytesIO
+from io import UnsupportedOperation
+import json
+import mimetypes
+import os
+import time
+
+import httplib2
+import six
+from six.moves.urllib.parse import quote
+
+from gcloud._helpers import _rfc3339_to_datetime
+from gcloud._helpers import _to_bytes
+from gcloud._helpers import _bytes_to_unicode
+from gcloud.credentials import generate_signed_url
+from gcloud.exceptions import NotFound
+from gcloud.exceptions import make_exception
+from gcloud.storage._helpers import _PropertyMixin
+from gcloud.storage._helpers import _scalar_property
+from gcloud.storage.acl import ObjectACL
+from gcloud.streaming.http_wrapper import Request
+from gcloud.streaming.http_wrapper import make_api_request
+from gcloud.streaming.transfer import Download
+from gcloud.streaming.transfer import RESUMABLE_UPLOAD
+from gcloud.streaming.transfer import Upload
+
+
+_API_ACCESS_ENDPOINT = 'https://storage.googleapis.com'
+
+
+
[docs]class Blob(_PropertyMixin): + """A wrapper around Cloud Storage's concept of an ``Object``. + + :type name: string + :param name: The name of the blob. This corresponds to the + unique path of the object in the bucket. + + :type bucket: :class:`gcloud.storage.bucket.Bucket` + :param bucket: The bucket to which this blob belongs. + + :type chunk_size: integer + :param chunk_size: The size of a chunk of data whenever iterating (1 MB). + This must be a multiple of 256 KB per the API + specification. + """ + + _chunk_size = None # Default value for each instance. + + _CHUNK_SIZE_MULTIPLE = 256 * 1024 + """Number (256 KB, in bytes) that must divide the chunk size.""" + + def __init__(self, name, bucket, chunk_size=None): + super(Blob, self).__init__(name=name) + + self.chunk_size = chunk_size # Check that setter accepts value. + self.bucket = bucket + self._acl = ObjectACL(self) + + @property + def chunk_size(self): + """Get the blob's default chunk size. + + :rtype: integer or ``NoneType`` + :returns: The current blob's chunk size, if it is set. + """ + return self._chunk_size + + @chunk_size.setter + def chunk_size(self, value): + """Set the blob's default chunk size. + + :type value: integer or ``NoneType`` + :param value: The current blob's chunk size, if it is set. + + :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a + multiple of 256 KB. + """ + if value is not None and value % self._CHUNK_SIZE_MULTIPLE != 0: + raise ValueError('Chunk size must be a multiple of %d.' % ( + self._CHUNK_SIZE_MULTIPLE,)) + self._chunk_size = value + + @staticmethod +
[docs] def path_helper(bucket_path, blob_name): + """Relative URL path for a blob. + + :type bucket_path: string + :param bucket_path: The URL path for a bucket. + + :type blob_name: string + :param blob_name: The name of the blob. + + :rtype: string + :returns: The relative URL path for ``blob_name``. + """ + return bucket_path + '/o/' + quote(blob_name, safe='')
+ + @property + def acl(self): + """Create our ACL on demand.""" + return self._acl + + def __repr__(self): + if self.bucket: + bucket_name = self.bucket.name + else: + bucket_name = None + + return '<Blob: %s, %s>' % (bucket_name, self.name) + + @property + def path(self): + """Getter property for the URL path to this Blob. + + :rtype: string + :returns: The URL path to this Blob. + """ + if not self.name: + raise ValueError('Cannot determine path without a blob name.') + + return self.path_helper(self.bucket.path, self.name) + + @property + def client(self): + """The client bound to this blob.""" + return self.bucket.client + + @property + def public_url(self): + """The public URL for this blob's object. + + :rtype: `string` + :returns: The public URL for this blob. + """ + return '{storage_base_url}/{bucket_name}/{quoted_name}'.format( + storage_base_url='https://storage.googleapis.com', + bucket_name=self.bucket.name, + quoted_name=quote(self.name, safe='')) + +
[docs] def generate_signed_url(self, expiration, method='GET', + content_type=None, + generation=None, response_disposition=None, + response_type=None, client=None, credentials=None): + """Generates a signed URL for this blob. + + .. note:: + + If you are on Google Compute Engine, you can't generate a signed + URL. Follow `Issue 922`_ for updates on this. If you'd like to + be able to generate a signed URL from GCE, you can use a standard + service account from a JSON file rather than a GCE service account. + + .. _Issue 922: https://github.com/GoogleCloudPlatform/\ + gcloud-python/issues/922 + + If you have a blob that you want to allow access to for a set + amount of time, you can use this method to generate a URL that + is only valid within a certain time period. + + This is particularly useful if you don't want publicly + accessible blobs, but don't want to require users to explicitly + log in. + + :type expiration: int, long, datetime.datetime, datetime.timedelta + :param expiration: When the signed URL should expire. + + :type method: str + :param method: The HTTP verb that will be used when requesting the URL. + + :type content_type: str + :param content_type: (Optional) The content type of the object + referenced by ``resource``. + + :type generation: str + :param generation: (Optional) A value that indicates which generation + of the resource to fetch. + + :type response_disposition: str + :param response_disposition: (Optional) Content disposition of + responses to requests for the signed URL. + For example, to enable the signed URL + to initiate a download of ``blob.png``, use + the value + ``'attachment; filename=blob.png'``. + + :type response_type: str + :param response_type: (Optional) Content type of responses to requests + for the signed URL. Used to override the content + type of the underlying blob/object. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: (Optional) The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: (Optional) The OAuth2 credentials to use to sign + the URL. Defaults to the credentials stored on the + client used. + + :rtype: str + :returns: A signed URL you can use to access the resource + until expiration. + """ + resource = '/{bucket_name}/{quoted_name}'.format( + bucket_name=self.bucket.name, + quoted_name=quote(self.name, safe='')) + + if credentials is None: + client = self._require_client(client) + credentials = client._connection.credentials + + return generate_signed_url( + credentials, resource=resource, + api_access_endpoint=_API_ACCESS_ENDPOINT, + expiration=expiration, method=method, + content_type=content_type, + response_type=response_type, + response_disposition=response_disposition, + generation=generation)
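+
+For example, a URL valid for one hour (``blob`` is assumed to exist,
+and the client is assumed to carry JSON service-account credentials,
+since GCE-derived credentials cannot sign)::
+
+    import datetime
+
+    url = blob.generate_signed_url(
+        expiration=datetime.timedelta(hours=1),
+        method='GET',
+        response_disposition='attachment; filename=blob.png')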
+ +
[docs] def exists(self, client=None): + """Determines whether or not this blob exists. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :rtype: boolean + :returns: True if the blob exists in Cloud Storage. + """ + client = self._require_client(client) + try: + # We only need the status code (200 or not) so we seek to + # minimize the returned payload. + query_params = {'fields': 'name'} + # We intentionally pass `_target_object=None` since fields=name + # would limit the local properties. + client.connection.api_request(method='GET', path=self.path, + query_params=query_params, + _target_object=None) + # NOTE: This will not fail immediately in a batch. However, when + # Batch.finish() is called, the resulting `NotFound` will be + # raised. + return True + except NotFound: + return False
+ +
[docs] def delete(self, client=None): + """Deletes a blob from Cloud Storage. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :rtype: :class:`Blob` + :returns: The blob that was just deleted. + :raises: :class:`gcloud.exceptions.NotFound` + (propagated from + :meth:`gcloud.storage.bucket.Bucket.delete_blob`). + """ + return self.bucket.delete_blob(self.name, client=client)
+ +
[docs] def download_to_file(self, file_obj, encryption_key=None, client=None): + """Download the contents of this blob into a file-like object. + + .. note:: + + If the server-set property, :attr:`media_link`, is not yet + initialized, makes an additional API request to load it. + + Downloading a file that has been encrypted with a `customer-supplied`_ + encryption key:: + + >>> from gcloud import storage + >>> from gcloud.storage import Blob + + >>> client = storage.Client(project='my-project') + >>> bucket = client.get_bucket('my-bucket') + >>> encryption_key = 'aa426195405adee2c8081bb9e7e74b19' + >>> blob = Blob('secure-data', bucket) + >>> with open('/tmp/my-secure-file', 'wb') as file_obj: + >>> blob.download_to_file(file_obj, + ... encryption_key=encryption_key) + + The ``encryption_key`` should be a str or bytes with a length of at + least 32. + + .. _customer-supplied: https://cloud.google.com/storage/docs/\ + encryption#customer-supplied + + :type file_obj: file + :param file_obj: A file handle to which to write the blob's data. + + :type encryption_key: str or bytes + :param encryption_key: Optional 32 byte encryption key for + customer-supplied encryption. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :raises: :class:`gcloud.exceptions.NotFound` + """ + client = self._require_client(client) + if self.media_link is None: # not yet loaded + self.reload() + + download_url = self.media_link + + # Use apitools 'Download' facility. + download = Download.from_stream(file_obj) + + if self.chunk_size is not None: + download.chunksize = self.chunk_size + + headers = {} + if encryption_key: + _set_encryption_headers(encryption_key, headers) + + request = Request(download_url, 'GET', headers) + + # Use the private ``_connection`` rather than the public + # ``.connection``, since the public connection may be a batch. A + # batch wraps a client's connection, but does not store the `http` + # object. The rest (API_BASE_URL and build_api_url) are also defined + # on the Batch class, but we just use the wrapped connection since + # it has all three (http, API_BASE_URL and build_api_url). + download.initialize_download(request, client._connection.http)
+ +
[docs] def download_to_filename(self, filename, encryption_key=None, client=None): + """Download the contents of this blob into a named file. + + :type filename: string + :param filename: A filename to be passed to ``open``. + + :type encryption_key: str or bytes + :param encryption_key: Optional 32 byte encryption key for + customer-supplied encryption. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :raises: :class:`gcloud.exceptions.NotFound` + """ + with open(filename, 'wb') as file_obj: + self.download_to_file(file_obj, encryption_key=encryption_key, + client=client) + + mtime = time.mktime(self.updated.timetuple()) + os.utime(file_obj.name, (mtime, mtime))
+ +
[docs] def download_as_string(self, encryption_key=None, client=None): + """Download the contents of this blob as a string. + + :type encryption_key: str or bytes + :param encryption_key: Optional 32 byte encryption key for + customer-supplied encryption. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :rtype: bytes + :returns: The data stored in this blob. + :raises: :class:`gcloud.exceptions.NotFound` + """ + string_buffer = BytesIO() + self.download_to_file(string_buffer, encryption_key=encryption_key, + client=client) + return string_buffer.getvalue()
+ + @staticmethod + def _check_response_error(request, http_response): + """Helper for :meth:`upload_from_file`.""" + info = http_response.info + status = int(info['status']) + if not 200 <= status < 300: + faux_response = httplib2.Response({'status': status}) + raise make_exception(faux_response, http_response.content, + error_info=request.url) + + # pylint: disable=too-many-locals +
[docs] def upload_from_file(self, file_obj, rewind=False, size=None, + encryption_key=None, content_type=None, num_retries=6, + client=None): + """Upload the contents of this blob from a file-like object. + + The content type of the upload will either be + - The value passed in to the function (if any) + - The value stored on the current blob + - The default value of 'application/octet-stream' + + .. note:: + The effect of uploading to an existing blob depends on the + "versioning" and "lifecycle" policies defined on the blob's + bucket. In the absence of those policies, upload will + overwrite any existing contents. + + See the `object versioning + <https://cloud.google.com/storage/docs/object-versioning>`_ and + `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_ + API documents for details. + + Uploading a file with a `customer-supplied`_ encryption key:: + + >>> from gcloud import storage + >>> from gcloud.storage import Blob + + >>> client = storage.Client(project='my-project') + >>> bucket = client.get_bucket('my-bucket') + >>> encryption_key = 'aa426195405adee2c8081bb9e7e74b19' + >>> blob = Blob('secure-data', bucket) + >>> with open('my-file', 'rb') as my_file: + >>> blob.upload_from_file(my_file, + ... encryption_key=encryption_key) + + The ``encryption_key`` should be a str or bytes with a length of at + least 32. + + .. _customer-supplied: https://cloud.google.com/storage/docs/\ + encryption#customer-supplied + + :type file_obj: file + :param file_obj: A file handle open for reading. + + :type rewind: boolean + :param rewind: If True, seek to the beginning of the file handle before + writing the file to Cloud Storage. + + :type size: int + :param size: The number of bytes to read from the file handle. + If not provided, we'll try to guess the size using + :func:`os.fstat`. (If the file handle is not from the + filesystem this won't be possible.) + + :type encryption_key: str or bytes + :param encryption_key: Optional 32 byte encryption key for + customer-supplied encryption. + + :type content_type: string or ``NoneType`` + :param content_type: Optional type of content being uploaded. + + :type num_retries: integer + :param num_retries: Number of upload retries. Defaults to 6. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + + :raises: :class:`ValueError` if size is not passed in and can not be + determined; :class:`gcloud.exceptions.GCloudError` if the + upload response returns an error status. + """ + client = self._require_client(client) + # Use the private ``_connection`` rather than the public + # ``.connection``, since the public connection may be a batch. A + # batch wraps a client's connection, but does not store the `http` + # object. The rest (API_BASE_URL and build_api_url) are also defined + # on the Batch class, but we just use the wrapped connection since + # it has all three (http, API_BASE_URL and build_api_url). + connection = client._connection + content_type = (content_type or self._properties.get('contentType') or + 'application/octet-stream') + + # Rewind the file if desired. + if rewind: + file_obj.seek(0, os.SEEK_SET) + + # Get the basic stats about the file. + total_bytes = size + if total_bytes is None: + if hasattr(file_obj, 'fileno'): + try: + total_bytes = os.fstat(file_obj.fileno()).st_size + except (OSError, UnsupportedOperation): + pass # Assuming fd is not an actual file (maybe socket). 
+ + headers = { + 'Accept': 'application/json', + 'Accept-Encoding': 'gzip, deflate', + 'User-Agent': connection.USER_AGENT, + } + + if encryption_key: + _set_encryption_headers(encryption_key, headers) + + upload = Upload(file_obj, content_type, total_bytes, + auto_transfer=False) + + if self.chunk_size is not None: + upload.chunksize = self.chunk_size + if total_bytes is None: + upload.strategy = RESUMABLE_UPLOAD + elif total_bytes is None: + raise ValueError('total bytes could not be determined. Please ' + 'pass an explicit size, or supply a chunk size ' + 'for a streaming transfer.') + + url_builder = _UrlBuilder(bucket_name=self.bucket.name, + object_name=self.name) + upload_config = _UploadConfig() + + # Temporary URL, until we know simple vs. resumable. + base_url = connection.API_BASE_URL + '/upload' + upload_url = connection.build_api_url(api_base_url=base_url, + path=self.bucket.path + '/o') + + # Use apitools 'Upload' facility. + request = Request(upload_url, 'POST', headers) + + upload.configure_request(upload_config, request, url_builder) + query_params = url_builder.query_params + base_url = connection.API_BASE_URL + '/upload' + request.url = connection.build_api_url(api_base_url=base_url, + path=self.bucket.path + '/o', + query_params=query_params) + upload.initialize_upload(request, connection.http) + + if upload.strategy == RESUMABLE_UPLOAD: + http_response = upload.stream_file(use_chunks=True) + else: + http_response = make_api_request(connection.http, request, + retries=num_retries) + + self._check_response_error(request, http_response) + response_content = http_response.content + + if not isinstance(response_content, + six.string_types): # pragma: NO COVER Python3 + response_content = response_content.decode('utf-8') + self._set_properties(json.loads(response_content))
+ # pylint: enable=too-many-locals + +
[docs] def upload_from_filename(self, filename, content_type=None, + encryption_key=None, client=None): + """Upload this blob's contents from the content of a named file. + + The content type of the upload will either be + - The value passed in to the function (if any) + - The value stored on the current blob + - The value given by mimetypes.guess_type + + .. note:: + The effect of uploading to an existing blob depends on the + "versioning" and "lifecycle" policies defined on the blob's + bucket. In the absence of those policies, upload will + overwrite any existing contents. + + See the `object versioning + <https://cloud.google.com/storage/docs/object-versioning>`_ and + `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_ + API documents for details. + + :type filename: string + :param filename: The path to the file. + + :type content_type: string or ``NoneType`` + :param content_type: Optional type of content being uploaded. + + :type encryption_key: str or bytes + :param encryption_key: Optional 32 byte encryption key for + customer-supplied encryption. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + """ + content_type = content_type or self._properties.get('contentType') + if content_type is None: + content_type, _ = mimetypes.guess_type(filename) + + with open(filename, 'rb') as file_obj: + self.upload_from_file(file_obj, content_type=content_type, + encryption_key=encryption_key, client=client)
+ +
[docs] def upload_from_string(self, data, content_type='text/plain', + encryption_key=None, client=None): + """Upload contents of this blob from the provided string. + + .. note:: + The effect of uploading to an existing blob depends on the + "versioning" and "lifecycle" policies defined on the blob's + bucket. In the absence of those policies, upload will + overwrite any existing contents. + + See the `object versioning + <https://cloud.google.com/storage/docs/object-versioning>`_ and + `lifecycle <https://cloud.google.com/storage/docs/lifecycle>`_ + API documents for details. + + :type data: bytes or text + :param data: The data to store in this blob. If the value is + text, it will be encoded as UTF-8. + + :type content_type: string + :param content_type: Optional type of content being uploaded. Defaults + to ``'text/plain'``. + + :type encryption_key: str or bytes + :param encryption_key: Optional 32 byte encryption key for + customer-supplied encryption. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + """ + if isinstance(data, six.text_type): + data = data.encode('utf-8') + string_buffer = BytesIO() + string_buffer.write(data) + self.upload_from_file(file_obj=string_buffer, rewind=True, + size=len(data), content_type=content_type, + encryption_key=encryption_key, client=client)
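+
+A round-trip sketch (``bucket`` is assumed to exist): text is encoded
+as UTF-8 on the way up and comes back as ``bytes``::
+
+    blob = Blob('greeting.txt', bucket)
+    blob.upload_from_string(u'hello world')
+    assert blob.download_as_string() == b'hello world'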
+ +
[docs] def make_public(self, client=None): + """Make this blob public giving all users read access. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the blob's bucket. + """ + self.acl.all().grant_read() + self.acl.save(client=client)
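+
+After ``make_public`` the unsigned ``public_url`` can be handed out
+directly (``blob`` is assumed to exist and have content)::
+
+    blob.make_public()
+    blob.public_url
+    # 'https://storage.googleapis.com/<bucket name>/<quoted blob name>'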
+ + cache_control = _scalar_property('cacheControl') + """HTTP 'Cache-Control' header for this object. + + See: https://tools.ietf.org/html/rfc7234#section-5.2 and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + content_disposition = _scalar_property('contentDisposition') + """HTTP 'Content-Disposition' header for this object. + + See: https://tools.ietf.org/html/rfc6266 and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + content_encoding = _scalar_property('contentEncoding') + """HTTP 'Content-Encoding' header for this object. + + See: https://tools.ietf.org/html/rfc7231#section-3.1.2.2 and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + content_language = _scalar_property('contentLanguage') + """HTTP 'Content-Language' header for this object. + + See: http://tools.ietf.org/html/bcp47 and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + content_type = _scalar_property('contentType') + """HTTP 'Content-Type' header for this object. + + See: https://tools.ietf.org/html/rfc2616#section-14.17 and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + crc32c = _scalar_property('crc32c') + """CRC32C checksum for this object. + + See: http://tools.ietf.org/html/rfc4960#appendix-B and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + @property + def component_count(self): + """Number of underlying components that make up this object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: integer or ``NoneType`` + :returns: The component count (in case of a composed object) or + ``None`` if the property is not set locally. This property + will not be set on objects not created via ``compose``. + """ + component_count = self._properties.get('componentCount') + if component_count is not None: + return int(component_count) + + @property + def etag(self): + """Retrieve the ETag for the object. + + See: http://tools.ietf.org/html/rfc2616#section-3.11 and + https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: string or ``NoneType`` + :returns: The blob etag or ``None`` if the property is not set locally. + """ + return self._properties.get('etag') + + @property + def generation(self): + """Retrieve the generation for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: integer or ``NoneType`` + :returns: The generation of the blob or ``None`` if the property + is not set locally. + """ + generation = self._properties.get('generation') + if generation is not None: + return int(generation) + + @property + def id(self): + """Retrieve the ID for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: string or ``NoneType`` + :returns: The ID of the blob or ``None`` if the property is not + set locally. + """ + return self._properties.get('id') + + md5_hash = _scalar_property('md5Hash') + """MD5 hash for this object. 
+ + See: http://tools.ietf.org/html/rfc4960#appendix-B and + https://cloud.google.com/storage/docs/json_api/v1/objects + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + + @property + def media_link(self): + """Retrieve the media download URI for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: string or ``NoneType`` + :returns: The media link for the blob or ``None`` if the property is + not set locally. + """ + return self._properties.get('mediaLink') + + @property + def metadata(self): + """Retrieve arbitrary/application specific metadata for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: dict or ``NoneType`` + :returns: The metadata associated with the blob or ``None`` if the + property is not set locally. + """ + return copy.deepcopy(self._properties.get('metadata')) + + @metadata.setter + def metadata(self, value): + """Update arbitrary/application specific metadata for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :type value: dict or ``NoneType`` + :param value: The blob metadata to set. + """ + self._patch_property('metadata', value) + + @property + def metageneration(self): + """Retrieve the metageneration for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: integer or ``NoneType`` + :returns: The metageneration of the blob or ``None`` if the property + is not set locally. + """ + metageneration = self._properties.get('metageneration') + if metageneration is not None: + return int(metageneration) + + @property + def owner(self): + """Retrieve info about the owner of the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: dict or ``NoneType`` + :returns: Mapping of owner's role/ID. If the property is not set + locally, returns ``None``. + """ + return copy.deepcopy(self._properties.get('owner')) + + @property + def self_link(self): + """Retrieve the URI for the object. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: string or ``NoneType`` + :returns: The self link for the blob or ``None`` if the property is + not set locally. + """ + return self._properties.get('selfLink') + + @property + def size(self): + """Size of the object, in bytes. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: integer or ``NoneType`` + :returns: The size of the blob or ``None`` if the property + is not set locally. + """ + size = self._properties.get('size') + if size is not None: + return int(size) + + @property + def storage_class(self): + """Retrieve the storage class for the object. + + See: https://cloud.google.com/storage/docs/storage-classes + https://cloud.google.com/storage/docs/nearline-storage + https://cloud.google.com/storage/docs/durable-reduced-availability + + :rtype: string or ``NoneType`` + :returns: If set, one of "STANDARD", "NEARLINE", or + "DURABLE_REDUCED_AVAILABILITY", else ``None``. + """ + return self._properties.get('storageClass') + + @property + def time_deleted(self): + """Retrieve the timestamp at which the object was deleted. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: :class:`datetime.datetime` or ``NoneType`` + :returns: Datetime object parsed from RFC3339 valid timestamp, or + ``None`` if the property is not set locally. If the blob has + not been deleted, this will never be set. 
+ """ + value = self._properties.get('timeDeleted') + if value is not None: + return _rfc3339_to_datetime(value) + + @property + def updated(self): + """Retrieve the timestamp at which the object was updated. + + See: https://cloud.google.com/storage/docs/json_api/v1/objects + + :rtype: :class:`datetime.datetime` or ``NoneType`` + :returns: Datetime object parsed from RFC3339 valid timestamp, or + ``None`` if the property is not set locally. + """ + value = self._properties.get('updated') + if value is not None: + return _rfc3339_to_datetime(value)
+ + +class _UploadConfig(object): + """Faux message FBO apitools' 'configure_request'. + + Values extracted from apitools + 'samples/storage_sample/storage/storage_v1_client.py' + """ + accept = ['*/*'] + max_size = None + resumable_multipart = True + resumable_path = u'/resumable/upload/storage/v1/b/{bucket}/o' + simple_multipart = True + simple_path = u'/upload/storage/v1/b/{bucket}/o' + + +class _UrlBuilder(object): + """Faux builder FBO apitools' 'configure_request'""" + def __init__(self, bucket_name, object_name): + self.query_params = {'name': object_name} + self._bucket_name = bucket_name + self._relative_path = '' + + +def _set_encryption_headers(key, headers): + """Builds customer encryption key headers + + :type key: str or bytes + :param key: 32 byte key to build request key and hash. + + :type headers: dict + :param headers: dict of HTTP headers being sent in request. + """ + key = _to_bytes(key) + sha256_key = hashlib.sha256(key).digest() + key_hash = base64.b64encode(sha256_key).rstrip() + encoded_key = base64.b64encode(key).rstrip() + headers['X-Goog-Encryption-Algorithm'] = 'AES256' + headers['X-Goog-Encryption-Key'] = _bytes_to_unicode(encoded_key) + headers['X-Goog-Encryption-Key-Sha256'] = _bytes_to_unicode(key_hash) +
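+
+A sketch of the headers the helper above produces; the key shown is
+illustrative only, and a real key should be 32 securely generated
+bytes::
+
+    headers = {}
+    _set_encryption_headers(b'0' * 32, headers)
+    sorted(headers)
+    # ['X-Goog-Encryption-Algorithm',
+    #  'X-Goog-Encryption-Key',
+    #  'X-Goog-Encryption-Key-Sha256']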
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/storage/bucket.html b/0.18.1/_modules/gcloud/storage/bucket.html new file mode 100644 index 000000000000..8bf77f71fa38 --- /dev/null +++ b/0.18.1/_modules/gcloud/storage/bucket.html @@ -0,0 +1,1051 @@ + + + + + + + + gcloud.storage.bucket — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.storage.bucket

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud storage buckets."""
+
+import copy
+
+import six
+
+from gcloud._helpers import _rfc3339_to_datetime
+from gcloud.exceptions import NotFound
+from gcloud.iterator import Iterator
+from gcloud.storage._helpers import _PropertyMixin
+from gcloud.storage._helpers import _scalar_property
+from gcloud.storage.acl import BucketACL
+from gcloud.storage.acl import DefaultObjectACL
+from gcloud.storage.blob import Blob
+
+
+class _BlobIterator(Iterator):
+    """An iterator listing blobs in a bucket
+
+    You shouldn't have to use this directly, but instead should use the
+    :class:`gcloud.storage.bucket.Bucket.list_blobs` method.
+
+    :type bucket: :class:`gcloud.storage.bucket.Bucket`
+    :param bucket: The bucket from which to list blobs.
+
+    :type extra_params: dict or None
+    :param extra_params: Extra query string parameters for the API call.
+
+    :type client: :class:`gcloud.storage.client.Client`
+    :param client: Optional. The client to use for making connections.
+                   Defaults to the bucket's client.
+    """
+    def __init__(self, bucket, extra_params=None, client=None):
+        if client is None:
+            client = bucket.client
+        self.bucket = bucket
+        self.prefixes = set()
+        self._current_prefixes = None
+        super(_BlobIterator, self).__init__(
+            client=client, path=bucket.path + '/o',
+            extra_params=extra_params)
+
+    def get_items_from_response(self, response):
+        """Yield :class:`.storage.blob.Blob` items from response.
+
+        :type response: dict
+        :param response: The JSON API response for a page of blobs.
+        """
+        self._current_prefixes = tuple(response.get('prefixes', ()))
+        self.prefixes.update(self._current_prefixes)
+        for item in response.get('items', []):
+            name = item.get('name')
+            blob = Blob(name, bucket=self.bucket)
+            blob._set_properties(item)
+            yield blob
+
+
+
[docs]class Bucket(_PropertyMixin): + """A class representing a Bucket on Cloud Storage. + + :type client: :class:`gcloud.storage.client.Client` + :param client: A client which holds credentials and project configuration + for the bucket (which requires a project). + + :type name: string + :param name: The name of the bucket. + """ + _iterator_class = _BlobIterator + + _MAX_OBJECTS_FOR_ITERATION = 256 + """Maximum number of existing objects allowed in iteration. + + This is used in Bucket.delete() and Bucket.make_public(). + """ + + _STORAGE_CLASSES = ('STANDARD', 'NEARLINE', 'DURABLE_REDUCED_AVAILABILITY') + + def __init__(self, client, name=None): + super(Bucket, self).__init__(name=name) + self._client = client + self._acl = BucketACL(self) + self._default_object_acl = DefaultObjectACL(self) + + def __repr__(self): + return '<Bucket: %s>' % self.name + + @property + def client(self): + """The client bound to this bucket.""" + return self._client + +
[docs] def blob(self, blob_name, chunk_size=None): + """Factory constructor for blob object. + + .. note:: + This will not make an HTTP request; it simply instantiates + a blob object owned by this bucket. + + :type blob_name: string + :param blob_name: The name of the blob to be instantiated. + + :type chunk_size: integer + :param chunk_size: The size of a chunk of data whenever iterating + (1 MB). This must be a multiple of 256 KB per the + API specification. + + :rtype: :class:`gcloud.storage.blob.Blob` + :returns: The blob object created. + """ + return Blob(name=blob_name, bucket=self, chunk_size=chunk_size)
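+A quick usage sketch (bucket and blob names hypothetical; no request is
+made). Here the chunk size is 1 MB, a multiple of 256 KB as the API
+requires:
+
+.. code:: python
+
+    >>> from gcloud import storage
+    >>> client = storage.Client()
+    >>> bucket = client.bucket('my-bucket')
+    >>> blob = bucket.blob('data/my-file.txt', chunk_size=4 * 256 * 1024)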
+ +
[docs] def exists(self, client=None): + """Determines whether or not this bucket exists. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :rtype: boolean + :returns: True if the bucket exists in Cloud Storage. + """ + client = self._require_client(client) + try: + # We only need the status code (200 or not) so we seek to + # minimize the returned payload. + query_params = {'fields': 'name'} + # We intentionally pass `_target_object=None` since fields=name + # would limit the local properties. + client.connection.api_request(method='GET', path=self.path, + query_params=query_params, + _target_object=None) + # NOTE: This will not fail immediately in a batch. However, when + # Batch.finish() is called, the resulting `NotFound` will be + # raised. + return True + except NotFound: + return False
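+For example (bucket name hypothetical; this issues one lightweight ``GET``
+with ``fields=name``):
+
+.. code:: python
+
+    >>> client.bucket('my-bucket').exists()
+    True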
+ +
[docs] def create(self, client=None): + """Creates current bucket. + + If the bucket already exists, will raise + :class:`gcloud.exceptions.Conflict`. + + This implements "storage.buckets.insert". + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + """ + client = self._require_client(client) + query_params = {'project': client.project} + properties = dict( + (key, self._properties[key]) for key in self._changes) + properties['name'] = self.name + api_response = client.connection.api_request( + method='POST', path='/b', query_params=query_params, + data=properties, _target_object=self) + self._set_properties(api_response)
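+A hedged sketch of creating a bucket and handling the already-exists case
+(bucket name hypothetical):
+
+.. code:: python
+
+    >>> from gcloud.exceptions import Conflict
+    >>> bucket = client.bucket('some-new-bucket')
+    >>> try:
+    ...     bucket.create()
+    ... except Conflict:
+    ...     pass  # the bucket already exists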
+ + @property + def acl(self): + """Create our ACL on demand.""" + return self._acl + + @property + def default_object_acl(self): + """Create our defaultObjectACL on demand.""" + return self._default_object_acl + + @staticmethod +
[docs] def path_helper(bucket_name): + """Relative URL path for a bucket. + + :type bucket_name: string + :param bucket_name: The bucket name in the path. + + :rtype: string + :returns: The relative URL path for ``bucket_name``. + """ + return '/b/' + bucket_name
+ + @property + def path(self): + """The URL path to this bucket.""" + if not self.name: + raise ValueError('Cannot determine path without bucket name.') + + return self.path_helper(self.name) + +
[docs] def get_blob(self, blob_name, client=None): + """Get a blob object by name. + + This will return None if the blob doesn't exist:: + + >>> from gcloud import storage + >>> client = storage.Client() + >>> bucket = client.get_bucket('my-bucket') + >>> print bucket.get_blob('/path/to/blob.txt') + <Blob: my-bucket, /path/to/blob.txt> + >>> print bucket.get_blob('/does-not-exist.txt') + None + + :type blob_name: string + :param blob_name: The name of the blob to retrieve. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :rtype: :class:`gcloud.storage.blob.Blob` or None + :returns: The blob object if it exists, otherwise None. + """ + client = self._require_client(client) + blob = Blob(bucket=self, name=blob_name) + try: + response = client.connection.api_request( + method='GET', path=blob.path, _target_object=blob) + # NOTE: We assume response.get('name') matches `blob_name`. + blob._set_properties(response) + # NOTE: This will not fail immediately in a batch. However, when + # Batch.finish() is called, the resulting `NotFound` will be + # raised. + return blob + except NotFound: + return None
+ +
[docs] def list_blobs(self, max_results=None, page_token=None, prefix=None, + delimiter=None, versions=None, + projection='noAcl', fields=None, client=None): + """Return an iterator used to find blobs in the bucket. + + :type max_results: integer or ``NoneType`` + :param max_results: maximum number of blobs to return. + + :type page_token: string + :param page_token: opaque marker for the next "page" of blobs. If not + passed, will return the first page of blobs. + + :type prefix: string or ``NoneType`` + :param prefix: optional prefix used to filter blobs. + + :type delimiter: string or ``NoneType`` + :param delimiter: optional delimiter, used with ``prefix`` to + emulate hierarchy. + + :type versions: boolean or ``NoneType`` + :param versions: whether object versions should be returned as + separate blobs. + + :type projection: string or ``NoneType`` + :param projection: If used, must be 'full' or 'noAcl'. Defaults to + 'noAcl'. Specifies the set of properties to return. + + :type fields: string or ``NoneType`` + :param fields: Selector specifying which fields to include in a + partial response. Must be a list of fields. For example, + to get a partial response with just the next page token + and the language of each blob returned: + 'items/contentLanguage,nextPageToken' + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :rtype: :class:`_BlobIterator`. + :returns: An iterator of blobs. + """ + extra_params = {} + + if max_results is not None: + extra_params['maxResults'] = max_results + + if prefix is not None: + extra_params['prefix'] = prefix + + if delimiter is not None: + extra_params['delimiter'] = delimiter + + if versions is not None: + extra_params['versions'] = versions + + extra_params['projection'] = projection + + if fields is not None: + extra_params['fields'] = fields + + result = self._iterator_class( + self, extra_params=extra_params, client=client) + # Page token must be handled specially since the base `Iterator` + # class has it as a reserved property. + if page_token is not None: + result.next_page_token = page_token + return result
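+As a sketch (names hypothetical), ``prefix`` and ``delimiter`` together
+emulate a folder listing. Note that the iterator's ``prefixes`` set (see
+``_BlobIterator`` above) is only populated as pages are consumed:
+
+.. code:: python
+
+    >>> iterator = bucket.list_blobs(prefix='photos/', delimiter='/')
+    >>> blobs = list(iterator)          # objects directly under 'photos/'
+    >>> subfolders = iterator.prefixes  # 'photos/...' prefixes seen so far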
+ +
[docs] def delete(self, force=False, client=None): + """Delete this bucket. + + The bucket **must** be empty in order to submit a delete request. If + ``force=True`` is passed, this will first attempt to delete all the + objects / blobs in the bucket (i.e. try to empty the bucket). + + If the bucket doesn't exist, this will raise + :class:`gcloud.exceptions.NotFound`. If the bucket is not empty + (and ``force=False``), will raise :class:`gcloud.exceptions.Conflict`. + + If ``force=True`` and the bucket contains more than 256 objects / blobs, + this will cowardly refuse to delete the objects (or the bucket). This + is to prevent accidental bucket deletion and to prevent extremely long + runtime of this method. + + :type force: boolean + :param force: If True, empties the bucket's objects then deletes it. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :raises: :class:`ValueError` if ``force`` is ``True`` and the bucket + contains more than 256 objects / blobs. + """ + client = self._require_client(client) + if force: + blobs = list(self.list_blobs( + max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, + client=client)) + if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: + message = ( + 'Refusing to delete bucket with more than ' + '%d objects. If you actually want to delete ' + 'this bucket, please delete the objects ' + 'yourself before calling Bucket.delete().' + ) % (self._MAX_OBJECTS_FOR_ITERATION,) + raise ValueError(message) + + # Ignore 404 errors on delete. + self.delete_blobs(blobs, on_error=lambda blob: None, + client=client) + + # We intentionally pass `_target_object=None` since a DELETE + # request has no response value (whether in a standard request or + # in a batch request). + client.connection.api_request(method='DELETE', path=self.path, + _target_object=None)
+ +
[docs] def delete_blob(self, blob_name, client=None): + """Deletes a blob from the current bucket. + + If the blob isn't found (backend 404), raises a + :class:`gcloud.exceptions.NotFound`. + + For example:: + + >>> from gcloud.exceptions import NotFound + >>> from gcloud import storage + >>> client = storage.Client() + >>> bucket = client.get_bucket('my-bucket') + >>> print bucket.list_blobs() + [<Blob: my-bucket, my-file.txt>] + >>> bucket.delete_blob('my-file.txt') + >>> try: + ... bucket.delete_blob('doesnt-exist') + ... except NotFound: + ... pass + + :type blob_name: string + :param blob_name: A blob name to delete. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :raises: :class:`gcloud.exceptions.NotFound` (to suppress + the exception, call ``delete_blobs``, passing a no-op + ``on_error`` callback, e.g.:: + + >>> bucket.delete_blobs([blob], on_error=lambda blob: None) + """ + client = self._require_client(client) + blob_path = Blob.path_helper(self.path, blob_name) + # We intentionally pass `_target_object=None` since a DELETE + # request has no response value (whether in a standard request or + # in a batch request). + client.connection.api_request(method='DELETE', path=blob_path, + _target_object=None)
+ +
[docs] def delete_blobs(self, blobs, on_error=None, client=None): + """Deletes a list of blobs from the current bucket. + + Uses :func:`Bucket.delete_blob` to delete each individual blob. + + :type blobs: list of string or :class:`gcloud.storage.blob.Blob` + :param blobs: A list of blob names or Blob objects to delete. + + :type on_error: a callable taking (blob) + :param on_error: If not ``None``, called once for each blob raising + :class:`gcloud.exceptions.NotFound`; + otherwise, the exception is propagated. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :raises: :class:`gcloud.exceptions.NotFound` (if + `on_error` is not passed). + """ + for blob in blobs: + try: + blob_name = blob + if not isinstance(blob_name, six.string_types): + blob_name = blob.name + self.delete_blob(blob_name, client=client) + except NotFound: + if on_error is not None: + on_error(blob) + else: + raise
+ +
[docs] def copy_blob(self, blob, destination_bucket, new_name=None, + client=None): + """Copy the given blob to the given bucket, optionally with a new name. + + :type blob: :class:`gcloud.storage.blob.Blob` + :param blob: The blob to be copied. + + :type destination_bucket: :class:`gcloud.storage.bucket.Bucket` + :param destination_bucket: The bucket into which the blob should be + copied. + + :type new_name: string + :param new_name: (optional) the new name for the copied file. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :rtype: :class:`gcloud.storage.blob.Blob` + :returns: The new Blob. + """ + client = self._require_client(client) + if new_name is None: + new_name = blob.name + new_blob = Blob(bucket=destination_bucket, name=new_name) + api_path = blob.path + '/copyTo' + new_blob.path + copy_result = client.connection.api_request( + method='POST', path=api_path, _target_object=new_blob) + new_blob._set_properties(copy_result) + return new_blob
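+A minimal usage sketch (bucket and object names hypothetical):
+
+.. code:: python
+
+    >>> source = client.get_bucket('source-bucket')
+    >>> destination = client.get_bucket('destination-bucket')
+    >>> blob = source.blob('data.csv')
+    >>> new_blob = source.copy_blob(blob, destination,
+    ...                             new_name='data-copy.csv')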
+ +
[docs] def rename_blob(self, blob, new_name, client=None): + """Rename the given blob using copy and delete operations. + + Effectively, copies blob to the same bucket with a new name, then + deletes the blob. + + .. warning:: + + This method will first duplicate the data and then delete the + old blob. This means that with very large objects renaming + could be a very (temporarily) costly or a very slow operation. + + :type blob: :class:`gcloud.storage.blob.Blob` + :param blob: The blob to be renamed. + + :type new_name: string + :param new_name: The new name for this blob. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + + :rtype: :class:`Blob` + :returns: The newly-renamed blob. + """ + new_blob = self.copy_blob(blob, self, new_name, client=client) + blob.delete(client=client) + return new_blob
+ + @property + def cors(self): + """Retrieve CORS policies configured for this bucket. + + See: http://www.w3.org/TR/cors/ and + https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: list of dictionaries + :returns: A sequence of mappings describing each CORS policy. + """ + return [copy.deepcopy(policy) + for policy in self._properties.get('cors', ())] + + @cors.setter + def cors(self, entries): + """Set CORS policies configured for this bucket. + + See: http://www.w3.org/TR/cors/ and + https://cloud.google.com/storage/docs/json_api/v1/buckets + + :type entries: list of dictionaries + :param entries: A sequence of mappings describing each CORS policy. + """ + self._patch_property('cors', entries) + + @property + def etag(self): + """Retrieve the ETag for the bucket. + + See: http://tools.ietf.org/html/rfc2616#section-3.11 and + https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: string or ``NoneType`` + :returns: The bucket etag or ``None`` if the property is not + set locally. + """ + return self._properties.get('etag') + + @property + def id(self): + """Retrieve the ID for the bucket. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: string or ``NoneType`` + :returns: The ID of the bucket or ``None`` if the property is not + set locally. + """ + return self._properties.get('id') + + @property + def lifecycle_rules(self): + """Lifecycle rules configured for this bucket. + + See: https://cloud.google.com/storage/docs/lifecycle and + https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: list(dict) + :returns: A sequence of mappings describing each lifecycle rule. + """ + info = self._properties.get('lifecycle', {}) + return [copy.deepcopy(rule) for rule in info.get('rule', ())] + + @lifecycle_rules.setter + def lifecycle_rules(self, rules): + self._patch_property('lifecycle', {'rule': rules}) + + location = _scalar_property('location') + """Retrieve location configured for this bucket. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets and + https://cloud.google.com/storage/docs/concepts-techniques#specifyinglocations + + If the property is not set locally, returns ``None``. + + :rtype: string or ``NoneType`` + """ + +
[docs] def get_logging(self): + """Return info about access logging for this bucket. + + See: https://cloud.google.com/storage/docs/accesslogs#status + + :rtype: dict or None + :returns: a dict w/ keys, ``logBucket`` and ``logObjectPrefix`` + (if logging is enabled), or None (if not). + """ + info = self._properties.get('logging') + return copy.deepcopy(info)
+ +
[docs] def enable_logging(self, bucket_name, object_prefix=''): + """Enable access logging for this bucket. + + See: https://cloud.google.com/storage/docs/accesslogs#delivery + + :type bucket_name: string + :param bucket_name: name of bucket in which to store access logs + + :type object_prefix: string + :param object_prefix: prefix for access log filenames + """ + info = {'logBucket': bucket_name, 'logObjectPrefix': object_prefix} + self._patch_property('logging', info)
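+Note that this only stages a local change. Assuming the ``patch()`` method
+inherited from ``_PropertyMixin``, a sketch of pushing the change to the
+server looks like:
+
+.. code:: python
+
+    >>> bucket.enable_logging('my-log-bucket', object_prefix='access-logs/')
+    >>> bucket.patch()  # assumed: sends the pending 'logging' change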
+ +
[docs] def disable_logging(self): + """Disable access logging for this bucket. + + See: https://cloud.google.com/storage/docs/accesslogs#disabling + """ + self._patch_property('logging', None)
+ + @property + def metageneration(self): + """Retrieve the metageneration for the bucket. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: integer or ``NoneType`` + :returns: The metageneration of the bucket or ``None`` if the property + is not set locally. + """ + metageneration = self._properties.get('metageneration') + if metageneration is not None: + return int(metageneration) + + @property + def owner(self): + """Retrieve info about the owner of the bucket. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: dict or ``NoneType`` + :returns: Mapping of owner's role/ID. If the property is not set + locally, returns ``None``. + """ + return copy.deepcopy(self._properties.get('owner')) + + @property + def project_number(self): + """Retrieve the number of the project to which the bucket is assigned. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: integer or ``NoneType`` + :returns: The project number that owns the bucket or ``None`` if the + property is not set locally. + """ + project_number = self._properties.get('projectNumber') + if project_number is not None: + return int(project_number) + + @property + def self_link(self): + """Retrieve the URI for the bucket. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: string or ``NoneType`` + :returns: The self link for the bucket or ``None`` if the property is + not set locally. + """ + return self._properties.get('selfLink') + + @property + def storage_class(self): + """Retrieve the storage class for the bucket. + + See: https://cloud.google.com/storage/docs/storage-classes + https://cloud.google.com/storage/docs/nearline-storage + https://cloud.google.com/storage/docs/durable-reduced-availability + + :rtype: string or ``NoneType`` + :returns: If set, one of "STANDARD", "NEARLINE", or + "DURABLE_REDUCED_AVAILABILITY", else ``None``. + """ + return self._properties.get('storageClass') + + @storage_class.setter + def storage_class(self, value): + """Set the storage class for the bucket. + + See: https://cloud.google.com/storage/docs/storage-classes + https://cloud.google.com/storage/docs/nearline-storage + https://cloud.google.com/storage/docs/durable-reduced-availability + + :type value: string + :param value: one of "STANDARD", "NEARLINE", or + "DURABLE_REDUCED_AVAILABILITY" + """ + if value not in self._STORAGE_CLASSES: + raise ValueError('Invalid storage class: %s' % (value,)) + self._patch_property('storageClass', value) + + @property + def time_created(self): + """Retrieve the timestamp at which the bucket was created. + + See: https://cloud.google.com/storage/docs/json_api/v1/buckets + + :rtype: :class:`datetime.datetime` or ``NoneType`` + :returns: Datetime object parsed from RFC3339 valid timestamp, or + ``None`` if the property is not set locally. + """ + value = self._properties.get('timeCreated') + if value is not None: + return _rfc3339_to_datetime(value) + + @property + def versioning_enabled(self): + """Is versioning enabled for this bucket? + + See: https://cloud.google.com/storage/docs/object-versioning for + details. + + :rtype: boolean + :returns: True if enabled, else False. + """ + versioning = self._properties.get('versioning', {}) + return versioning.get('enabled', False) + + @versioning_enabled.setter + def versioning_enabled(self, value): + """Enable versioning for this bucket. + + See: https://cloud.google.com/storage/docs/object-versioning for + details. 
+ + :type value: convertible to boolean + :param value: should versioning be enabled for the bucket? + """ + self._patch_property('versioning', {'enabled': bool(value)}) + +
[docs] def configure_website(self, main_page_suffix=None, not_found_page=None): + """Configure website-related properties. + + See: https://developers.google.com/storage/docs/website-configuration + + .. note:: + This (apparently) only works + if your bucket name is a domain name + (and to do that, you need to get approved somehow...). + + If you want this bucket to host a website, just provide the name + of an index page and a page to use when a blob isn't found:: + + >>> from gcloud import storage + >>> client = storage.Client() + >>> bucket = client.get_bucket(bucket_name) + >>> bucket.configure_website('index.html', '404.html') + + You probably should also make the whole bucket public:: + + >>> bucket.make_public(recursive=True, future=True) + + This says: "Make the bucket public, and all the stuff already in + the bucket, and anything else I add to the bucket. Just make it + all public." + + :type main_page_suffix: string + :param main_page_suffix: The page to use as the main page + of a directory. + Typically something like index.html. + + :type not_found_page: string + :param not_found_page: The file to use when a page isn't found. + """ + data = { + 'mainPageSuffix': main_page_suffix, + 'notFoundPage': not_found_page, + } + self._patch_property('website', data)
+ +
[docs] def disable_website(self): + """Disable the website configuration for this bucket. + + This is really just a shortcut for setting the website-related + attributes to ``None``. + """ + return self.configure_website(None, None)
+ +
[docs] def make_public(self, recursive=False, future=False, client=None): + """Make a bucket public. + + If ``recursive=True`` and the bucket contains more than 256 + objects / blobs, this will cowardly refuse to make the objects public. + This is to prevent extremely long runtime of this method. + + :type recursive: boolean + :param recursive: If True, this will make all blobs inside the bucket + public as well. + + :type future: boolean + :param future: If True, this will make all objects created in the + future public as well. + + :type client: :class:`gcloud.storage.client.Client` or ``NoneType`` + :param client: Optional. The client to use. If not passed, falls back + to the ``client`` stored on the current bucket. + """ + self.acl.all().grant_read() + self.acl.save(client=client) + + if future: + doa = self.default_object_acl + if not doa.loaded: + doa.reload(client=client) + doa.all().grant_read() + doa.save(client=client) + + if recursive: + blobs = list(self.list_blobs( + projection='full', + max_results=self._MAX_OBJECTS_FOR_ITERATION + 1, + client=client)) + if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION: + message = ( + 'Refusing to make public recursively with more than ' + '%d objects. If you actually want to make every object ' + 'in this bucket public, please do it on the objects ' + 'yourself.' + ) % (self._MAX_OBJECTS_FOR_ITERATION,) + raise ValueError(message) + + for blob in blobs: + blob.acl.all().grant_read() + blob.acl.save(client=client)
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/storage/client.html b/0.18.1/_modules/gcloud/storage/client.html new file mode 100644 index 000000000000..7d62aa3eaac1 --- /dev/null +++ b/0.18.1/_modules/gcloud/storage/client.html @@ -0,0 +1,539 @@ + + + + + + + + gcloud.storage.client — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.storage.client

+# Copyright 2015 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google Cloud Storage API."""
+
+
+from gcloud._helpers import _LocalStack
+from gcloud.client import JSONClient
+from gcloud.exceptions import NotFound
+from gcloud.iterator import Iterator
+from gcloud.storage.batch import Batch
+from gcloud.storage.bucket import Bucket
+from gcloud.storage.connection import Connection
+
+
+
[docs]class Client(JSONClient): + """Client to bundle configuration needed for API requests. + + :type project: string + :param project: the project which the client acts on behalf of. Will be + passed when creating a bucket. If not passed, + falls back to the default inferred from the environment. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` or + :class:`NoneType` + :param credentials: The OAuth2 Credentials to use for the connection + owned by this client. If not passed (and if no ``http`` + object is passed), falls back to the default inferred + from the environment. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: An optional HTTP object to make requests. If not passed, an + ``http`` object is created that is bound to the + ``credentials`` for the current object. + """ + + _connection_class = Connection + + def __init__(self, project=None, credentials=None, http=None): + self._connection = None + super(Client, self).__init__(project=project, credentials=credentials, + http=http) + self._batch_stack = _LocalStack() + + @property + def connection(self): + """Get connection or batch on the client. + + :rtype: :class:`gcloud.storage.connection.Connection` + :returns: The connection set on the client, or the batch + if one is set. + """ + if self.current_batch is not None: + return self.current_batch + else: + return self._connection + + @connection.setter + def connection(self, value): + """Set connection on the client. + + Intended to be used by the constructor, since the base class calls + ``self.connection = connection``. + Will raise if the connection is set more than once. + + :type value: :class:`gcloud.storage.connection.Connection` + :param value: The connection set on the client. + + :raises: :class:`ValueError` if connection has already been set. + """ + if self._connection is not None: + raise ValueError('Connection already set on client') + self._connection = value + + def _push_batch(self, batch): + """Push a batch onto our stack. + + "Protected", intended for use by batch context mgrs. + + :type batch: :class:`gcloud.storage.batch.Batch` + :param batch: newly-active batch + """ + self._batch_stack.push(batch) + + def _pop_batch(self): + """Pop a batch from our stack. + + "Protected", intended for use by batch context mgrs. + + :raises: IndexError if the stack is empty. + :rtype: :class:`gcloud.storage.batch.Batch` + :returns: the top-most batch/transaction, after removing it. + """ + return self._batch_stack.pop() + + @property + def current_batch(self): + """Currently-active batch. + + :rtype: :class:`gcloud.storage.batch.Batch` or ``NoneType`` (if + no batch is active). + :returns: The batch at the top of the batch stack. + """ + return self._batch_stack.top + +
[docs] def bucket(self, bucket_name): + """Factory constructor for bucket object. + + .. note:: + This will not make an HTTP request; it simply instantiates + a bucket object owned by this client. + + :type bucket_name: string + :param bucket_name: The name of the bucket to be instantiated. + + :rtype: :class:`gcloud.storage.bucket.Bucket` + :returns: The bucket object created. + """ + return Bucket(client=self, name=bucket_name)
+ +
[docs] def batch(self): + """Factory constructor for batch object. + + .. note:: + This will not make an HTTP request; it simply instantiates + a batch object owned by this client. + + :rtype: :class:`gcloud.storage.batch.Batch` + :returns: The batch object created. + """ + return Batch(client=self)
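+As a hedged sketch (blob names hypothetical), the batch is normally used as
+a context manager, so that requests issued inside the block are sent
+together when the block exits:
+
+.. code:: python
+
+    >>> with client.batch():
+    ...     bucket.delete_blob('stale-1.txt')
+    ...     bucket.delete_blob('stale-2.txt')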
+ +
[docs] def get_bucket(self, bucket_name): + """Get a bucket by name. + + If the bucket isn't found, this will raise a + :class:`gcloud.exceptions.NotFound`. + + For example:: + + >>> try: + ...     bucket = client.get_bucket('my-bucket') + ... except gcloud.exceptions.NotFound: + ...     print 'Sorry, that bucket does not exist!' + + This implements "storage.buckets.get". + + :type bucket_name: string + :param bucket_name: The name of the bucket to get. + + :rtype: :class:`gcloud.storage.bucket.Bucket` + :returns: The bucket matching the name provided. + :raises: :class:`gcloud.exceptions.NotFound` + """ + bucket = Bucket(self, name=bucket_name) + bucket.reload(client=self) + return bucket
+ +
[docs] def lookup_bucket(self, bucket_name): + """Get a bucket by name, returning None if not found. + + You can use this if you would rather check for a None value + than catching an exception:: + + >>> bucket = client.lookup_bucket('doesnt-exist') + >>> print bucket + None + >>> bucket = client.lookup_bucket('my-bucket') + >>> print bucket + <Bucket: my-bucket> + + :type bucket_name: string + :param bucket_name: The name of the bucket to get. + + :rtype: :class:`gcloud.storage.bucket.Bucket` + :returns: The bucket matching the name provided or None if not found. + """ + try: + return self.get_bucket(bucket_name) + except NotFound: + return None
+ +
[docs] def create_bucket(self, bucket_name): + """Create a new bucket. + + For example:: + + >>> bucket = client.create_bucket('my-bucket') + >>> print bucket + <Bucket: my-bucket> + + This implements "storage.buckets.insert". + + If the bucket already exists, will raise + :class:`gcloud.exceptions.Conflict`. + + :type bucket_name: string + :param bucket_name: The bucket name to create. + + :rtype: :class:`gcloud.storage.bucket.Bucket` + :returns: The newly created bucket. + """ + bucket = Bucket(self, name=bucket_name) + bucket.create(client=self) + return bucket
+ +
[docs] def list_buckets(self, max_results=None, page_token=None, prefix=None, + projection='noAcl', fields=None): + """Get all buckets in the project associated with the client. + + This will not populate the list of blobs available in each + bucket. + + >>> for bucket in client.list_buckets(): + ...     print bucket + + This implements "storage.buckets.list". + + :type max_results: integer or ``NoneType`` + :param max_results: Optional. Maximum number of buckets to return. + + :type page_token: string or ``NoneType`` + :param page_token: Optional. Opaque marker for the next "page" of + buckets. If not passed, will return the first page + of buckets. + + :type prefix: string or ``NoneType`` + :param prefix: Optional. Filter results to buckets whose names begin + with this prefix. + + :type projection: string or ``NoneType`` + :param projection: If used, must be 'full' or 'noAcl'. Defaults to + 'noAcl'. Specifies the set of properties to return. + + :type fields: string or ``NoneType`` + :param fields: Selector specifying which fields to include in a + partial response. Must be a list of fields. For example, + to get a partial response with just the next page token + and the language of each bucket returned: + 'items/id,nextPageToken' + + :rtype: iterable of :class:`gcloud.storage.bucket.Bucket` objects. + :returns: All buckets belonging to this project. + """ + extra_params = {'project': self.project} + + if max_results is not None: + extra_params['maxResults'] = max_results + + if prefix is not None: + extra_params['prefix'] = prefix + + extra_params['projection'] = projection + + if fields is not None: + extra_params['fields'] = fields + + result = _BucketIterator(client=self, + extra_params=extra_params) + # Page token must be handled specially since the base `Iterator` + # class has it as a reserved property. + if page_token is not None: + result.next_page_token = page_token + return result
+ + +class _BucketIterator(Iterator): + """An iterator listing all buckets. + + You shouldn't have to use this directly, but instead should use the + helper methods on :class:`gcloud.storage.connection.Connection` + objects. + + :type client: :class:`gcloud.storage.client.Client` + :param client: The client to use for making connections. + + :type extra_params: dict or ``NoneType`` + :param extra_params: Extra query string parameters for the API call. + """ + + def __init__(self, client, extra_params=None): + super(_BucketIterator, self).__init__(client=client, path='/b', + extra_params=extra_params) + + def get_items_from_response(self, response): + """Factory method which yields :class:`.Bucket` items from a response. + + :type response: dict + :param response: The JSON API response for a page of buckets. + """ + for item in response.get('items', []): + name = item.get('name') + bucket = Bucket(self.client, name) + bucket._set_properties(item) + yield bucket +
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/storage/connection.html b/0.18.1/_modules/gcloud/storage/connection.html new file mode 100644 index 000000000000..1d2e2b556f5e --- /dev/null +++ b/0.18.1/_modules/gcloud/storage/connection.html @@ -0,0 +1,276 @@ + + + + + + + + gcloud.storage.connection — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.storage.connection

+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with gcloud storage connections."""
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Cloud Storage via the JSON REST API. + + :type credentials: :class:`oauth2client.client.OAuth2Credentials` + :param credentials: (Optional) The OAuth2 Credentials to use for this + connection. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: (Optional) HTTP object to make requests. + """ + + API_BASE_URL = base_connection.API_BASE_URL + """The base of the API call URL.""" + + API_VERSION = 'v1' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/storage/{api_version}{path}' + """A template for the URL of a particular API call.""" + + SCOPE = ('https://www.googleapis.com/auth/devstorage.full_control', + 'https://www.googleapis.com/auth/devstorage.read_only', + 'https://www.googleapis.com/auth/devstorage.read_write') + """The scopes required for authenticating as a Cloud Storage consumer."""
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/translate/client.html b/0.18.1/_modules/gcloud/translate/client.html new file mode 100644 index 000000000000..03a027a97fc2 --- /dev/null +++ b/0.18.1/_modules/gcloud/translate/client.html @@ -0,0 +1,457 @@ + + + + + + + + gcloud.translate.client — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.translate.client

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Client for interacting with the Google Cloud Translate API."""
+
+
+import httplib2
+import six
+
+from gcloud._helpers import _to_bytes
+from gcloud.translate.connection import Connection
+
+
+ENGLISH_ISO_639 = 'en'
+"""ISO 639-1 language code for English."""
+
+
+
[docs]class Client(object): + """Client to bundle configuration needed for API requests. + + :type api_key: str + :param api_key: The API key to send with requests as a query + parameter. + + :type http: :class:`httplib2.Http` or class that defines ``request()``. + :param http: (Optional) HTTP object to make requests. If not + passed, an :class:`httplib2.Http` object is created. + + :type target_language: str + :param target_language: (Optional) The target language used for + translations and language names. (Defaults to + :data:`ENGLISH_ISO_639`.) + """ + + def __init__(self, api_key, http=None, target_language=ENGLISH_ISO_639): + self.api_key = api_key + if http is None: + http = httplib2.Http() + self.connection = Connection(http=http) + self.target_language = target_language + +
[docs] def get_languages(self, target_language=None): + """Get list of supported languages for translation. + + See: https://cloud.google.com/translate/v2/\ + discovering-supported-languages-with-rest + + :type target_language: str + :param target_language: (Optional) The language used to localize + returned language names. Defaults to the + target language on the current client. + + :rtype: list + :returns: List of dictionaries. Each dictionary contains a supported + ISO 639-1 language code (using the dictionary key + ``language``). If ``target_language`` is passed, each + dictionary will also contain the name of each supported + language (localized to the target language). + """ + query_params = {'key': self.api_key} + if target_language is None: + target_language = self.target_language + if target_language is not None: + query_params['target'] = target_language + response = self.connection.api_request( + method='GET', path='/languages', query_params=query_params) + return response.get('data', {}).get('languages', ())
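+A usage sketch; the API key is a placeholder and the output shown is
+illustrative only:
+
+.. code:: python
+
+    >>> from gcloud import translate
+    >>> client = translate.Client('your-api-key')
+    >>> client.get_languages()[0]
+    {'language': 'af', 'name': 'Afrikaans'}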
+ +
[docs] def detect_language(self, values): + """Detect the language of a string or list of strings. + + See: https://cloud.google.com/translate/v2/\ + detecting-language-with-rest + + :type values: str or list + :param values: String or list of strings that will have + language detected. + + :rtype: dict or list + :returns: A list of dictionaries for each queried value. Each + dictionary typically contains three keys + + * ``confidence``: The confidence in language detection, a + float between 0 and 1. + * ``input``: The corresponding input value. + * ``language``: The detected language (as an ISO 639-1 + language code). + + though the key ``confidence`` may not always be present. + + If only a single value is passed, then only a single + dictionary will be returned. + :raises: :class:`ValueError <exceptions.ValueError>` if the number of + detections is not equal to the number of values. + :class:`ValueError <exceptions.ValueError>` if a value + produces a list of detections with 0 or multiple results + in it. + """ + single_value = False + if isinstance(values, six.string_types): + single_value = True + values = [values] + + query_params = [('key', self.api_key)] + query_params.extend(('q', _to_bytes(value, 'utf-8')) + for value in values) + response = self.connection.api_request( + method='GET', path='/detect', query_params=query_params) + detections = response.get('data', {}).get('detections', ()) + + if len(values) != len(detections): + raise ValueError('Expected same number of values and detections', + values, detections) + + for index, value in enumerate(values): + # Empirically, even clearly ambiguous text like "no" only returns + # a single detection, so we replace the list of detections with + # the single detection contained. + if len(detections[index]) == 1: + detections[index] = detections[index][0] + else: + message = ('Expected a single detection per value, API ' + 'returned %d') % (len(detections[index]),) + raise ValueError(message, value, detections[index]) + + detections[index]['input'] = value + # The ``isReliable`` field is deprecated. + detections[index].pop('isReliable', None) + + if single_value: + return detections[0] + else: + return detections
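+A hedged sketch; the confidence values shown are illustrative only:
+
+.. code:: python
+
+    >>> client.detect_language(['Me llamo', 'I am'])
+    [{'confidence': 0.98, 'input': 'Me llamo', 'language': 'es'},
+     {'confidence': 0.93, 'input': 'I am', 'language': 'en'}]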
+ +
[docs] def translate(self, values, target_language=None, format_=None, + source_language=None, customization_ids=()): + """Translate a string or list of strings. + + See: https://cloud.google.com/translate/v2/\ + translating-text-with-rest + + :type values: str or list + :param values: String or list of strings to translate. + + :type target_language: str + :param target_language: The language to translate results into. This + is required by the API and defaults to + the target language of the current instance. + + :type format_: str + :param format_: (Optional) One of ``text`` or ``html``, to specify + if the input text is plain text or HTML. + + :type source_language: str + :param source_language: (Optional) The language of the text to + be translated. + + :type customization_ids: str or list + :param customization_ids: (Optional) ID or list of customization IDs + for translation. Sets the ``cid`` parameter + in the query. + + :rtype: dict or list + :returns: A list of dictionaries for each queried value. Each + dictionary typically contains three keys (though not + all will be present in all cases) + + * ``detectedSourceLanguage``: The detected language (as an + ISO 639-1 language code) of the text. + * ``translatedText``: The translation of the text into the + target language. + * ``input``: The corresponding input value. + + If only a single value is passed, then only a single + dictionary will be returned. + :raises: :class:`ValueError <exceptions.ValueError>` if the number of + values and translations differ. + """ + single_value = False + if isinstance(values, six.string_types): + single_value = True + values = [values] + + if target_language is None: + target_language = self.target_language + if isinstance(customization_ids, six.string_types): + customization_ids = [customization_ids] + + query_params = [('key', self.api_key), ('target', target_language)] + query_params.extend(('q', _to_bytes(value, 'utf-8')) + for value in values) + query_params.extend(('cid', cid) for cid in customization_ids) + if format_ is not None: + query_params.append(('format', format_)) + if source_language is not None: + query_params.append(('source', source_language)) + + response = self.connection.api_request( + method='GET', path='', query_params=query_params) + + translations = response.get('data', {}).get('translations', ()) + if len(values) != len(translations): + raise ValueError('Expected iterations to have same length', + values, translations) + for value, translation in six.moves.zip(values, translations): + translation['input'] = value + + if single_value: + return translations[0] + else: + return translations
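+A hedged sketch; the translation shown is illustrative only:
+
+.. code:: python
+
+    >>> client.translate('koszula')
+    {'detectedSourceLanguage': 'pl', 'input': 'koszula',
+     'translatedText': 'shirt'}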
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/gcloud/translate/connection.html b/0.18.1/_modules/gcloud/translate/connection.html new file mode 100644 index 000000000000..e16fa2bd95cf --- /dev/null +++ b/0.18.1/_modules/gcloud/translate/connection.html @@ -0,0 +1,263 @@ + + + + + + + + gcloud.translate.connection — gcloud 0.18.1 documentation + + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

Source code for gcloud.translate.connection

+# Copyright 2016 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Create / interact with Google Cloud Translate connections."""
+
+from gcloud import connection as base_connection
+
+
+
[docs]class Connection(base_connection.JSONConnection): + """A connection to Google Cloud Translate via the JSON REST API.""" + + API_BASE_URL = 'https://www.googleapis.com' + """The base of the API call URL.""" + + API_VERSION = 'v2' + """The version of the API, used in building the API call's URL.""" + + API_URL_TEMPLATE = '{api_base_url}/language/translate/{api_version}{path}' + """A template for the URL of a particular API call."""
+
+ +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_modules/index.html b/0.18.1/_modules/index.html new file mode 100644 index 000000000000..3de8adaaa30e --- /dev/null +++ b/0.18.1/_modules/index.html @@ -0,0 +1,299 @@ + + + + + + + + Overview: module code — gcloud 0.18.1 documentation + + + + + + + + + + + + + +
+ +
+ +
+ + + Report an Issue + +
+
+ +
+ +

All modules for which code is available

+ + +
+ +
+ + + + + + + \ No newline at end of file diff --git a/0.18.1/_sources/bigquery-client.txt b/0.18.1/_sources/bigquery-client.txt new file mode 100644 index 000000000000..2dbb17c02ed9 --- /dev/null +++ b/0.18.1/_sources/bigquery-client.txt @@ -0,0 +1,13 @@ +BigQuery Client +=============== + +.. automodule:: gcloud.bigquery.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.bigquery.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigquery-dataset.txt b/0.18.1/_sources/bigquery-dataset.txt new file mode 100644 index 000000000000..7ea68c97c4e8 --- /dev/null +++ b/0.18.1/_sources/bigquery-dataset.txt @@ -0,0 +1,6 @@ +Datasets +~~~~~~~~ + +.. automodule:: gcloud.bigquery.dataset + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigquery-job.txt b/0.18.1/_sources/bigquery-job.txt new file mode 100644 index 000000000000..3af44e9da0a3 --- /dev/null +++ b/0.18.1/_sources/bigquery-job.txt @@ -0,0 +1,6 @@ +Jobs +~~~~ + +.. automodule:: gcloud.bigquery.job + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigquery-query.txt b/0.18.1/_sources/bigquery-query.txt new file mode 100644 index 000000000000..735964725327 --- /dev/null +++ b/0.18.1/_sources/bigquery-query.txt @@ -0,0 +1,6 @@ +Query +~~~~~ + +.. automodule:: gcloud.bigquery.query + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigquery-table.txt b/0.18.1/_sources/bigquery-table.txt new file mode 100644 index 000000000000..63297bb471fd --- /dev/null +++ b/0.18.1/_sources/bigquery-table.txt @@ -0,0 +1,6 @@ +Tables +~~~~~~ + +.. automodule:: gcloud.bigquery.table + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigquery-usage.txt b/0.18.1/_sources/bigquery-usage.txt new file mode 100644 index 000000000000..6ff904da5876 --- /dev/null +++ b/0.18.1/_sources/bigquery-usage.txt @@ -0,0 +1,459 @@ +Using the API +============= + +Authentication / Configuration +------------------------------ + +- Use :class:`Client ` objects to configure + your applications. + +- :class:`Client ` objects hold both a ``project`` + and an authenticated connection to the BigQuery service. + +- The authentication credentials can be implicitly determined from the + environment or directly via + :meth:`from_service_account_json ` + and + :meth:`from_service_account_p12 `. + +- After setting :envvar:`GOOGLE_APPLICATION_CREDENTIALS` and + :envvar:`GCLOUD_PROJECT` environment variables, create an instance of + :class:`Client `. + + .. doctest:: + + >>> from gcloud import bigquery + >>> client = bigquery.Client() + + +Projects +-------- + +A project is the top-level container in the ``BigQuery`` API: it is tied +closely to billing, and can provide default access control across all its +datasets. If no ``project`` is passed to the client container, the library +attempts to infer a project using the environment (including explicit +environment variables, GAE, and GCE). + +To override the project inferred from the environment, pass an explicit +``project`` to the constructor, or to either of the alternative +``classmethod`` factories: + + .. doctest:: + + >>> from gcloud import bigquery + >>> client = bigquery.Client(project='PROJECT_ID') + + +Project ACLs +~~~~~~~~~~~~ + +Each project has an access control list granting reader / writer / owner +permission to one or more entities. This list cannot be queried or set +via the API: it must be managed using the Google Developer Console. 
+ + +Datasets +-------- + +A dataset represents a collection of tables, and applies several default +policies to tables as they are created: + +- An access control list (ACL). When created, a dataset has an ACL + which maps to the ACL inherited from its project. + +- A default table expiration period. If set, tables created within the + dataset will have the value as their expiration period. + + +Dataset operations +~~~~~~~~~~~~~~~~~~ + +List datasets for the client's project: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START client_list_datasets] + :end-before: [END client_list_datasets] + +Create a new dataset for the client's project: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_create] + :end-before: [END dataset_create] + +Check for the existence of a dataset: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_exists] + :end-before: [END dataset_exists] + +Refresh metadata for a dataset (to pick up changes made by another client): + +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_reload] + :end-before: [END dataset_reload] + +Patch metadata for a dataset: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_patch] + :end-before: [END dataset_patch] + +Replace the ACL for a dataset, and update all writeable fields: + +.. doctest:: + + >>> from gcloud import bigquery + >>> client = bigquery.Client() + >>> dataset = client.dataset('dataset_name') + >>> dataset.get() # API request + >>> acl = list(dataset.acl) + >>> acl.append(bigquery.Access(role='READER', entity_type='domain', entity='example.com')) + >>> dataset.acl = acl + >>> dataset.update() # API request + +Delete a dataset: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_delete] + :end-before: [END dataset_delete] + + +Tables +------ + +Tables exist within datasets. List tables for the dataset: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START dataset_list_tables] + :end-before: [END dataset_list_tables] + +Create a table: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_create] + :end-before: [END table_create] + +Check for the existence of a table: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_exists] + :end-before: [END table_exists] + +Refresh metadata for a table (to pick up changes made by another client): + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_reload] + :end-before: [END table_reload] + +Patch specific properties for a table: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_patch] + :end-before: [END table_patch] + +Update all writable metadata for a table + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_update] + :end-before: [END table_update] + +Get rows from a table's data: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_fetch_data] + :end-before: [END table_fetch_data] + +Insert rows into a table's data: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_insert_data] + :end-before: [END table_insert_data] + +Upload table data from a file: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START table_upload_from_file] + :end-before: [END table_upload_from_file] + +Delete a table: + +.. 
literalinclude:: bigquery_snippets.py + :start-after: [START table_delete] + :end-before: [END table_delete] + + +Jobs +---- + +Jobs describe actions peformed on data in BigQuery tables: + +- Load data into a table +- Run a query against data in one or more tables +- Extract data from a table +- Copy a table + +List jobs for a project: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START client_list_jobs] + :end-before: [END client_list_jobs] + + +Querying data (synchronous) +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Run a query which can be expected to complete within bounded time: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START client_run_sync_query] + :end-before: [END client_run_sync_query] + +If the rows returned by the query do not fit into the inital response, +then we need to fetch the remaining rows via ``fetch_data``: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START client_run_sync_query_paged] + :end-before: [END client_run_sync_query_paged] + +If the query takes longer than the timeout allowed, ``query.complete`` +will be ``False``. In that case, we need to poll the associated job until +it is done, and then fetch the reuslts: + +.. literalinclude:: bigquery_snippets.py + :start-after: [START client_run_sync_query_timeout] + :end-before: [END client_run_sync_query_timeout] + + +Querying data (asynchronous) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Background a query, loading the results into a table: + +.. doctest:: + + >>> from gcloud import bigquery + >>> client = bigquery.Client() + >>> query = """\ + SELECT firstname + ' ' + last_name AS full_name, + FLOOR(DATEDIFF(CURRENT_DATE(), birth_date) / 365) AS age + FROM dataset_name.persons + """ + >>> dataset = client.dataset('dataset_name') + >>> table = dataset.table(name='person_ages') + >>> job = client.run_async_query('fullname-age-query-job', query) + >>> job.destination_table = table + >>> job.write_disposition= 'truncate' + >>> job.name + 'fullname-age-query-job' + >>> job.job_type + 'query' + >>> job.created + None + >>> job.state + None + +.. note:: + + - ``gcloud.bigquery`` generates a UUID for each job. + - The ``created`` and ``state`` fields are not set until the job + is submitted to the BigQuery back-end. + +Then, begin executing the job on the server: + +.. doctest:: + + >>> job.submit() # API call + >>> job.created + datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=) + >>> job.state + 'running' + +Poll until the job is complete: + +.. doctest:: + + >>> import time + >>> retry_count = 100 + >>> while retry_count > 0 and job.state == 'running': + ... retry_count -= 1 + ... time.sleep(10) + ... job.reload() # API call + >>> job.state + 'done' + >>> job.ended + datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=) + + +Inserting data (asynchronous) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Start a job loading data asynchronously from a set of CSV files, located on +Google Cloud Storage, appending rows into an existing table. First, create +the job locally: + +.. doctest:: + + >>> from gcloud import bigquery + >>> from gcloud.bigquery import SchemaField + >>> client = bigquery.Client() + >>> table = dataset.table(name='person_ages') + >>> table.schema = [ + ... SchemaField('full_name', 'STRING', mode='required'), + ... SchemaField('age', 'INTEGER', mode='required)] + >>> job = client.load_table_from_storage( + ... 
+ + +Inserting data (asynchronous) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Start a job loading data asynchronously from a set of CSV files, located on +Google Cloud Storage, appending rows into an existing table. First, create +the job locally: + +.. doctest:: + + >>> from gcloud import bigquery + >>> from gcloud.bigquery import SchemaField + >>> client = bigquery.Client() + >>> dataset = client.dataset('dataset_name') + >>> table = dataset.table(name='person_ages') + >>> table.schema = [ + ... SchemaField('full_name', 'STRING', mode='required'), + ... SchemaField('age', 'INTEGER', mode='required')] + >>> job = client.load_table_from_storage( + ... 'load-from-storage-job', table, 'gs://bucket-name/object-prefix*') + >>> job.source_format = 'CSV' + >>> job.skip_leading_rows = 1 # count of skipped header rows + >>> job.write_disposition = 'truncate' + >>> job.name + 'load-from-storage-job' + >>> job.job_type + 'load' + >>> job.created + None + >>> job.state + None + +.. note:: + + - ``gcloud.bigquery`` generates a UUID for each job. + - The ``created`` and ``state`` fields are not set until the job + is submitted to the BigQuery back-end. + +Then, begin executing the job on the server: + +.. doctest:: + + >>> job.begin() # API call + >>> job.created + datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=<UTC>) + >>> job.state + 'running' + +Poll until the job is complete: + +.. doctest:: + + >>> import time + >>> retry_count = 100 + >>> while retry_count > 0 and job.state == 'running': + ... retry_count -= 1 + ... time.sleep(10) + ... job.reload() # API call + >>> job.state + 'done' + >>> job.ended + datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=<UTC>) + + +Exporting data (async) +~~~~~~~~~~~~~~~~~~~~~~ + +Start a job exporting a table's data asynchronously to a set of CSV files, +located on Google Cloud Storage. First, create the job locally: + +.. doctest:: + + >>> from gcloud import bigquery + >>> client = bigquery.Client() + >>> dataset = client.dataset('dataset_name') + >>> table = dataset.table(name='person_ages') + >>> job = client.extract_table_to_storage( + ... 'extract-person-ages-job', table, + ... 'gs://bucket-name/export-prefix*.csv') + >>> job.destination_format = 'CSV' + >>> job.print_header = True + >>> job.write_disposition = 'truncate' + >>> job.name + 'extract-person-ages-job' + >>> job.job_type + 'extract' + >>> job.created + None + >>> job.state + None + +.. note:: + + - ``gcloud.bigquery`` generates a UUID for each job. + - The ``created`` and ``state`` fields are not set until the job + is submitted to the BigQuery back-end. + +Then, begin executing the job on the server: + +.. doctest:: + + >>> job.begin() # API call + >>> job.created + datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=<UTC>) + >>> job.state + 'running' + +Poll until the job is complete: + +.. doctest:: + + >>> import time + >>> retry_count = 100 + >>> while retry_count > 0 and job.state == 'running': + ... retry_count -= 1 + ... time.sleep(10) + ... job.reload() # API call + >>> job.state + 'done' + >>> job.ended + datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=<UTC>) + + +Copy tables (async) +~~~~~~~~~~~~~~~~~~~ + +First, create the job locally: + +.. doctest:: + + >>> from gcloud import bigquery + >>> client = bigquery.Client() + >>> dataset = client.dataset('dataset_name') + >>> source_table = dataset.table(name='person_ages') + >>> destination_table = dataset.table(name='person_ages_copy') + >>> job = client.copy_table( + ... 'copy-table-job', destination_table, source_table) + >>> job.name + 'copy-table-job' + >>> job.job_type + 'copy' + >>> job.created + None + >>> job.state + None + +.. note:: + + - ``gcloud.bigquery`` generates a UUID for each job. + - The ``created`` and ``state`` fields are not set until the job + is submitted to the BigQuery back-end. + +Then, begin executing the job on the server: + +.. doctest:: + + >>> job.begin() # API call + >>> job.created + datetime.datetime(2015, 7, 23, 9, 30, 20, 268260, tzinfo=<UTC>) + >>> job.state + 'running' + +Poll until the job is complete: + +.. doctest:: + + >>> import time + >>> retry_count = 100 + >>> while retry_count > 0 and job.state == 'running': + ... retry_count -= 1 + ... time.sleep(10)
+ ... job.reload() # API call + >>> job.state + 'done' + >>> job.ended + datetime.datetime(2015, 7, 23, 9, 30, 21, 334792, tzinfo=<UTC>) diff --git a/0.18.1/_sources/bigtable-client-intro.txt b/0.18.1/_sources/bigtable-client-intro.txt new file mode 100644 index 000000000000..db04ffa0e0c1 --- /dev/null +++ b/0.18.1/_sources/bigtable-client-intro.txt @@ -0,0 +1,98 @@ +Base for Everything +=================== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +To use the API, the :class:`Client ` +class defines a high-level interface which handles authorization +and creating other objects: + +.. code:: python + + from gcloud.bigtable.client import Client + client = Client() + +Long-lived Defaults +------------------- + +When creating a :class:`Client `, the +``user_agent`` and ``timeout_seconds`` arguments have sensible +defaults +(:data:`DEFAULT_USER_AGENT ` and +:data:`DEFAULT_TIMEOUT_SECONDS `). +However, you may override them, and your values will then be used throughout +all API requests made with the ``client`` you create. + +Configuration +------------- + +- For an overview of authentication in ``gcloud-python``, + see :doc:`gcloud-auth`. + +- In addition to any authentication configuration, you can also set the + :envvar:`GCLOUD_PROJECT` environment variable for the Google Cloud Console + project you'd like to interact with. If your code is running in Google App + Engine or Google Compute Engine the project will be detected automatically. + (Setting this environment variable is not required; you may instead pass the + ``project`` explicitly when constructing a + :class:`Client `). + +- After configuring your environment, create a + :class:`Client ` + + .. code:: + + >>> from gcloud import bigtable + >>> client = bigtable.Client() + + or pass in ``credentials`` and ``project`` explicitly + + .. code:: + + >>> from gcloud import bigtable + >>> client = bigtable.Client(project='my-project', credentials=creds) + +.. tip:: + + Be sure to use the **Project ID**, not the **Project Number**. + +Admin API Access +---------------- + +If you'll be using your client to make `Instance Admin`_ and `Table Admin`_ +API requests, you'll need to pass the ``admin`` argument: + +.. code:: python + + client = bigtable.Client(admin=True) + +Read-Only Mode +-------------- + +If, on the other hand, you only have (or want) read access to the data, +you can pass the ``read_only`` argument: + +.. code:: python + + client = bigtable.Client(read_only=True) + +This will ensure that the +:data:`READ_ONLY_SCOPE ` is used +for API requests (so any accidental requests that would modify data will +fail). + +Next Step +--------- + +After a :class:`Client `, the next highest-level +object is an :class:`Instance `. You'll need +one before you can interact with tables or data. + +Head next to learn about the :doc:`bigtable-instance-api`. + +.. _Instance Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1 +.. 
_Table Admin: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/tree/master/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1 diff --git a/0.18.1/_sources/bigtable-client.txt b/0.18.1/_sources/bigtable-client.txt new file mode 100644 index 000000000000..97dc99f1d177 --- /dev/null +++ b/0.18.1/_sources/bigtable-client.txt @@ -0,0 +1,12 @@ +Client +~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.client + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigtable-cluster.txt b/0.18.1/_sources/bigtable-cluster.txt new file mode 100644 index 000000000000..f2986b820253 --- /dev/null +++ b/0.18.1/_sources/bigtable-cluster.txt @@ -0,0 +1,12 @@ +Cluster +~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.cluster + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigtable-column-family.txt b/0.18.1/_sources/bigtable-column-family.txt new file mode 100644 index 000000000000..10eb6307dc56 --- /dev/null +++ b/0.18.1/_sources/bigtable-column-family.txt @@ -0,0 +1,55 @@ +Column Families +=============== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +When creating a +:class:`ColumnFamily `, it is +possible to set garbage collection rules for expired data. + +By setting a rule, cells in the table matching the rule will be deleted +during periodic garbage collection (which executes opportunistically in the +background). + +The types +:class:`MaxAgeGCRule `, +:class:`MaxVersionsGCRule `, +:class:`GarbageCollectionRuleUnion ` and +:class:`GarbageCollectionRuleIntersection ` +can all be used as the optional ``gc_rule`` argument in the +:class:`ColumnFamily ` +constructor. This value is then used in the +:meth:`create() ` and +:meth:`update() ` methods. + +These rules can be nested arbitrarily, with a +:class:`MaxAgeGCRule ` or +:class:`MaxVersionsGCRule ` +at the lowest level of the nesting: + +.. code:: python + + import datetime + + from gcloud.bigtable.column_family import GarbageCollectionRuleIntersection + from gcloud.bigtable.column_family import GarbageCollectionRuleUnion + from gcloud.bigtable.column_family import MaxAgeGCRule + from gcloud.bigtable.column_family import MaxVersionsGCRule + + max_age = datetime.timedelta(days=3) + rule1 = MaxAgeGCRule(max_age) + rule2 = MaxVersionsGCRule(1) + + # Make a composite that matches anything older than 3 days **AND** + # with more than 1 version. + rule3 = GarbageCollectionRuleIntersection(rules=[rule1, rule2]) + + # Make another composite that matches our previous intersection + # **OR** anything that has more than 3 versions. + rule4 = MaxVersionsGCRule(3) + rule5 = GarbageCollectionRuleUnion(rules=[rule3, rule4])
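+ +As a usage sketch, such a composite rule is then passed to the column family +factory of an existing table object (the family ID ``'cf1'`` here is +illustrative): + +.. code:: python + + # ``table`` is an existing gcloud.bigtable.table.Table instance. + column_family = table.column_family('cf1', gc_rule=rule5) + column_family.create()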
+ +---- + +.. automodule:: gcloud.bigtable.column_family + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigtable-data-api.txt b/0.18.1/_sources/bigtable-data-api.txt new file mode 100644 index 000000000000..13ffd706b373 --- /dev/null +++ b/0.18.1/_sources/bigtable-data-api.txt @@ -0,0 +1,350 @@ +Data API +======== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +After creating a :class:`Table ` and some +column families, you are ready to store and retrieve data. + +Cells vs. Columns vs. Column Families ++++++++++++++++++++++++++++++++++++++ + +* As explained in the :doc:`table overview `, tables can + have many column families. +* As described below, a table can also have many rows which are + specified by row keys. +* Within a row, data is stored in a cell. A cell simply has a value (as + bytes) and a timestamp. The number of cells in each row can be + different, depending on what was stored in each row. +* Each cell lies in a column (**not** a column family). A column is really + just a more **specific** modifier within a column family. A column + can be present in every column family, in only one, or anywhere in between. +* Within a column family there can be many columns. For example, within + the column family ``foo`` we could have columns ``bar`` and ``baz``. + These would typically be represented as ``foo:bar`` and ``foo:baz``. + +Modifying Data +++++++++++++++ + +Since data is stored in cells, which are stored in rows, we +use the metaphor of a **row** in classes that are used to modify +(write, update, delete) data in a +:class:`Table `. + +Direct vs. Conditional vs. Append +--------------------------------- + +There are three ways to modify data in a table, described by the +`MutateRow`_, `CheckAndMutateRow`_ and `ReadModifyWriteRow`_ API +methods. + +* The **direct** way is via `MutateRow`_ which involves simply + adding, overwriting or deleting cells. The + :class:`DirectRow ` class + handles direct mutations. +* The **conditional** way is via `CheckAndMutateRow`_. This method + first checks if some filter is matched in a given row, then + applies one of two sets of mutations, depending on whether a match + occurred or not. (These mutation sets are called the "true + mutations" and "false mutations".) The + :class:`ConditionalRow ` class + handles conditional mutations. +* The **append** way is via `ReadModifyWriteRow`_. This simply + appends (as bytes) or increments (as an integer) data in a presumed + existing cell in a row. The + :class:`AppendRow ` class + handles append mutations. + +Row Factory +----------- + +A single factory can be used to create any of the three row types. +To create a :class:`DirectRow `: + +.. code:: python + + row = table.row(row_key) + +Unlike the string values we've used before, the row key must +be ``bytes``. + +To create a :class:`ConditionalRow `, +first create a :class:`RowFilter ` and +then: + +.. code:: python + + cond_row = table.row(row_key, filter_=filter_) + +To create an :class:`AppendRow `: + +.. code:: python + + append_row = table.row(row_key, append=True) + +Building Up Mutations +--------------------- + +In all three cases, a set of mutations (or two sets) is built up +on a row before being sent off in a batch via: + +.. code:: python + + row.commit() + +Direct Mutations +---------------- + +Direct mutations can be added via one of four methods: + +* :meth:`set_cell() ` allows a + single value to be written to a column + + .. code:: python + + row.set_cell(column_family_id, column, value, + timestamp=timestamp) + + If the ``timestamp`` is omitted, the current time on the Google Cloud + Bigtable server will be used when the cell is stored. + + The value can be either bytes or an integer (which will be converted to + bytes as a signed 64-bit integer). + +* :meth:`delete_cell() ` deletes + all cells (i.e. for all timestamps) in a given column + + .. code:: python + + row.delete_cell(column_family_id, column) + + Remember, this only happens in the ``row`` we are using. + + If we only want to delete cells from a limited range of time, a + :class:`TimestampRange ` can + be used + + .. code:: python + + row.delete_cell(column_family_id, column, + time_range=time_range) + +* :meth:`delete_cells() ` does + the same thing as + :meth:`delete_cell() ` + but accepts a list of columns in a column family rather than a single one. + + .. code:: python + + row.delete_cells(column_family_id, [column1, column2], + time_range=time_range) + + In addition, if we want to delete cells from every column in a column family, + the special :attr:`ALL_COLUMNS ` + value can be used + + .. code:: python + + row.delete_cells(column_family_id, row.ALL_COLUMNS, + time_range=time_range) + +* :meth:`delete() ` will delete the + entire row + + .. code:: python + + row.delete()
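+ +Putting these together, a short sketch (the row key, family ID and column +names are illustrative) that batches several direct mutations into a single +``commit()``: + +.. code:: python + + row = table.row(b'row-key-1') + row.set_cell(u'fam1', b'col1', b'value') + row.delete_cell(u'fam1', b'old-col') + row.commit() # Send both mutations in one batch.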
+ +Conditional Mutations +--------------------- + +Making **conditional** modifications is essentially identical +to **direct** modifications: it uses the exact same methods +to accumulate mutations. + +However, each mutation added must specify a ``state``: whether the mutation +will be applied if the filter matches, or if it fails to match. + +For example: + +.. code:: python + + cond_row.set_cell(column_family_id, column, value, + timestamp=timestamp, state=True) + +will add to the set of true mutations. + +Append Mutations +---------------- + +Append mutations can be added via one of two methods: + +* :meth:`append_cell_value() ` + appends a bytes value to an existing cell: + + .. code:: python + + append_row.append_cell_value(column_family_id, column, bytes_value) + +* :meth:`increment_cell_value() ` + increments an integer value in an existing cell: + + .. code:: python + + append_row.increment_cell_value(column_family_id, column, int_value) + + Since only bytes are stored in a cell, the cell value is decoded as + a signed 64-bit integer before being incremented. (This happens on + the Google Cloud Bigtable server, not in the library.) + +Notice that no timestamp was specified. This is because **append** mutations +operate on the latest value of the specified column. + +If there are no cells in the specified column, then the empty string (bytes +case) or zero (integer case) is the assumed value. + +Starting Fresh +-------------- + +If accumulated mutations need to be dropped, use: + +.. code:: python + + row.clear() + +Reading Data +++++++++++++ + +Read Single Row from a Table +---------------------------- + +To make a `ReadRows`_ API request for a single row key, use +:meth:`Table.read_row() `: + +.. code:: python + + >>> row_data = table.read_row(row_key) + >>> row_data.cells + { + u'fam1': { + b'col1': [ + <Cell ...>, + <Cell ...>, + ], + b'col2': [ + <Cell ...>, + ], + }, + u'fam2': { + b'col3': [ + <Cell ...>, + <Cell ...>, + <Cell ...>, + ], + }, + } + >>> cell = row_data.cells[u'fam1'][b'col1'][0] + >>> cell + <Cell ...> + >>> cell.value + b'val1' + >>> cell.timestamp + datetime.datetime(2016, 2, 27, 3, 41, 18, 122823, tzinfo=<UTC>) + +Rather than returning a :class:`DirectRow ` +or similar class, this method returns a +:class:`PartialRowData ` +instance. This class is used for reading and parsing data rather than for +modifying data (as :class:`DirectRow ` is). + +A filter can also be applied to the results: + +.. code:: python + + row_data = table.read_row(row_key, filter_=filter_val) + +The allowable ``filter_`` values are the same as those used for a +:class:`ConditionalRow `. For +more information, see the +:meth:`Table.read_row() ` documentation.
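+ +Since ``cells`` is just a nested mapping (column family -> column -> list of +cells), walking every stored value is straightforward. A small sketch: + +.. code:: python + + for family, columns in row_data.cells.items(): + for column, cells in columns.items(): + for cell in cells: + print(family, column, cell.value, cell.timestamp)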
+ +Stream Many Rows from a Table +----------------------------- + +To make a `ReadRows`_ API request for a stream of rows, use +:meth:`Table.read_rows() `: + +.. code:: python + + row_data = table.read_rows() + +Using gRPC over HTTP/2, a continual stream of responses will be delivered. +In particular: + +* :meth:`consume_next() ` + pulls the next result from the stream, parses it and stores it on the + :class:`PartialRowsData ` instance +* :meth:`consume_all() ` + pulls results from the stream until there are no more +* :meth:`cancel() ` closes + the stream + +See the :class:`PartialRowsData ` +documentation for more information. + +As with +:meth:`Table.read_row() `, an optional +``filter_`` can be applied. In addition, a ``start_key`` and/or ``end_key`` +can be supplied for the stream, a ``limit`` can be set, and a boolean +``allow_row_interleaving`` can be specified to allow faster streamed results +at the potential cost of non-sequential reads. + +See the :meth:`Table.read_rows() ` +documentation for more information on the optional arguments.
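+ +For example, a sketch that pulls an entire (bounded) range and then works +with the parsed rows (assuming, per the ``PartialRowsData`` documentation +referenced above, that the parsed rows are cached on its ``rows`` mapping): + +.. code:: python + + row_data = table.read_rows(start_key=b'key-000', end_key=b'key-999') + row_data.consume_all() + for row_key, row in row_data.rows.items(): + handle_row(row) # ``handle_row`` is a hypothetical callback.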
+ +Sample Keys in a Table +---------------------- + +Make a `SampleRowKeys`_ API request with +:meth:`Table.sample_row_keys() `: + +.. code:: python + + keys_iterator = table.sample_row_keys() + +The returned row keys will delimit contiguous sections of the table of +approximately equal size, which can be used to break up the data for +distributed tasks like mapreduces. + +As with +:meth:`Table.read_rows() `, the +returned ``keys_iterator`` is connected to a cancellable HTTP/2 stream. + +The next key in the result can be accessed via + +.. code:: python + + next_key = keys_iterator.next() + +or all keys can be iterated over via + +.. code:: python + + for curr_key in keys_iterator: + do_something(curr_key) + +Just as with reading, the stream can be canceled: + +.. code:: python + + keys_iterator.cancel() + +.. _ReadRows: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L36-L38 +.. _SampleRowKeys: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L44-L46 +.. _MutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L50-L52 +.. _CheckAndMutateRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L62-L64 +.. _ReadModifyWriteRow: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_service.proto#L70-L72 diff --git a/0.18.1/_sources/bigtable-instance-api.txt b/0.18.1/_sources/bigtable-instance-api.txt new file mode 100644 index 000000000000..4d03fa7b1b97 --- /dev/null +++ b/0.18.1/_sources/bigtable-instance-api.txt @@ -0,0 +1,141 @@ +Instance Admin API +================== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +After creating a :class:`Client `, you can +interact with individual instances for a project. + +List Instances +-------------- + +If you want a comprehensive list of all existing instances, make a +`ListInstances`_ API request with +:meth:`Client.list_instances() `: + +.. code:: python + + instances = client.list_instances() + +Instance Factory +---------------- + +To create an :class:`Instance ` object: + +.. code:: python + + instance = client.instance(instance_id, location_id, + display_name=display_name) + +- ``location_id`` is the ID of the location in which the instance's cluster + will be hosted, e.g. ``'us-central1-c'``. ``location_id`` is required for + instances which do not already exist. + +- ``display_name`` is optional. When not provided, ``display_name`` defaults + to the ``instance_id`` value. + +You can also use :meth:`Client.instance` to create a local wrapper for +instances that have already been created with the API, or through the web +console: + +.. code:: python + + instance = client.instance(existing_instance_id) + instance.reload() + +Create a new Instance +--------------------- + +After creating the instance object, make a `CreateInstance`_ API request +with :meth:`create() `: + +.. code:: python + + instance.display_name = 'My very own instance' + instance.create() + +Check on Current Operation +-------------------------- + +.. note:: + + When modifying an instance (via a `CreateInstance`_ request), the Bigtable + API will return a `long-running operation`_ and a corresponding + :class:`Operation ` object + will be returned by + :meth:`create() `. + +You can check if a long-running operation (for a +:meth:`create() `) has finished +by making a `GetOperation`_ request with +:meth:`Operation.finished() `: + +.. code:: python + + >>> operation = instance.create() + >>> operation.finished() + True + +.. note:: + + Once an :class:`Operation ` object + has returned :data:`True` from + :meth:`finished() `, the + object should not be re-used. Subsequent calls to + :meth:`finished() ` + will result in a :class:`ValueError `.
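+ +A sketch combining the two steps, blocking until the backing operation +completes (the five-second interval is arbitrary): + +.. code:: python + + import time + + operation = instance.create() + while not operation.finished(): # GetOperation API request + time.sleep(5)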
+ +Get metadata for an existing Instance +------------------------------------- + +After creating the instance object, make a `GetInstance`_ API request +with :meth:`reload() `: + +.. code:: python + + instance.reload() + +This will load ``display_name`` for the existing ``instance`` object. + +Update an existing Instance +--------------------------- + +After creating the instance object, make an `UpdateInstance`_ API request +with :meth:`update() `: + +.. code:: python + + instance.display_name = 'New display_name' + instance.update() + +Delete an existing Instance +--------------------------- + +Make a `DeleteInstance`_ API request with +:meth:`delete() `: + +.. code:: python + + instance.delete() + +Next Step +--------- + +Now we go down the hierarchy from +:class:`Instance ` to a +:class:`Table `. + +Head next to learn about the :doc:`bigtable-table-api`. + +.. _Instance Admin API: https://cloud.google.com/bigtable/docs/creating-instance +.. _CreateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L66-L68 +.. _GetInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L38-L40 +.. _UpdateInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L93-L95 +.. _DeleteInstance: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L109-L111 +.. _ListInstances: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/instance/v1/bigtable_instance_service.proto#L44-L46 +.. _GetOperation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L43-L45 +.. _long-running operation: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/longrunning/operations.proto#L73-L102 diff --git a/0.18.1/_sources/bigtable-instance.txt b/0.18.1/_sources/bigtable-instance.txt new file mode 100644 index 000000000000..7ba1c15d8df3 --- /dev/null +++ b/0.18.1/_sources/bigtable-instance.txt @@ -0,0 +1,12 @@ +Instance +~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.instance + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigtable-row-data.txt b/0.18.1/_sources/bigtable-row-data.txt new file mode 100644 index 000000000000..1d45bb5f2962 --- /dev/null +++ b/0.18.1/_sources/bigtable-row-data.txt @@ -0,0 +1,12 @@ +Row Data +~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.row_data + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigtable-row-filters.txt b/0.18.1/_sources/bigtable-row-filters.txt new file mode 100644 index 000000000000..1b03ef7ef59a --- /dev/null +++ b/0.18.1/_sources/bigtable-row-filters.txt @@ -0,0 +1,73 @@ +Bigtable Row Filters +==================== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +It is possible to use a +:class:`RowFilter ` +when adding mutations to a +:class:`ConditionalRow ` and when +reading row data with :meth:`read_row() ` or +:meth:`read_rows() `.
+ +As laid out in the `RowFilter definition`_, the following basic filters +are provided: + +* :class:`SinkFilter <.row_filters.SinkFilter>` +* :class:`PassAllFilter <.row_filters.PassAllFilter>` +* :class:`BlockAllFilter <.row_filters.BlockAllFilter>` +* :class:`RowKeyRegexFilter <.row_filters.RowKeyRegexFilter>` +* :class:`RowSampleFilter <.row_filters.RowSampleFilter>` +* :class:`FamilyNameRegexFilter <.row_filters.FamilyNameRegexFilter>` +* :class:`ColumnQualifierRegexFilter <.row_filters.ColumnQualifierRegexFilter>` +* :class:`TimestampRangeFilter <.row_filters.TimestampRangeFilter>` +* :class:`ColumnRangeFilter <.row_filters.ColumnRangeFilter>` +* :class:`ValueRegexFilter <.row_filters.ValueRegexFilter>` +* :class:`ValueRangeFilter <.row_filters.ValueRangeFilter>` +* :class:`CellsRowOffsetFilter <.row_filters.CellsRowOffsetFilter>` +* :class:`CellsRowLimitFilter <.row_filters.CellsRowLimitFilter>` +* :class:`CellsColumnLimitFilter <.row_filters.CellsColumnLimitFilter>` +* :class:`StripValueTransformerFilter <.row_filters.StripValueTransformerFilter>` +* :class:`ApplyLabelFilter <.row_filters.ApplyLabelFilter>` + +In addition, these filters can be combined into composite filters with: + +* :class:`RowFilterChain <.row_filters.RowFilterChain>` +* :class:`RowFilterUnion <.row_filters.RowFilterUnion>` +* :class:`ConditionalRowFilter <.row_filters.ConditionalRowFilter>` + +These rules can be nested arbitrarily, with a basic filter at the lowest +level. For example: + +.. code:: python + + from gcloud.bigtable.row_filters import ApplyLabelFilter + from gcloud.bigtable.row_filters import ColumnQualifierRegexFilter + from gcloud.bigtable.row_filters import RowFilterChain + from gcloud.bigtable.row_filters import RowFilterUnion + + # Filter in a specified column (matching any column family). + col1_filter = ColumnQualifierRegexFilter(b'columnbia') + + # Create a filter to label results. + label1 = u'label-red' + label1_filter = ApplyLabelFilter(label1) + + # Combine the filters to label all the cells in columnbia. + chain1 = RowFilterChain(filters=[col1_filter, label1_filter]) + + # Create a similar filter to label cells blue. + col2_filter = ColumnQualifierRegexFilter(b'columnseeya') + label2 = u'label-blue' + label2_filter = ApplyLabelFilter(label2) + chain2 = RowFilterChain(filters=[col2_filter, label2_filter]) + + # Bring our two labeled columns together. + row_filter = RowFilterUnion(filters=[chain1, chain2])
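+ +As a usage sketch, the composite can then be passed anywhere a ``filter_`` +argument is accepted, e.g. when streaming rows from an existing table object: + +.. code:: python + + # ``table`` is an existing gcloud.bigtable.table.Table instance. + row_data = table.read_rows(filter_=row_filter) + row_data.consume_all()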
+ +---- + +.. automodule:: gcloud.bigtable.row_filters + :members: + :show-inheritance: + +.. _RowFilter definition: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/1ff247c2e3b7cd0a2dd49071b2d95beaf6563092/bigtable-protos/src/main/proto/google/bigtable/v1/bigtable_data.proto#L195 diff --git a/0.18.1/_sources/bigtable-row.txt b/0.18.1/_sources/bigtable-row.txt new file mode 100644 index 000000000000..e5c95728fb42 --- /dev/null +++ b/0.18.1/_sources/bigtable-row.txt @@ -0,0 +1,13 @@ +Bigtable Row +============ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.row + :members: + :show-inheritance: + :inherited-members: diff --git a/0.18.1/_sources/bigtable-table-api.txt b/0.18.1/_sources/bigtable-table-api.txt new file mode 100644 index 000000000000..554b157031f9 --- /dev/null +++ b/0.18.1/_sources/bigtable-table-api.txt @@ -0,0 +1,160 @@ +Table Admin API +=============== + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +After creating an :class:`Instance `, you can +interact with individual tables, groups of tables or column families within +a table. + +List Tables +----------- + +If you want a comprehensive list of all existing tables in an instance, make a +`ListTables`_ API request with +:meth:`Instance.list_tables() `: + +.. code:: python + + >>> instance.list_tables() + [<Table ...>, + <Table ...>] + +Table Factory +------------- + +To create a :class:`Table ` object: + +.. code:: python + + table = instance.table(table_id) + +Even if this :class:`Table ` has already +been created with the API, you'll still need this object to use as the +parent of a :class:`ColumnFamily ` +or :class:`Row `. + +Create a new Table +------------------ + +After creating the table object, make a `CreateTable`_ API request +with :meth:`create() `: + +.. code:: python + + table.create() + +If you would like to initially split the table into several tablets (tablets +are similar to HBase regions): + +.. code:: python + + table.create(initial_split_keys=['s1', 's2']) + +Delete an existing Table +------------------------ + +Make a `DeleteTable`_ API request with +:meth:`delete() `: + +.. code:: python + + table.delete() + +List Column Families in a Table +------------------------------- + +Though there is no **official** method for retrieving `column families`_ +associated with a table, the `GetTable`_ API method returns a +table object with the names of the column families. + +To retrieve the list of column families use +:meth:`list_column_families() `: + +.. code:: python + + column_families = table.list_column_families() + +Column Family Factory +--------------------- + +To create a +:class:`ColumnFamily ` object: + +.. code:: python + + column_family = table.column_family(column_family_id) + +There is no real reason to use this factory unless you intend to +create or delete a column family. + +In addition, you can specify an optional ``gc_rule`` (a +:class:`GarbageCollectionRule ` +or similar): + +.. code:: python + + column_family = table.column_family(column_family_id, + gc_rule=gc_rule) + +This rule helps the backend determine when and how to clean up old cells +in the column family. + +See :doc:`bigtable-column-family` for more information about +:class:`GarbageCollectionRule ` +and related classes. + +Create a new Column Family +-------------------------- + +After creating the column family object, make a `CreateColumnFamily`_ API +request with +:meth:`ColumnFamily.create() `: + +.. code:: python + + column_family.create() + +Delete an existing Column Family +-------------------------------- + +Make a `DeleteColumnFamily`_ API request with +:meth:`ColumnFamily.delete() `: + +.. code:: python + + column_family.delete() + +Update an existing Column Family +-------------------------------- + +Make an `UpdateColumnFamily`_ API request with +:meth:`ColumnFamily.update() `: + +.. code:: python + + column_family.update() + +Next Step +--------- + +Now we go down the final step of the hierarchy from +:class:`Table ` to +:class:`Row ` as well as streaming +data directly via a :class:`Table `. + +Head next to learn about the :doc:`bigtable-data-api`. + +.. _ListTables: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L40-L42 +.. 
_CreateTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L35-L37 +.. _DeleteTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L50-L52 +.. _RenameTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L56-L58 +.. _GetTable: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L45-L47 +.. _CreateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L61-L63 +.. _UpdateColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L66-L68 +.. _DeleteColumnFamily: https://github.com/GoogleCloudPlatform/cloud-bigtable-client/blob/2aae624081f652427052fb652d3ae43d8ac5bf5a/bigtable-protos/src/main/proto/google/bigtable/admin/table/v1/bigtable_table_service.proto#L71-L73 +.. _column families: https://cloud.google.com/bigtable/docs/schema-design#column_families_and_column_qualifiers diff --git a/0.18.1/_sources/bigtable-table.txt b/0.18.1/_sources/bigtable-table.txt new file mode 100644 index 000000000000..414d567bfd03 --- /dev/null +++ b/0.18.1/_sources/bigtable-table.txt @@ -0,0 +1,12 @@ +Table +~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.table + :members: + :show-inheritance: diff --git a/0.18.1/_sources/bigtable-usage.txt b/0.18.1/_sources/bigtable-usage.txt new file mode 100644 index 000000000000..14cadd084f72 --- /dev/null +++ b/0.18.1/_sources/bigtable-usage.txt @@ -0,0 +1,32 @@ +Using the API +============= + +.. warning:: + + `gRPC`_ is required for using the Cloud Bigtable API. As of May 2016, + `grpcio`_ is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +API requests are sent to the `Google Cloud Bigtable`_ API via RPC over HTTP/2. +In order to support this, we'll rely on `gRPC`_. We are working with the gRPC +team to rapidly make the install story more user-friendly. + +Get started by learning about the +:class:`Client ` on the +:doc:`bigtable-client-intro` page. + +In the hierarchy of API concepts: + +* a :class:`Client ` owns an + :class:`Instance ` +* an :class:`Instance ` owns a + :class:`Table ` +* a :class:`Table ` owns a + :class:`ColumnFamily ` +* a :class:`Table ` owns a + :class:`Row ` + (and all the cells in the row) + +.. _Google Cloud Bigtable: https://cloud.google.com/bigtable/docs/ +.. _gRPC: http://www.grpc.io/ +.. 
_grpcio: https://pypi.python.org/pypi/grpcio diff --git a/0.18.1/_sources/datastore-batches.txt b/0.18.1/_sources/datastore-batches.txt new file mode 100644 index 000000000000..bf7d5c4ffbcc --- /dev/null +++ b/0.18.1/_sources/datastore-batches.txt @@ -0,0 +1,6 @@ +Batches +~~~~~~~ + +.. automodule:: gcloud.datastore.batch + :members: + :show-inheritance: diff --git a/0.18.1/_sources/datastore-client.txt b/0.18.1/_sources/datastore-client.txt new file mode 100644 index 000000000000..23c489dd11c6 --- /dev/null +++ b/0.18.1/_sources/datastore-client.txt @@ -0,0 +1,13 @@ +Datastore Client +================ + +.. automodule:: gcloud.datastore.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.datastore.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/datastore-entities.txt b/0.18.1/_sources/datastore-entities.txt new file mode 100644 index 000000000000..6e263ceeda0e --- /dev/null +++ b/0.18.1/_sources/datastore-entities.txt @@ -0,0 +1,6 @@ +Entities +~~~~~~~~ + +.. automodule:: gcloud.datastore.entity + :members: + :show-inheritance: diff --git a/0.18.1/_sources/datastore-helpers.txt b/0.18.1/_sources/datastore-helpers.txt new file mode 100644 index 000000000000..a70e4d2cf534 --- /dev/null +++ b/0.18.1/_sources/datastore-helpers.txt @@ -0,0 +1,6 @@ +Helpers +~~~~~~~ + +.. automodule:: gcloud.datastore.helpers + :members: + :show-inheritance: diff --git a/0.18.1/_sources/datastore-keys.txt b/0.18.1/_sources/datastore-keys.txt new file mode 100644 index 000000000000..c202471d252d --- /dev/null +++ b/0.18.1/_sources/datastore-keys.txt @@ -0,0 +1,6 @@ +Keys +~~~~ + +.. automodule:: gcloud.datastore.key + :members: + :show-inheritance: diff --git a/0.18.1/_sources/datastore-queries.txt b/0.18.1/_sources/datastore-queries.txt new file mode 100644 index 000000000000..4ffed79925fa --- /dev/null +++ b/0.18.1/_sources/datastore-queries.txt @@ -0,0 +1,6 @@ +Queries +~~~~~~~ + +.. automodule:: gcloud.datastore.query + :members: + :show-inheritance: diff --git a/0.18.1/_sources/datastore-transactions.txt b/0.18.1/_sources/datastore-transactions.txt new file mode 100644 index 000000000000..d86d7fd74c71 --- /dev/null +++ b/0.18.1/_sources/datastore-transactions.txt @@ -0,0 +1,7 @@ +Transactions +~~~~~~~~~~~~ + +.. automodule:: gcloud.datastore.transaction + :members: + :show-inheritance: + :inherited-members: diff --git a/0.18.1/_sources/dns-changes.txt b/0.18.1/_sources/dns-changes.txt new file mode 100644 index 000000000000..40dc0dddc257 --- /dev/null +++ b/0.18.1/_sources/dns-changes.txt @@ -0,0 +1,6 @@ +Change Sets +~~~~~~~~~~~ + +.. automodule:: gcloud.dns.changes + :members: + :show-inheritance: diff --git a/0.18.1/_sources/dns-client.txt b/0.18.1/_sources/dns-client.txt new file mode 100644 index 000000000000..e499c2d0eeff --- /dev/null +++ b/0.18.1/_sources/dns-client.txt @@ -0,0 +1,13 @@ +DNS Client +========== + +.. automodule:: gcloud.dns.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.dns.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/dns-resource-record-set.txt b/0.18.1/_sources/dns-resource-record-set.txt new file mode 100644 index 000000000000..ff46e6c7f1ee --- /dev/null +++ b/0.18.1/_sources/dns-resource-record-set.txt @@ -0,0 +1,6 @@ +Resource Record Sets +~~~~~~~~~~~~~~~~~~~~ + +.. 
automodule:: gcloud.dns.resource_record_set + :members: + :show-inheritance: diff --git a/0.18.1/_sources/dns-usage.txt b/0.18.1/_sources/dns-usage.txt new file mode 100644 index 000000000000..42bd1d5d8548 --- /dev/null +++ b/0.18.1/_sources/dns-usage.txt @@ -0,0 +1,177 @@ +Using the API +============= + +Client +------ + +:class:`Client ` objects provide a means to +configure your DNS applications. Each instance holds both a ``project`` +and an authenticated connection to the DNS service. + +For an overview of authentication in ``gcloud-python``, see :doc:`gcloud-auth`. + +Assuming your environment is set up as described in that document, +create an instance of :class:`Client `. + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client() + +Projects +-------- + +A project is the top-level container in the ``DNS`` API: it is tied +closely to billing, and can provide default access control across all its +managed zones. If no ``project`` is passed to the client constructor, the +library attempts to infer a project using the environment (including explicit +environment variables, GAE, or GCE). + +To override the project inferred from the environment, pass an explicit +``project`` to the constructor, or to either of the alternative +``classmethod`` factories: + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + +Project Quotas +-------------- + +Query the quotas for a given project: + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + >>> quotas = client.quotas() # API request + >>> for key, value in sorted(quotas.items()): + ... print('%s: %s' % (key, value)) + managedZones: 10000 + resourceRecordsPerRrset: 100 + rrsetsPerManagedZone: 10000 + rrsetAdditionsPerChange: 100 + rrsetDeletionsPerChange: 100 + totalRrdataSizePerChange: 10000 + + +Project ACLs +~~~~~~~~~~~~ + +Each project has an access control list granting reader / writer / owner +permission to one or more entities. This list cannot be queried or set +via the API: it must be managed using the Google Developer Console. + + +Managed Zones +------------- + +A "managed zone" is the container for DNS records for the same DNS name +suffix and has a set of name servers that accept and respond to queries: + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + >>> zone = client.zone('acme-co', 'example.com', + ... description='Acme Company zone') + + >>> zone.exists() # API request + False + >>> zone.create() # API request + >>> zone.exists() # API request + True + +List the zones for a given project: + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + >>> zones = client.list_zones() # API request + >>> [zone.name for zone in zones] + ['acme-co'] + + +Resource Record Sets +-------------------- + +Each managed zone exposes a read-only set of resource records: + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + >>> zone = client.zone('acme-co', 'example.com') + >>> records, page_token = zone.list_resource_record_sets() # API request + >>> [(record.name, record.record_type, record.ttl, record.rrdatas) + ... for record in records] + [('example.com.', 'SOA', 21600, ['ns-cloud1.googledomains.com dns-admin.google.com 1 21600 3600 1209600 300'])] + +.. note:: + + The ``page_token`` returned from ``zone.list_resource_record_sets()`` will + be an opaque string if there are more resources than can be returned in a + single request. To enumerate them all, call + ``zone.list_resource_record_sets()`` repeatedly, passing the + ``page_token``, until the token is ``None``. E.g. + + .. doctest:: + + >>> records, page_token = zone.list_resource_record_sets() # API request + >>> while page_token is not None: + ... next_batch, page_token = zone.list_resource_record_sets( + ... page_token=page_token) # API request + ... records.extend(next_batch)
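+ +The same pattern can be wrapped in a small helper function. A sketch, using +only the ``list_resource_record_sets()`` call shown above: + + .. doctest:: + + >>> def all_record_sets(zone): + ... records, page_token = zone.list_resource_record_sets() + ... while page_token is not None: + ... next_batch, page_token = zone.list_resource_record_sets( + ... page_token=page_token) # API request + ... records.extend(next_batch) + ... return records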
+ + +Change requests +--------------- + +Update the resource record set for a zone by creating a change request +bundling additions to or deletions from the set. + + .. doctest:: + + >>> import time + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + >>> zone = client.zone('acme-co', 'example.com') + >>> TWO_HOURS = 2 * 60 * 60 # seconds + >>> record_set = zone.resource_record_set( + ... 'www.example.com.', 'CNAME', TWO_HOURS, ['www1.example.com.',]) + >>> changes = zone.changes() + >>> changes.add_record_set(record_set) + >>> changes.create() # API request + >>> while changes.status != 'done': + ... print('Waiting for changes to complete') + ... time.sleep(60) # or whatever interval is appropriate + ... changes.reload() # API request + + +List changes made to the resource record set for a given zone: + + .. doctest:: + + >>> from gcloud import dns + >>> client = dns.Client(project='PROJECT_ID') + >>> zone = client.zone('acme-co', 'example.com') + >>> changes, page_token = zone.list_changes() # API request + +.. note:: + + The ``page_token`` returned from ``zone.list_changes()`` will be + an opaque string if there are more changes than can be returned in a + single request. To enumerate them all, call ``zone.list_changes()`` + repeatedly, passing the ``page_token``, until the token + is ``None``. E.g.: + + .. doctest:: + + >>> changes, page_token = zone.list_changes() # API request + >>> while page_token is not None: + ... next_batch, page_token = zone.list_changes( + ... page_token=page_token) # API request + ... changes.extend(next_batch) diff --git a/0.18.1/_sources/dns-zone.txt b/0.18.1/_sources/dns-zone.txt new file mode 100644 index 000000000000..22cb456b56f6 --- /dev/null +++ b/0.18.1/_sources/dns-zone.txt @@ -0,0 +1,6 @@ +Managed Zones +~~~~~~~~~~~~~ + +.. automodule:: gcloud.dns.zone + :members: + :show-inheritance: diff --git a/0.18.1/_sources/error-reporting-client.txt b/0.18.1/_sources/error-reporting-client.txt new file mode 100644 index 000000000000..55092baa0880 --- /dev/null +++ b/0.18.1/_sources/error-reporting-client.txt @@ -0,0 +1,7 @@ +Error Reporting Client +======================= + +.. automodule:: gcloud.error_reporting.client + :members: + :show-inheritance: + diff --git a/0.18.1/_sources/error-reporting-usage.txt b/0.18.1/_sources/error-reporting-usage.txt new file mode 100644 index 000000000000..c5dd5f451046 --- /dev/null +++ b/0.18.1/_sources/error-reporting-usage.txt @@ -0,0 +1,102 @@ +Using the API +============= + + +Authentication and Configuration +-------------------------------- + +- For an overview of authentication in ``gcloud-python``, + see :doc:`gcloud-auth`. + +- In addition to any authentication configuration, you should also set the + :envvar:`GCLOUD_PROJECT` environment variable for the project you'd like + to interact with. If you are running on Google App Engine or Google Compute + Engine, this will be detected automatically.
+ +- After configuring your environment, create a + :class:`Client ` + + .. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client() + + or pass in ``credentials`` and ``project`` explicitly + + .. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client(project='my-project', credentials=creds) + + Error Reporting associates errors with a service, which is an identifier for + an executable, App Engine service, or job. The default service is "python", + but a default can be specified for the client at construction time. You can + also optionally specify a version for that service, which defaults to + "default". + + .. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client(project='my-project', + ... service="login_service", + ... version="0.1.0") + +Reporting an exception +----------------------- + +Report a stacktrace to Stackdriver Error Reporting after an exception: + +.. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client() + >>> try: + ... raise NameError + ... except Exception: + ... client.report_exception() + +By default, the client will report the error using the service specified in +the client's constructor, or the default service of "python". + +The user and HTTP context can also be included in the exception. The HTTP +context can be constructed using +:class:`gcloud.error_reporting.HTTPContext`. This will be used by Stackdriver +Error Reporting to help group exceptions. + +.. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client() + >>> user = 'example@gmail.com' + >>> http_context = error_reporting.HTTPContext( + ... method='GET', url='/', userAgent='test agent', + ... referrer='example.com', responseStatusCode=500, + ... remote_ip='1.2.3.4') + >>> try: + ... raise NameError + ... except Exception: + ... client.report_exception(http_context=http_context, user=user) + +Reporting an error without an exception +----------------------------------------- + +Errors can also be reported to Stackdriver Error Reporting outside the context +of an exception. The library will include the file path, function name, and +line number of the location where the error was reported. + +.. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client() + >>> client.report("Found an error!") + +As with reporting an exception, the user and HTTP context can be provided: + +.. doctest:: + + >>> from gcloud import error_reporting + >>> client = error_reporting.Client() + >>> user = 'example@gmail.com' + >>> http_context = error_reporting.HTTPContext( + ... method='GET', url='/', userAgent='test agent', + ... referrer='example.com', responseStatusCode=500, + ... remote_ip='1.2.3.4') + >>> client.report("Found an error!", http_context=http_context, user=user)
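+ +For repeated use, the reporting calls can be wrapped in a small decorator. A +sketch of one convenience pattern (not part of the library) that reports any +exception and then re-raises it: + +.. doctest:: + + >>> import functools + >>> from gcloud import error_reporting + >>> client = error_reporting.Client() + >>> def report_errors(func): + ... @functools.wraps(func) + ... def wrapper(*args, **kwargs): + ... try: + ... return func(*args, **kwargs) + ... except Exception: + ... client.report_exception() + ... raise + ... return wrapper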
diff --git a/0.18.1/_sources/gcloud-api.txt b/0.18.1/_sources/gcloud-api.txt new file mode 100644 index 000000000000..7226dd59c3bf --- /dev/null +++ b/0.18.1/_sources/gcloud-api.txt @@ -0,0 +1,38 @@ +Shared Core Modules +------------------- + +Base Client +~~~~~~~~~~~ + +.. automodule:: gcloud.client + :members: + :show-inheritance: + :inherited-members: + +Credentials Helpers +~~~~~~~~~~~~~~~~~~~ + +.. automodule:: gcloud.credentials + :members: + :show-inheritance: + +Base Connections +~~~~~~~~~~~~~~~~ + +.. automodule:: gcloud.connection + :members: + :show-inheritance: + +Exceptions +~~~~~~~~~~ + +.. automodule:: gcloud.exceptions + :members: + :show-inheritance: + +Environment Variables +~~~~~~~~~~~~~~~~~~~~~ + +.. automodule:: gcloud.environment_vars + :members: + :show-inheritance: diff --git a/0.18.1/_sources/gcloud-auth.txt b/0.18.1/_sources/gcloud-auth.txt new file mode 100644 index 000000000000..c5b10b8a6763 --- /dev/null +++ b/0.18.1/_sources/gcloud-auth.txt @@ -0,0 +1,344 @@ +Authentication +************** + +.. _Overview: + +Overview +======== + +* **If you're running in Compute Engine or App Engine**, + authentication should "just work". + +* **If you're developing locally**, + the easiest way to authenticate is using the `Google Cloud SDK`_: + + .. code-block:: bash + + $ gcloud beta auth application-default login + + Note that this command generates credentials for client libraries. To + authenticate the CLI itself, use: + + .. code-block:: bash + + $ gcloud auth login + + Previously, ``gcloud auth login`` was used for both use cases. If your + gcloud installation does not support the new command, please update it: + + .. code-block:: bash + + $ gcloud components update + +.. _Google Cloud SDK: http://cloud.google.com/sdk + + +* **If you're running your application elsewhere**, + you should download a `service account`_ JSON keyfile + and point to it using an environment variable: + + .. code-block:: bash + + $ export GOOGLE_APPLICATION_CREDENTIALS="/path/to/keyfile.json" + +.. _service account: https://cloud.google.com/storage/docs/authentication#generating-a-private-key + +Client-Provided Authentication +============================== + +Every package uses a :class:`Client ` +as a base for interacting with an API. +For example: + +.. code-block:: python + + from gcloud import datastore + client = datastore.Client() + +Passing no arguments at all will "just work" if you've followed the +instructions in the :ref:`Overview`. +The credentials are inferred from your local environment by using +Google `Application Default Credentials`_. + +.. _Application Default Credentials: https://developers.google.com/identity/protocols/application-default-credentials + +.. _Precedence: + +Credential Discovery Precedence +------------------------------- + +When loading the `Application Default Credentials`_, +the library will check properties of your local environment +in the following order: + +#. Application running in Google App Engine +#. JSON or PKCS12/P12 keyfile pointed to by + ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable +#. Credentials provided by the Google Cloud SDK (via ``gcloud auth login``) +#. Application running in Google Compute Engine + +Explicit Credentials +==================== + +The Application Default Credentials discussed above can be useful +if your code needs to run in many different environments or +if you just don't want authentication to be a focus in your code. + +However, you may want to be explicit because + +* your code will only run in one place +* you may have code which needs to be run as a specific service account + every time (rather than with the locally inferred credentials) +* you may want to use two separate accounts to simultaneously access data + from different projects + +In these situations, you can create an explicit +:class:`Credentials ` object suited to your +environment. +After creation, +you can pass it directly to a :class:`Client `: + +.. code:: python + + client = Client(credentials=credentials) + +Google App Engine Environment +----------------------------- + +To create +:class:`credentials ` +just for Google App Engine: + +.. 
code:: python + + from oauth2client.contrib.appengine import AppAssertionCredentials + credentials = AppAssertionCredentials([]) + +Google Compute Engine Environment +--------------------------------- + +To create +:class:`credentials ` +just for Google Compute Engine: + +.. code:: python + + from oauth2client.contrib.gce import AppAssertionCredentials + credentials = AppAssertionCredentials([]) + +Service Accounts +---------------- + +A `service account`_ can be used with both a JSON keyfile and +a PKCS12/P12 keyfile. + +Directly creating ``credentials`` in `oauth2client`_ for a service +account is a rather complex process, +so as a convenience, the +:meth:`from_service_account_json() ` +and +:meth:`from_service_account_p12() ` +factories are provided to create a :class:`Client ` with +service account credentials. + +.. _oauth2client: http://oauth2client.readthedocs.org/en/latest/ + +For example, with a JSON keyfile: + +.. code:: python + + client = Client.from_service_account_json('/path/to/keyfile.json') + +.. tip:: + + Unless you have a specific reason to use a PKCS12/P12 key for your + service account, + we recommend using a JSON key. + +User Accounts (3-legged OAuth 2.0) with a refresh token +------------------------------------------------------- + +The majority of cases are intended to authenticate machines or +workers rather than actual user accounts. However, it's also +possible to call Google Cloud APIs with a user account via +`OAuth 2.0`_. + +.. _OAuth 2.0: https://developers.google.com/identity/protocols/OAuth2 + +.. tip:: + + A production application should **use a service account**, + but you may wish to use your own personal user account when first + getting started with the ``gcloud-python`` library. + +The simplest way to use credentials from a user account is via +Application Default Credentials using ``gcloud auth login`` +(as mentioned above): + +.. code:: python + + from oauth2client.client import GoogleCredentials + credentials = GoogleCredentials.get_application_default() + +This will still follow the :ref:`precedence ` +described above, +so be sure none of the other possible environments conflict +with your user provided credentials. + +Advanced users of `oauth2client`_ can also use custom flows to +create credentials using `client secrets`_ or using a +`webserver flow`_. +After creation, :class:`Credentials ` +can be serialized with +:meth:`to_json() `, +stored in a file, and then deserialized with +:meth:`from_json() `. + +.. _client secrets: https://developers.google.com/api-client-library/python/guide/aaa_oauth#flow_from_clientsecrets +.. _webserver flow: https://developers.google.com/api-client-library/python/guide/aaa_oauth#OAuth2WebServerFlow + +Troubleshooting +=============== + +Setting up a Service Account +---------------------------- + +If your application is not running on Google Compute Engine, +you need a `Google Developers Service Account`_. + +#. Visit the `Google Developers Console`_. + +#. Create a new project or click on an existing project. + +#. Navigate to **APIs & auth** > **APIs** and enable the APIs + that your application requires. + + .. note:: + + You may need to enable billing in order to use these services. + + * **BigQuery** + + * BigQuery API + + * **Datastore** + + * Google Cloud Datastore API + + * **Pub/Sub** + + * Google Cloud Pub/Sub + + * **Storage** + + * Google Cloud Storage + * Google Cloud Storage JSON API + +#. Navigate to **APIs & auth** > **Credentials**. 
+ + Find the "Add credentials" drop-down and select "Service account" to be + guided through downloading a new JSON keyfile. + + If you want to re-use an existing service account, + you can easily generate a new keyfile. + Just select the account you wish to re-use, + and click **Generate new JSON key**. + +.. _Google Developers Console: https://console.developers.google.com/project +.. _Google Developers Service Account: https://developers.google.com/accounts/docs/OAuth2ServiceAccount + +Using Google Compute Engine +--------------------------- + +If your code is running on Google Compute Engine, +using the inferred Google `Application Default Credentials`_ +will be sufficient for retrieving credentials. + +However, by default your credentials may not grant you +access to the services you intend to use. +Be sure, when you `set up the GCE instance`_, +that you add the correct scopes for the APIs you want to access: + +* **All APIs** + + * ``https://www.googleapis.com/auth/cloud-platform`` + * ``https://www.googleapis.com/auth/cloud-platform.read-only`` + +* **BigQuery** + + * ``https://www.googleapis.com/auth/bigquery`` + * ``https://www.googleapis.com/auth/bigquery.insertdata`` + +* **Datastore** + + * ``https://www.googleapis.com/auth/datastore`` + * ``https://www.googleapis.com/auth/userinfo.email`` + +* **Pub/Sub** + + * ``https://www.googleapis.com/auth/pubsub`` + +* **Storage** + + * ``https://www.googleapis.com/auth/devstorage.full_control`` + * ``https://www.googleapis.com/auth/devstorage.read_only`` + * ``https://www.googleapis.com/auth/devstorage.read_write`` + +.. _set up the GCE instance: https://cloud.google.com/compute/docs/authentication#using + +Advanced Customization +====================== + +Though the ``gcloud-python`` library defaults to using `oauth2client`_ +to sign requests and ``httplib2`` for sending requests, +it is not a strict requirement. + +The :class:`Client ` constructor accepts an optional +``http`` argument in place of a ``credentials`` object. +If passed, all HTTP requests made by the client will use your +custom HTTP object. + +In order for this to be possible, +the ``http`` object must do two things: + +* Handle authentication on its own +* Define a method ``request()`` that can substitute for + :meth:`httplib2.Http.request`. + +The entire signature from ``httplib2`` need not be implemented; +we only use it as + +.. code-block:: python + + http.request(uri, method=method_name, body=body, headers=headers) + +For an example of such an implementation, +a ``gcloud-python`` user created a `custom HTTP class`_ +using the `requests`_ library. + +.. _custom HTTP class: https://github.com/GoogleCloudPlatform/gcloud-python/issues/908#issuecomment-110811556 +.. _requests: http://www.python-requests.org/en/latest/ + +As for handling authentication on your own, +it may be easiest just to re-use bits from ``oauth2client``. +Unfortunately, these parts have a hard dependency on ``httplib2``. +We hope to enable using `custom HTTP libraries`_ with ``oauth2client`` at +some point.
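
For illustration, here is a minimal sketch of an ``http`` object that meets
the two requirements above. It is not part of the library; the class name is
hypothetical, and it assumes you have already obtained a valid OAuth2 access
token out of band:

.. code-block:: python

    import httplib2


    class TokenHTTP(object):
        """Hypothetical HTTP object satisfying both requirements.

        It handles authentication itself (by attaching a Bearer token)
        and exposes an ``httplib2``-compatible ``request()`` method.
        """

        def __init__(self, access_token):
            self._access_token = access_token  # assumed pre-fetched token
            self._http = httplib2.Http()

        def request(self, uri, method='GET', body=None, headers=None):
            # Copy the caller's headers and inject our own credentials.
            headers = dict(headers or {})
            headers['Authorization'] = 'Bearer %s' % (self._access_token,)
            return self._http.request(uri, method=method, body=body,
                                      headers=headers)

An instance could then be passed to a client as
``client = Client(http=TokenHTTP(access_token))``.
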
.. _custom HTTP libraries: https://github.com/google/oauth2client/issues/128 diff --git a/0.18.1/_sources/gcloud-config.txt b/0.18.1/_sources/gcloud-config.txt new file mode 100644 index 000000000000..8d30d7573cdb --- /dev/null +++ b/0.18.1/_sources/gcloud-config.txt @@ -0,0 +1,60 @@ +Configuration +************* + +Overview +======== + +Use service client objects to configure your applications. + +For example: + +.. code-block:: python + + >>> from gcloud import bigquery + >>> client = bigquery.Client() + +When creating a client in this way, the project ID will be determined by +searching these locations in the following order: + +* GCLOUD_PROJECT environment variable +* GOOGLE_APPLICATION_CREDENTIALS JSON file +* Default service configuration path from + ``$ gcloud beta auth application-default login``. +* Google App Engine application ID +* Google Compute Engine project ID (from metadata server) + +You can override the detection of your default project by setting the +``project`` parameter when creating client objects: + +.. code-block:: python + + >>> from gcloud import bigquery + >>> client = bigquery.Client(project='my-project') + +You can see which project ID a client is referencing by accessing the ``project`` +property on the client object: + +.. code-block:: python + + >>> client.project + u'my-project' + +Authentication +============== + +Authentication credentials can be determined implicitly from the +environment or provided directly. See :doc:`gcloud-auth`. + +Logging in via ``gcloud beta auth application-default login`` will +automatically configure a JSON key file with your default project ID and +credentials. + +Setting the ``GOOGLE_APPLICATION_CREDENTIALS`` and ``GCLOUD_PROJECT`` +environment variables will override the automatically configured credentials. + +You can change your default project ID to ``my-new-default-project`` by +using the ``gcloud`` CLI tool to change the configuration: + +.. code-block:: bash + + $ gcloud config set project my-new-default-project diff --git a/0.18.1/_sources/happybase-batch.txt b/0.18.1/_sources/happybase-batch.txt new file mode 100644 index 000000000000..ebeab35082f5 --- /dev/null +++ b/0.18.1/_sources/happybase-batch.txt @@ -0,0 +1,12 @@ +HappyBase Batch +~~~~~~~~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.happybase.batch + :members: + :show-inheritance: diff --git a/0.18.1/_sources/happybase-connection.txt b/0.18.1/_sources/happybase-connection.txt new file mode 100644 index 000000000000..ccdd762d9f44 --- /dev/null +++ b/0.18.1/_sources/happybase-connection.txt @@ -0,0 +1,12 @@ +HappyBase Connection +~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.happybase.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/happybase-package.txt b/0.18.1/_sources/happybase-package.txt new file mode 100644 index 000000000000..19d15c4788e2 --- /dev/null +++ b/0.18.1/_sources/happybase-package.txt @@ -0,0 +1,12 @@ +HappyBase Package +~~~~~~~~~~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. 
As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.happybase.__init__ + :members: + :show-inheritance: diff --git a/0.18.1/_sources/happybase-pool.txt b/0.18.1/_sources/happybase-pool.txt new file mode 100644 index 000000000000..566008445cc5 --- /dev/null +++ b/0.18.1/_sources/happybase-pool.txt @@ -0,0 +1,12 @@ +HappyBase Connection Pool +~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.happybase.pool + :members: + :show-inheritance: diff --git a/0.18.1/_sources/happybase-table.txt b/0.18.1/_sources/happybase-table.txt new file mode 100644 index 000000000000..44cd8f6bb86c --- /dev/null +++ b/0.18.1/_sources/happybase-table.txt @@ -0,0 +1,12 @@ +HappyBase Table +~~~~~~~~~~~~~~~ + +.. warning:: + + gRPC is required for using the Cloud Bigtable API. As of May 2016, + ``grpcio`` is only supported in Python 2.7, so importing + :mod:`gcloud.bigtable` in other versions of Python will fail. + +.. automodule:: gcloud.bigtable.happybase.table + :members: + :show-inheritance: diff --git a/0.18.1/_sources/index.txt b/0.18.1/_sources/index.txt new file mode 100644 index 000000000000..892094048ce4 --- /dev/null +++ b/0.18.1/_sources/index.txt @@ -0,0 +1,206 @@ +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: gcloud + + gcloud-api + gcloud-config + gcloud-auth + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Datastore + + Client + datastore-entities + datastore-keys + datastore-queries + datastore-transactions + datastore-batches + datastore-helpers + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Storage + + Client + storage-blobs + storage-buckets + storage-acl + storage-batch + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Pub/Sub + + pubsub-usage + Client + pubsub-topic + pubsub-subscription + pubsub-message + pubsub-iam + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: BigQuery + + bigquery-usage + Client + bigquery-dataset + bigquery-job + bigquery-table + bigquery-query + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Cloud Bigtable + + bigtable-usage + HappyBase + bigtable-client-intro + bigtable-instance-api + bigtable-table-api + bigtable-data-api + Client + bigtable-instance + bigtable-cluster + bigtable-table + bigtable-column-family + bigtable-row + bigtable-row-filters + bigtable-row-data + happybase-connection + happybase-pool + happybase-table + happybase-batch + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Resource Manager + + Overview + resource-manager-client + resource-manager-project + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: DNS + + dns-usage + Client + dns-zone + dns-resource-record-set + dns-changes + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Stackdriver Logging + + logging-usage + Client + logging-logger + logging-entries + logging-metric + logging-sink + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Stackdriver Error Reporting + + error-reporting-usage + Client + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Stackdriver Monitoring + + monitoring-usage + Client + monitoring-metric + monitoring-resource + monitoring-query + monitoring-timeseries + monitoring-label + +.. toctree:: + :maxdepth: 0 + :hidden: + :caption: Translate + + translate-usage + Client + +.. 
toctree:: + :maxdepth: 0 + :hidden: + :caption: External Links + + GitHub + Issues + Stack Overflow + PyPI + +Getting started +--------------- + +The ``gcloud`` library can be installed with ``pip``: + +.. code-block:: console + + $ pip install gcloud + +If you want to install ``gcloud-python`` from source, +you can clone the repository from GitHub: + +.. code-block:: console + + $ git clone git://github.com/GoogleCloudPlatform/gcloud-python.git + $ cd gcloud-python + $ python setup.py install + +---- + +Cloud Datastore +~~~~~~~~~~~~~~~ + +`Google Cloud Datastore`_ is a fully managed, schemaless database for storing non-relational data. + +.. _Google Cloud Datastore: https://developers.google.com/datastore/ + +.. code-block:: python + + from gcloud import datastore + + client = datastore.Client() + key = client.key('Person') + + entity = datastore.Entity(key=key) + entity['name'] = 'Your name' + entity['age'] = 25 + client.put(entity) + +Cloud Storage +~~~~~~~~~~~~~ + +`Google Cloud Storage`_ allows you to store data on Google infrastructure. + +.. _Google Cloud Storage: https://developers.google.com/storage/ + +.. code-block:: python + + from gcloud import storage + + client = storage.Client() + bucket = client.get_bucket('my-bucket-name') + blob = bucket.blob('my-test-file.txt') + blob.upload_from_string('this is test content!') diff --git a/0.18.1/_sources/logging-client.txt b/0.18.1/_sources/logging-client.txt new file mode 100644 index 000000000000..fb5f009947cd --- /dev/null +++ b/0.18.1/_sources/logging-client.txt @@ -0,0 +1,13 @@ +Stackdriver Logging Client +========================== + +.. automodule:: gcloud.logging.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.logging.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/logging-entries.txt b/0.18.1/_sources/logging-entries.txt new file mode 100644 index 000000000000..a22f6094d927 --- /dev/null +++ b/0.18.1/_sources/logging-entries.txt @@ -0,0 +1,7 @@ +Entries +======= + +.. automodule:: gcloud.logging.entries + :members: + :show-inheritance: + diff --git a/0.18.1/_sources/logging-logger.txt b/0.18.1/_sources/logging-logger.txt new file mode 100644 index 000000000000..4a95d01662a7 --- /dev/null +++ b/0.18.1/_sources/logging-logger.txt @@ -0,0 +1,7 @@ +Logger +====== + +.. automodule:: gcloud.logging.logger + :members: + :show-inheritance: + diff --git a/0.18.1/_sources/logging-metric.txt b/0.18.1/_sources/logging-metric.txt new file mode 100644 index 000000000000..2c49a94049ce --- /dev/null +++ b/0.18.1/_sources/logging-metric.txt @@ -0,0 +1,6 @@ +Metrics +======= + +.. automodule:: gcloud.logging.metric + :members: + :show-inheritance: diff --git a/0.18.1/_sources/logging-sink.txt b/0.18.1/_sources/logging-sink.txt new file mode 100644 index 000000000000..4e2dd40d44dc --- /dev/null +++ b/0.18.1/_sources/logging-sink.txt @@ -0,0 +1,6 @@ +Sinks +===== + +.. automodule:: gcloud.logging.sink + :members: + :show-inheritance: diff --git a/0.18.1/_sources/logging-usage.txt b/0.18.1/_sources/logging-usage.txt new file mode 100644 index 000000000000..3dd2a6efab19 --- /dev/null +++ b/0.18.1/_sources/logging-usage.txt @@ -0,0 +1,379 @@ +Using the API +============= + + +Authentication and Configuration +-------------------------------- + +- For an overview of authentication in ``gcloud-python``, + see :doc:`gcloud-auth`. 
+ +- In addition to any authentication configuration, you should also set the + :envvar:`GCLOUD_PROJECT` environment variable for the project you'd like + to interact with. If you are running on Google App Engine or Google Compute + Engine, this will be detected automatically. + +- After configuring your environment, create a + :class:`Client ` + + .. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + + or pass in ``credentials`` and ``project`` explicitly + + .. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client(project='my-project', credentials=creds) + + +Writing log entries +------------------- + +Write a simple text entry to a logger. + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> logger = client.logger('log_name') + >>> logger.log_text("A simple entry") # API call + +Write a dictionary entry to a logger. + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> logger = client.logger('log_name') + >>> logger.log_struct( + ... {'message': 'My second entry', + ... 'weather': 'partly cloudy'}) # API call + + +Retrieving log entries +---------------------- + +Fetch entries for the default project. + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> entries, token = client.list_entries() # API call + >>> for entry in entries: + ... timestamp = entry.timestamp.isoformat() + ... print('%sZ: %s' % + ... (timestamp, entry.payload)) + 2016-02-17T20:35:49.031864072Z: A simple entry | None + 2016-02-17T20:38:15.944418531Z: None | {'message': 'My second entry', 'weather': 'partly cloudy'} + +Fetch entries across multiple projects. + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> entries, token = client.list_entries( + ... project_ids=['one-project', 'another-project']) # API call + +Filter entries retrieved using the `Advanced Logs Filters`_ syntax. + +.. _Advanced Logs Filters: https://cloud.google.com/logging/docs/view/advanced_filters + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> FILTER = "log:log_name AND textPayload:simple" + >>> entries, token = client.list_entries(filter=FILTER) # API call + +Sort entries in descending timestamp order. + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> entries, token = client.list_entries(order_by=logging.DESCENDING) # API call + +Retrieve entries in batches of 10, iterating until done. + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> retrieved = [] + >>> token = None + >>> while True: + ... entries, token = client.list_entries(page_size=10, page_token=token) # API call + ... retrieved.extend(entries) + ... if token is None: + ... break + +Retrieve entries for a single logger, sorting in descending timestamp order: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> logger = client.logger('log_name') + >>> entries, token = logger.list_entries(order_by=logging.DESCENDING) # API call + +Delete all entries for a logger +------------------------------- + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> logger = client.logger('log_name') + >>> logger.delete() # API call + + +Manage log metrics +------------------ + +Metrics are counters of entries which match a given filter. They can be +used within Stackdriver Monitoring to create charts and alerts. + +Create a metric: + +.. 
doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> metric = client.metric( + ... "robots", "Robots all up in your server", + ... filter='log:apache-access AND textPayload:robot') + >>> metric.exists() # API call + False + >>> metric.create() # API call + >>> metric.exists() # API call + True + +List all metrics for a project: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> metrics, token = client.list_metrics() + >>> len(metrics) + 1 + >>> metric = metrics[0] + >>> metric.name + "robots" + +Refresh local information about a metric: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> metric = client.metric("robots") + >>> metric.reload() # API call + >>> metric.description + "Robots all up in your server" + >>> metric.filter + "log:apache-access AND textPayload:robot" + +Update a metric: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> metric = client.metric("robots") + >>> metric.exists() # API call + True + >>> metric.reload() # API call + >>> metric.description = "Danger, Will Robinson!" + >>> metric.update() # API call + +Delete a metric: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> metric = client.metric("robots") + >>> metric.exists() # API call + True + >>> metric.delete() # API call + >>> metric.exists() # API call + False + +Export log entries using sinks +------------------------------ + +Sinks allow exporting entries which match a given filter to Cloud Storage +buckets, BigQuery datasets, or Cloud Pub/Sub topics. + +Export to Cloud Storage +~~~~~~~~~~~~~~~~~~~~~~~ + +Make sure that the storage bucket you want to export logs to has +``cloud-logs@google.com`` as the owner. See `Set permission for writing exported logs`_. + +Add ``cloud-logs@google.com`` as the owner of ``my-bucket-name``: + +.. doctest:: + + >>> from gcloud import storage + >>> client = storage.Client() + >>> bucket = client.get_bucket('my-bucket-name') + >>> bucket.acl.reload() + >>> logs_group = bucket.acl.group('cloud-logs@google.com') + >>> logs_group.grant_owner() + >>> bucket.acl.add_entity(logs_group) + >>> bucket.acl.save() + +.. _Set permission for writing exported logs: https://cloud.google.com/logging/docs/export/configure_export#setting_product_name_short_permissions_for_writing_exported_logs + +Export to BigQuery +~~~~~~~~~~~~~~~~~~ + +To export logs to BigQuery you must log into the Cloud Platform Console +and add ``cloud-logs@google.com`` to a dataset. + +See: `Setting permissions for BigQuery`_ + +.. doctest:: + + >>> from gcloud import bigquery + >>> from gcloud.bigquery.dataset import AccessGrant + >>> bigquery_client = bigquery.Client() + >>> dataset = bigquery_client.dataset('my-dataset-name') + >>> dataset.create() + >>> dataset.reload() + >>> grants = dataset.access_grants + >>> grants.append(AccessGrant( + ... 'WRITER', 'groupByEmail', 'cloud-logs@google.com')) + >>> dataset.access_grants = grants + >>> dataset.update() + +.. _Setting permissions for BigQuery: https://cloud.google.com/logging/docs/export/configure_export#manual-access-bq + +Export to Pub/Sub +~~~~~~~~~~~~~~~~~ + +To export logs to Cloud Pub/Sub you must log into the Cloud Platform Console +and add ``cloud-logs@google.com`` to a topic. + +See: `Setting permissions for Pub/Sub`_ + +.. 
doctest:: + + >>> from gcloud import pubsub + >>> client = pubsub.Client() + >>> topic = client.topic('your-topic-name') + >>> policy = topic.get_iam_policy() + >>> policy.owners.add(policy.group('cloud-logs@google.com')) + >>> topic.set_iam_policy(policy) + +.. _Setting permissions for Pub/Sub: https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub + +Create a Cloud Storage sink: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sink = client.sink( + ... "robots-storage", + ... 'log:apache-access AND textPayload:robot', + ... 'storage.googleapis.com/my-bucket-name') + >>> sink.exists() # API call + False + >>> sink.create() # API call + >>> sink.exists() # API call + True + +Create a BigQuery sink: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sink = client.sink( + ... "robots-bq", + ... 'log:apache-access AND textPayload:robot', + ... 'bigquery.googleapis.com/projects/my-project/datasets/my-dataset') + >>> sink.exists() # API call + False + >>> sink.create() # API call + >>> sink.exists() # API call + True + +Create a Cloud Pub/Sub sink: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sink = client.sink( + ... "robots-pubsub", + ... 'log:apache-access AND textPayload:robot', + ... 'pubsub.googleapis.com/projects/my-project/topics/my-topic') + >>> sink.exists() # API call + False + >>> sink.create() # API call + >>> sink.exists() # API call + True + +List all sinks for a project: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sinks, token = client.list_sinks() + >>> for sink in sinks: + ... print('%s: %s' % (sink.name, sink.destination)) + robots-storage: storage.googleapis.com/my-bucket-name + robots-bq: bigquery.googleapis.com/projects/my-project/datasets/my-dataset + robots-pubsub: pubsub.googleapis.com/projects/my-project/topics/my-topic + +Refresh local information about a sink: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sink = client.sink('robots-storage') + >>> sink.filter is None + True + >>> sink.reload() # API call + >>> sink.filter + 'log:apache-access AND textPayload:robot' + >>> sink.destination + 'storage.googleapis.com/my-bucket-name' + +Update a sink: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sink = client.sink("robots") + >>> sink.reload() # API call + >>> sink.filter = "log:apache-access" + >>> sink.update() # API call + +Delete a sink: + +.. doctest:: + + >>> from gcloud import logging + >>> client = logging.Client() + >>> sink = client.sink( + ... "robots", + ... filter='log:apache-access AND textPayload:robot') + >>> sink.exists() # API call + True + >>> sink.delete() # API call + >>> sink.exists() # API call + False diff --git a/0.18.1/_sources/monitoring-client.txt b/0.18.1/_sources/monitoring-client.txt new file mode 100644 index 000000000000..baea7d2fd592 --- /dev/null +++ b/0.18.1/_sources/monitoring-client.txt @@ -0,0 +1,13 @@ +Stackdriver Monitoring Client +============================= + +.. automodule:: gcloud.monitoring.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. 
automodule:: gcloud.monitoring.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/monitoring-label.txt b/0.18.1/_sources/monitoring-label.txt new file mode 100644 index 000000000000..a20d5a741af0 --- /dev/null +++ b/0.18.1/_sources/monitoring-label.txt @@ -0,0 +1,6 @@ +Label Descriptors +================= + +.. automodule:: gcloud.monitoring.label + :members: + :show-inheritance: diff --git a/0.18.1/_sources/monitoring-metric.txt b/0.18.1/_sources/monitoring-metric.txt new file mode 100644 index 000000000000..e1fb979e3d22 --- /dev/null +++ b/0.18.1/_sources/monitoring-metric.txt @@ -0,0 +1,6 @@ +Metric Descriptors +================== + +.. automodule:: gcloud.monitoring.metric + :members: + :show-inheritance: diff --git a/0.18.1/_sources/monitoring-query.txt b/0.18.1/_sources/monitoring-query.txt new file mode 100644 index 000000000000..1da7f7e61fcc --- /dev/null +++ b/0.18.1/_sources/monitoring-query.txt @@ -0,0 +1,6 @@ +Time Series Query +================= + +.. automodule:: gcloud.monitoring.query + :members: + :show-inheritance: diff --git a/0.18.1/_sources/monitoring-resource.txt b/0.18.1/_sources/monitoring-resource.txt new file mode 100644 index 000000000000..d9750decd1c6 --- /dev/null +++ b/0.18.1/_sources/monitoring-resource.txt @@ -0,0 +1,6 @@ +Monitored Resource Descriptors +============================== + +.. automodule:: gcloud.monitoring.resource + :members: + :show-inheritance: diff --git a/0.18.1/_sources/monitoring-timeseries.txt b/0.18.1/_sources/monitoring-timeseries.txt new file mode 100644 index 000000000000..2d3133e7823a --- /dev/null +++ b/0.18.1/_sources/monitoring-timeseries.txt @@ -0,0 +1,6 @@ +Time Series +=========== + +.. automodule:: gcloud.monitoring.timeseries + :members: + :show-inheritance: diff --git a/0.18.1/_sources/monitoring-usage.txt b/0.18.1/_sources/monitoring-usage.txt new file mode 100644 index 000000000000..adfd93cefe67 --- /dev/null +++ b/0.18.1/_sources/monitoring-usage.txt @@ -0,0 +1,200 @@ +Using the API +============= + + +Introduction +------------ + +With the Stackdriver Monitoring API, you can work with Stackdriver metric data +pertaining to monitored resources in Google Cloud Platform (GCP) +or elsewhere. + +Essential concepts: + +- Metric data is associated with a **monitored resource**. A monitored + resource has a *resource type* and a set of *resource labels* — + key-value pairs — that identify the particular resource. +- A **metric** further identifies the particular kind of data that + is being collected. It has a *metric type* and a set of *metric + labels* that, when combined with the resource labels, identify + a particular time series. +- A **time series** is a collection of data points associated with + points or intervals in time. + +Please refer to the documentation for the `Stackdriver Monitoring API`_ for +more information. + +At present, this client library supports the following features +of the API: + +- Querying of time series. +- Querying of metric descriptors and monitored resource descriptors. +- Creation and deletion of metric descriptors for custom metrics. +- (Writing of custom metric data will be coming soon.) + +.. _Stackdriver Monitoring API: https://cloud.google.com/monitoring/api/v3/ + + +The Stackdriver Monitoring Client Object +---------------------------------------- + +The Stackdriver Monitoring client library generally makes its +functionality available as methods of the monitoring +:class:`~gcloud.monitoring.client.Client` class. 
+A :class:`~gcloud.monitoring.client.Client` instance holds +authentication credentials and the ID of the target project with +which the metric data of interest is associated. This project ID +will often refer to a `Stackdriver account`_ binding multiple +GCP projects and AWS accounts. It can also simply be the ID of +a monitored project. + +Most often the authentication credentials will be determined +implicitly from your environment. See :doc:`gcloud-auth` for +more information. + +It is thus typical to create a client object as follows:: + + >>> from gcloud import monitoring + >>> client = monitoring.Client(project='target-project') + +If you are running in Google Compute Engine or Google App Engine, +the current project is the default target project. This default +can be further overridden with the :envvar:`GCLOUD_PROJECT` +environment variable. Using the default target project is +even easier:: + + >>> client = monitoring.Client() + +If necessary, you can pass in ``credentials`` and ``project`` explicitly:: + + >>> client = monitoring.Client(project='target-project', credentials=...) + +.. _Stackdriver account: https://cloud.google.com/monitoring/accounts/ + + +Monitored Resource Descriptors +------------------------------ + +The available monitored resource types are defined by *monitored resource +descriptors*. You can fetch a list of these with the +:meth:`~gcloud.monitoring.client.Client.list_resource_descriptors` method:: + + >>> for descriptor in client.list_resource_descriptors(): + ... print(descriptor.type) + +Each :class:`~gcloud.monitoring.resource.ResourceDescriptor` +has a type, a display name, a description, and a list of +:class:`~gcloud.monitoring.label.LabelDescriptor` instances. +See the documentation about `Monitored Resources`_ +for more information. + +.. _Monitored Resources: + https://cloud.google.com/monitoring/api/v3/monitored-resources + + +Metric Descriptors +------------------ + +The available metric types are defined by *metric descriptors*. +They include `platform metrics`_, `agent metrics`_, and `custom metrics`_. +You can list all of these with the +:meth:`~gcloud.monitoring.client.Client.list_metric_descriptors` method:: + + >>> for descriptor in client.list_metric_descriptors(): + ... print(descriptor.type) + +See :class:`~gcloud.monitoring.metric.MetricDescriptor` and the +`Metric Descriptors`_ API documentation for more information. + +You can create new metric descriptors to define custom metrics in +the ``custom.googleapis.com`` namespace. You do this by creating a +:class:`~gcloud.monitoring.metric.MetricDescriptor` object using the +client's :meth:`~gcloud.monitoring.client.Client.metric_descriptor` +factory and then calling the object's +:meth:`~gcloud.monitoring.metric.MetricDescriptor.create` method:: + + >>> from gcloud.monitoring import MetricKind, ValueType + >>> descriptor = client.metric_descriptor( + ... 'custom.googleapis.com/my_metric', + ... metric_kind=MetricKind.GAUGE, + ... value_type=ValueType.DOUBLE, + ... description='This is a simple example of a custom metric.') + >>> descriptor.create() + +You can delete such a metric descriptor as follows:: + + >>> descriptor = client.metric_descriptor( + ... 
'custom.googleapis.com/my_metric') + >>> descriptor.delete() + +To define a custom metric parameterized by one or more labels, +you must build the appropriate +:class:`~gcloud.monitoring.label.LabelDescriptor` objects +and include them in the +:class:`~gcloud.monitoring.metric.MetricDescriptor` object +before you call +:meth:`~gcloud.monitoring.metric.MetricDescriptor.create`:: + + >>> from gcloud.monitoring import LabelDescriptor, LabelValueType + >>> label = LabelDescriptor('response_code', LabelValueType.INT64, + ... description='HTTP status code') + >>> descriptor = client.metric_descriptor( + ... 'custom.googleapis.com/my_app/response_count', + ... metric_kind=MetricKind.CUMULATIVE, + ... value_type=ValueType.INT64, + ... labels=[label], + ... description='Cumulative count of HTTP responses.') + >>> descriptor.create() + +.. _platform metrics: https://cloud.google.com/monitoring/api/metrics +.. _agent metrics: https://cloud.google.com/monitoring/agent/ +.. _custom metrics: https://cloud.google.com/monitoring/custom-metrics/ +.. _Metric Descriptors: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/\ + projects.metricDescriptors + + +Time Series Queries +------------------- + +A time series includes a collection of data points and a set of +resource and metric label values. +See :class:`~gcloud.monitoring.timeseries.TimeSeries` and the +`Time Series`_ API documentation for more information. + +While you can obtain time series objects by iterating over a +:class:`~gcloud.monitoring.query.Query` object, usually it is +more useful to retrieve time series data in the form of a +:class:`pandas.DataFrame`, where each column corresponds to a +single time series. For this, you must have :mod:`pandas` installed; +it is not a required dependency of ``gcloud-python``. + +You can display CPU utilization across your GCE instances during +the last five minutes as follows:: + + >>> METRIC = 'compute.googleapis.com/instance/cpu/utilization' + >>> query = client.query(METRIC, minutes=5) + >>> print(query.as_dataframe()) + +:class:`~gcloud.monitoring.query.Query` objects provide a variety of +methods for refining the query. You can request temporal alignment +and cross-series reduction, and you can filter by label values. +See the client :meth:`~gcloud.monitoring.client.Client.query` method +and the :class:`~gcloud.monitoring.query.Query` class for more +information. + +For example, you can display CPU utilization during the last hour +across GCE instances with names beginning with ``"mycluster-"``, +averaged over five-minute intervals and aggregated per zone, as +follows:: + + >>> from gcloud.monitoring import Aligner, Reducer + >>> METRIC = 'compute.googleapis.com/instance/cpu/utilization' + >>> query = (client.query(METRIC, hours=1) + ... .select_metrics(instance_name_prefix='mycluster-') + ... .align(Aligner.ALIGN_MEAN, minutes=5) + ... .reduce(Reducer.REDUCE_MEAN, 'resource.zone')) + >>> print(query.as_dataframe()) + +.. _Time Series: + https://cloud.google.com/monitoring/api/ref_v3/rest/v3/TimeSeries diff --git a/0.18.1/_sources/pubsub-client.txt b/0.18.1/_sources/pubsub-client.txt new file mode 100644 index 000000000000..858d2b7332b7 --- /dev/null +++ b/0.18.1/_sources/pubsub-client.txt @@ -0,0 +1,13 @@ +Pub/Sub Client +============== + +.. automodule:: gcloud.pubsub.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. 
automodule:: gcloud.pubsub.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/pubsub-iam.txt b/0.18.1/_sources/pubsub-iam.txt new file mode 100644 index 000000000000..6cdfaf266cf5 --- /dev/null +++ b/0.18.1/_sources/pubsub-iam.txt @@ -0,0 +1,8 @@ +IAM Policy +~~~~~~~~~~ + +.. automodule:: gcloud.pubsub.iam + :members: + :member-order: bysource + :show-inheritance: + diff --git a/0.18.1/_sources/pubsub-message.txt b/0.18.1/_sources/pubsub-message.txt new file mode 100644 index 000000000000..3b2561f87b9f --- /dev/null +++ b/0.18.1/_sources/pubsub-message.txt @@ -0,0 +1,6 @@ +Message +~~~~~~~ + +.. automodule:: gcloud.pubsub.message + :members: + :show-inheritance: diff --git a/0.18.1/_sources/pubsub-subscription.txt b/0.18.1/_sources/pubsub-subscription.txt new file mode 100644 index 000000000000..d92d43e57ad4 --- /dev/null +++ b/0.18.1/_sources/pubsub-subscription.txt @@ -0,0 +1,7 @@ +Subscriptions +~~~~~~~~~~~~~ + +.. automodule:: gcloud.pubsub.subscription + :members: + :member-order: bysource + :show-inheritance: diff --git a/0.18.1/_sources/pubsub-topic.txt b/0.18.1/_sources/pubsub-topic.txt new file mode 100644 index 000000000000..e2ddc3c04570 --- /dev/null +++ b/0.18.1/_sources/pubsub-topic.txt @@ -0,0 +1,7 @@ +Topics +~~~~~~ + +.. automodule:: gcloud.pubsub.topic + :members: + :member-order: bysource + :show-inheritance: diff --git a/0.18.1/_sources/pubsub-usage.txt b/0.18.1/_sources/pubsub-usage.txt new file mode 100644 index 000000000000..9a7f7e7acf27 --- /dev/null +++ b/0.18.1/_sources/pubsub-usage.txt @@ -0,0 +1,223 @@ +Using the API +============= + +Authentication / Configuration +------------------------------ + +- Use :class:`Client ` objects to configure + your applications. + +- :class:`Client ` objects hold both a ``project`` + and an authenticated connection to the PubSub service. + +- The authentication credentials can be implicitly determined from the + environment or directly via + :meth:`from_service_account_json ` + and + :meth:`from_service_account_p12 `. + +- After setting ``GOOGLE_APPLICATION_CREDENTIALS`` and ``GCLOUD_PROJECT`` + environment variables, create a :class:`Client ` + + .. doctest:: + + >>> from gcloud import pubsub + >>> client = pubsub.Client() + + +Manage topics for a project +--------------------------- + +List topics for the default project: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START client_list_topics] + :end-before: [END client_list_topics] + +Create a new topic for the default project: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_create] + :end-before: [END topic_create] + +Check for the existence of a topic: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_exists] + :end-before: [END topic_exists] + +Delete a topic: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_delete] + :end-before: [END topic_delete] + +Fetch the IAM policy for a topic: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_get_iam_policy] + :end-before: [END topic_get_iam_policy] + +Update the IAM policy for a topic: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_set_iam_policy] + :end-before: [END topic_set_iam_policy] + +Test permissions allowed by the current IAM policy on a topic: + +.. 
literalinclude:: pubsub_snippets.py + :start-after: [START topic_check_iam_permissions] + :end-before: [END topic_check_iam_permissions] + + +Publish messages to a topic +--------------------------- + +Publish a single message to a topic, without attributes: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_publish_simple_message] + :end-before: [END topic_publish_simple_message] + +Publish a single message to a topic, with attributes: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_publish_message_with_attrs] + :end-before: [END topic_publish_message_with_attrs] + +Publish a set of messages to a topic (as a single request): + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_batch] + :end-before: [END topic_batch] + +.. note:: + + The only API request happens during the ``__exit__()`` of the topic + used as a context manager, and only if the block exits without raising + an exception. + + +Manage subscriptions to topics +------------------------------ + +List all subscriptions for the default project: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START client_list_subscriptions] + :end-before: [END client_list_subscriptions] + +List subscriptions for a topic: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_list_subscriptions] + :end-before: [END topic_list_subscriptions] + +Create a new pull subscription for a topic, with defaults: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_subscription_defaults] + :end-before: [END topic_subscription_defaults] + +Create a new pull subscription for a topic with a non-default ACK deadline: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_subscription_ack90] + :end-before: [END topic_subscription_ack90] + +Create a new push subscription for a topic: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START topic_subscription_push] + :end-before: [END topic_subscription_push] + +Check for the existence of a subscription: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_exists] + :end-before: [END subscription_exists] + +Convert a pull subscription to push: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_pull_push] + :end-before: [END subscription_pull_push] + +Convert a push subscription to pull: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_push_pull] + :end-before: [END subscription_push_pull] + +Re-synchronize a subscription with the back-end: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_reload] + :end-before: [END subscription_reload] + +Fetch the IAM policy for a subscription: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_get_iam_policy] + :end-before: [END subscription_get_iam_policy] + +Update the IAM policy for a subscription: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_set_iam_policy] + :end-before: [END subscription_set_iam_policy] + +Test permissions allowed by the current IAM policy on a subscription: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_check_iam_permissions] + :end-before: [END subscription_check_iam_permissions]
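
As a rough illustration of what such a permissions check looks like in code,
here is a sketch (the topic and subscription names and the permission strings
below are assumptions for illustration, not an excerpt from
``pubsub_snippets.py``):

.. doctest::

   >>> from gcloud import pubsub
   >>> client = pubsub.Client()
   >>> topic = client.topic('topic_name')
   >>> subscription = topic.subscription('sub_name')
   >>> TO_CHECK = ['pubsub.subscriptions.consume',
   ...             'pubsub.subscriptions.update']
   >>> allowed = subscription.check_iam_permissions(TO_CHECK)  # API call
   >>> set(allowed) <= set(TO_CHECK)
   True

The call returns only the subset of the passed permissions which the current
credentials actually hold on the subscription.
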
Delete a subscription: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_delete] + :end-before: [END subscription_delete] + + +Pull messages from a subscription +--------------------------------- + +Fetch pending messages for a pull subscription: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_pull] + :end-before: [END subscription_pull] + +Note that received messages must be acknowledged, or else the back-end +will re-send them later: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_acknowledge] + :end-before: [END subscription_acknowledge] + +Fetch messages for a pull subscription without blocking (none pending): + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_pull_return_immediately] + :end-before: [END subscription_pull_return_immediately] + +Update the acknowledgement deadline for pulled messages: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_modify_ack_deadline] + :end-before: [END subscription_modify_ack_deadline] + +Fetch pending messages, acknowledging those whose processing doesn't raise an +error: + +.. literalinclude:: pubsub_snippets.py + :start-after: [START subscription_pull_autoack] + :end-before: [END subscription_pull_autoack] + +.. note:: + + The ``pull`` API request occurs at entry to the ``with`` block, and the + ``acknowledge`` API request occurs at the end, passing only the ``ack_ids`` + which haven't been deleted from the list. diff --git a/0.18.1/_sources/resource-manager-api.txt b/0.18.1/_sources/resource-manager-api.txt new file mode 100644 index 000000000000..6caf53e93b1d --- /dev/null +++ b/0.18.1/_sources/resource-manager-api.txt @@ -0,0 +1,86 @@ +Resource Manager Overview +------------------------- + +The Cloud Resource Manager API provides methods that you can use +to programmatically manage your projects in the Google Cloud Platform. +With this API, you can do the following: + +- Get a list of all projects associated with an account +- Create new projects +- Update existing projects +- Delete projects +- Undelete, or recover, projects that you didn't mean to delete + +.. note:: + + Don't forget to look at the :ref:`Authentication` section below. + It's slightly different from the rest of this library. + +Here's a quick example of the full life-cycle: + +.. code-block:: python + + >>> from gcloud import resource_manager + >>> client = resource_manager.Client() + + >>> # List all projects you have access to + >>> for project in client.list_projects(): + ... print(project) + + >>> # Create a new project + >>> new_project = client.new_project('your-project-id-here', + ... name='My new project') + >>> new_project.create() + + >>> # Update an existing project + >>> project = client.fetch_project('my-existing-project') + >>> print(project) + + >>> project.name = 'Modified name' + >>> project.update() + >>> print(project) + + + >>> # Delete a project + >>> project = client.new_project('my-existing-project') + >>> project.delete() + + >>> # Undelete a project + >>> project = client.new_project('my-existing-project') + >>> project.undelete() + +.. _Authentication: + +Authentication +~~~~~~~~~~~~~~ + +Unlike the other APIs, the Resource Manager API is focused on managing your +various projects inside Google Cloud Platform. What this means (as of +August 2015) is that you can't use a Service Account to work with some +parts of this API (for example, creating projects). 
+ +The reason is actually pretty simple: if your API call is trying to do +something like create a project, what project's Service Account can you use? +Currently none. + +This means that for this API you should always use the credentials +provided by the `Google Cloud SDK`_, which you can get by running +``gcloud auth login``. + +.. _Google Cloud SDK: http://cloud.google.com/sdk + +Once you run that command, ``gcloud-python`` will automatically pick up the +credentials, and you can use the "automatic discovery" feature of the library. + +Start by authenticating: + +.. code-block:: bash + + $ gcloud auth login + +And then simply create a client: + +.. code-block:: python + + >>> from gcloud import resource_manager + >>> client = resource_manager.Client() diff --git a/0.18.1/_sources/resource-manager-client.txt b/0.18.1/_sources/resource-manager-client.txt new file mode 100644 index 000000000000..4c354b25bec5 --- /dev/null +++ b/0.18.1/_sources/resource-manager-client.txt @@ -0,0 +1,17 @@ +.. toctree:: + :maxdepth: 0 + :hidden: + +Client +------ + +.. automodule:: gcloud.resource_manager.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.resource_manager.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/resource-manager-project.txt b/0.18.1/_sources/resource-manager-project.txt new file mode 100644 index 000000000000..eee478c2533f --- /dev/null +++ b/0.18.1/_sources/resource-manager-project.txt @@ -0,0 +1,6 @@ +Projects +~~~~~~~~ + +.. automodule:: gcloud.resource_manager.project + :members: + :show-inheritance: diff --git a/0.18.1/_sources/storage-acl.txt b/0.18.1/_sources/storage-acl.txt new file mode 100644 index 000000000000..6c2e60b55042 --- /dev/null +++ b/0.18.1/_sources/storage-acl.txt @@ -0,0 +1,6 @@ +ACL +~~~ + +.. automodule:: gcloud.storage.acl + :members: + :show-inheritance: diff --git a/0.18.1/_sources/storage-batch.txt b/0.18.1/_sources/storage-batch.txt new file mode 100644 index 000000000000..15d96186975c --- /dev/null +++ b/0.18.1/_sources/storage-batch.txt @@ -0,0 +1,6 @@ +Batches +~~~~~~~ + +.. automodule:: gcloud.storage.batch + :members: + :show-inheritance: diff --git a/0.18.1/_sources/storage-blobs.txt b/0.18.1/_sources/storage-blobs.txt new file mode 100644 index 000000000000..e6a157771a1d --- /dev/null +++ b/0.18.1/_sources/storage-blobs.txt @@ -0,0 +1,6 @@ +Blobs / Objects +~~~~~~~~~~~~~~~ + +.. automodule:: gcloud.storage.blob + :members: + :show-inheritance: diff --git a/0.18.1/_sources/storage-buckets.txt b/0.18.1/_sources/storage-buckets.txt new file mode 100644 index 000000000000..44ed1118040d --- /dev/null +++ b/0.18.1/_sources/storage-buckets.txt @@ -0,0 +1,6 @@ +Buckets +~~~~~~~ + +.. automodule:: gcloud.storage.bucket + :members: + :show-inheritance: diff --git a/0.18.1/_sources/storage-client.txt b/0.18.1/_sources/storage-client.txt new file mode 100644 index 000000000000..9fb00e91f92f --- /dev/null +++ b/0.18.1/_sources/storage-client.txt @@ -0,0 +1,13 @@ +Storage Client +============== + +.. automodule:: gcloud.storage.client + :members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.storage.connection + :members: + :show-inheritance: diff --git a/0.18.1/_sources/translate-client.txt b/0.18.1/_sources/translate-client.txt new file mode 100644 index 000000000000..14c76ba9d6c0 --- /dev/null +++ b/0.18.1/_sources/translate-client.txt @@ -0,0 +1,15 @@ +Translate Client +================ + +.. 
automodule:: gcloud.translate.client + :members: + :undoc-members: + :show-inheritance: + +Connection +~~~~~~~~~~ + +.. automodule:: gcloud.translate.connection + :members: + :undoc-members: + :show-inheritance: diff --git a/0.18.1/_sources/translate-usage.txt b/0.18.1/_sources/translate-usage.txt new file mode 100644 index 000000000000..f79f7e07d1c5 --- /dev/null +++ b/0.18.1/_sources/translate-usage.txt @@ -0,0 +1,123 @@ +Using the API +============= + +With `Google Translate`_, you can dynamically translate text +between thousands of language pairs. The Google Translate API +lets websites and programs integrate with Google Translate +programmatically. The Google Translate API is available as a +paid service. See the `Pricing`_ and `FAQ`_ pages for details. + +Authentication / Configuration +------------------------------ + +- Use :class:`~gcloud.translate.client.Client` objects to configure + your applications. + +- :class:`~gcloud.translate.client.Client` objects hold both a ``key`` + and a connection to the Translate service. + +- **An API key is required for Translate.** See + `Identifying your application to Google`_ for details. This is + significantly different from the other clients in ``gcloud-python``. + +Methods +------- + +To create a client: + + .. code:: + + >>> from gcloud import translate + >>> client = translate.Client('my-api-key') + +By default, the client targets English when doing detections +and translations, but a non-default value can be used as +well: + + .. code:: + + >>> from gcloud import translate + >>> client = translate.Client('my-api-key', target_language='es') + +The Google Translate API has three supported methods, and they +map to three methods on a client: +:meth:`~gcloud.translate.client.Client.get_languages`, +:meth:`~gcloud.translate.client.Client.detect_language` and +:meth:`~gcloud.translate.client.Client.translate`. + +To get a list of languages supported by Google Translate: + + .. code:: + + >>> from gcloud import translate + >>> client = translate.Client('my-api-key') + >>> client.get_languages() + [ + { + 'language': 'af', + 'name': 'Afrikaans', + }, + ... + ] + +To detect the language that some given text is written in: + + .. code:: + + >>> from gcloud import translate + >>> client = translate.Client('my-api-key') + >>> client.detect_language(['Me llamo', 'I am']) + [ + { + 'confidence': 0.25830904, + 'input': 'Me llamo', + 'language': 'es', + }, { + 'confidence': 0.17112699, + 'input': 'I am', + 'language': 'en', + }, + ] + +The `confidence`_ value is an optional floating-point value between 0 and 1. +The closer this value is to 1, the higher the confidence level for the +language detection. This member is not always available. + +To translate text: + + .. code:: + + >>> from gcloud import translate + >>> client = translate.Client('my-api-key') + >>> client.translate('koszula') + { + 'translatedText': 'shirt', + 'detectedSourceLanguage': 'pl', + 'input': 'koszula', + } + +or to use a non-default target language: + + .. code:: + + >>> from gcloud import translate + >>> client = translate.Client('my-api-key') + >>> client.translate(['Me llamo Jeff', 'My name is Jeff'], + ... target_language='de') + [ + { + 'translatedText': 'Mein Name ist Jeff', + 'detectedSourceLanguage': 'es', + 'input': 'Me llamo Jeff', + }, { + 'translatedText': 'Mein Name ist Jeff', + 'detectedSourceLanguage': 'en', + 'input': 'My name is Jeff', + }, + ] + +.. _Google Translate: https://cloud.google.com/translate +.. 
_Pricing: https://cloud.google.com/translate/v2/pricing.html +.. _FAQ: https://cloud.google.com/translate/v2/faq.html +.. _Identifying your application to Google: https://cloud.google.com/translate/v2/using_rest#auth +.. _confidence: https://cloud.google.com/translate/v2/detecting-language-with-rest diff --git a/0.18.1/_static/ajax-loader.gif b/0.18.1/_static/ajax-loader.gif new file mode 100644 index 000000000000..61faf8cab239 Binary files /dev/null and b/0.18.1/_static/ajax-loader.gif differ diff --git a/0.18.1/_static/alabaster.css b/0.18.1/_static/alabaster.css new file mode 100644 index 000000000000..a88ce299cbae --- /dev/null +++ b/0.18.1/_static/alabaster.css @@ -0,0 +1,693 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: 'Garamond', 'Georgia', serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + +div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: 'goudy old style', 'minion pro', 'bell mt', Georgia, 'Hiragino Mincho Pro', serif; + font-size: 1em; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + 
+a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: ; + border-bottom: 1px solid #fafafa; +} + +dd div.admonition { + margin-left: -60px; + padding-left: 60px; +} + +div.admonition p.admonition-title { + font-family: 'Garamond', 'Georgia', serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +div.highlight { + background-color: #fff; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'Deja Vu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + 
margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: #EEE; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +dl dl pre { + margin-left: -90px; + padding-left: 90px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + + +@media screen and (max-width: 870px) { + + div.sphinxsidebar { + display: none; + } + + div.document { + width: 100%; + + } + + div.documentwrapper { + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.bodywrapper { + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + margin-left: 0; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 30px; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .bodywrapper { + margin: 0; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + + +} + + + +@media screen and (max-width: 875px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + } + + div.sphinxsidebar { + display: block; + float: none; + width: 102.5%; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + padding: 0; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Make nested-list/multi-paragraph items look better in Releases changelog + * pages. Without this, docutils' magical list fuckery causes inconsistent + * formatting between different release sub-lists. 
+ */ +div#changelog > div.section > ul > li > p:only-child { + margin-bottom: 0; +} + +/* Hide fugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} \ No newline at end of file diff --git a/0.18.1/_static/basic.css b/0.18.1/_static/basic.css new file mode 100644 index 000000000000..2b513f0c9643 --- /dev/null +++ b/0.18.1/_static/basic.css @@ -0,0 +1,604 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + width: 170px; +} + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + 
border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.field-list ul { + padding-left: 1em; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + 
list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlighted { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/0.18.1/_static/comment-bright.png b/0.18.1/_static/comment-bright.png new file mode 100644 index 000000000000..551517b8c83b Binary files /dev/null and b/0.18.1/_static/comment-bright.png differ diff --git a/0.18.1/_static/comment-close.png b/0.18.1/_static/comment-close.png new file mode 100644 index 000000000000..09b54be46da3 Binary files /dev/null and 
b/0.18.1/_static/comment-close.png differ diff --git a/0.18.1/_static/comment.png b/0.18.1/_static/comment.png new file mode 100644 index 000000000000..92feb52b8824 Binary files /dev/null and b/0.18.1/_static/comment.png differ diff --git a/0.18.1/_static/css/main.css b/0.18.1/_static/css/main.css new file mode 100644 index 000000000000..6b2e6d0e1903 --- /dev/null +++ b/0.18.1/_static/css/main.css @@ -0,0 +1,1432 @@ +/*! HTML5 Boilerplate v4.3.0 | MIT License | http://h5bp.com/ */ + +/* + * What follows is the result of much research on cross-browser styling. + * Credit left inline and big thanks to Nicolas Gallagher, Jonathan Neal, + * Kroc Camen, and the H5BP dev community and team. + */ + +/* ========================================================================== + Base styles: opinionated defaults + ========================================================================== */ + +html, +button, +input, +select, +textarea { + color: #222; +} + +html { + font-size: 1em; + line-height: 1.4; +} + +*[id]:before { + display: block; + content: " "; + margin-top: -70px; + height: 70px; + visibility: hidden; +} + +/* + * Remove text-shadow in selection highlight: h5bp.com/i + * These selection rule sets have to be separate. + * Customize the background color to match your design. + */ + +::-moz-selection { + background: #b3d4fc; + text-shadow: none; +} + +::selection { + background: #b3d4fc; + text-shadow: none; +} + +/* + * A better looking default horizontal rule + */ + +hr { + display: block; + height: 1px; + border: 0; + border-top: 1px solid #ccc; + margin: 1em 0; + padding: 0; +} + +/* + * Remove the gap between images, videos, audio and canvas and the bottom of + * their containers: h5bp.com/i/440 + */ + +audio, +canvas, +img, +video { + vertical-align: middle; +} + +/* + * Remove default fieldset styles. + */ + +fieldset { + border: 0; + margin: 0; + padding: 0; +} + +/* + * Allow only vertical resizing of textareas. 
+ */ + +textarea { + resize: vertical; +} + +/* ========================================================================== + Browse Happy prompt + ========================================================================== */ + +.browsehappy { + margin: 0.2em 0; + background: #ccc; + color: #000; + padding: 0.2em 0; +} + +/* ========================================================================== + Author's custom styles + ========================================================================== */ + +html { + height: 100%; + background: #fff; +} + +body { + min-height: 100%; + font-family: 'Roboto', sans-serif; + color: #5d6061; +} + + +/* Global Elements + ========================================================================== */ + +pre { + border: 1px solid rgba(0,0,0,0.2); + /* Border Radius */ + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; + background: #fff; + font-size: 0.9em; + line-height: 1.6em; +} + +pre, +code { + font-family: Monaco, 'Droid Sans Mono', monospace !important; +} + +img { + max-width: 100%; +} + +h1, h2, h3, h4, h5, h6 { + font-weight: normal; + font-weight: 300; +} + +h2, h3 { + color: #4285f4; +} + +/* Header + ========================================================================== */ + +.page-header, +.hero-banner { + background: #4285f4; + color: #fff; +} + +.page-header { + position: relative; + padding: 1em; +} + +.page-header.fixed { + position: fixed; + z-index: 2; + top: 0; + width: 100%; + padding: 0; +} + +/* + Logo +*/ + +.logo { + margin: 0; + width: 13em; + font-size: 1em; + line-height: normal; +} + +.page-header.fixed .logo { + width: auto; +} + +.page-header.fixed a { + color: #fff; + text-decoration: none; +} + +.page-header.fixed a:hover { + opacity: 0.4; +} + +.page-header.fixed .logo img { + position: relative; + top: -0.2em; + width: 2em; + margin: 0 0.5em; +} + +.page-header.fixed .gcloud { + display: inline-block; + padding: 0.4em 0 0.6em 0.6em; + border-left: 1px solid rgba(255,255,255,0.2); + font-family: 'Open Sans', sans-serif; + font-weight: 300; + font-size: 1.4em; +} + +/* + Menu +*/ + +.nav-current { + display: block; + position: absolute; + top: 1.2em; + right: 1em; + width: 24px; + height: 20px; + background: url(../images/icon-menu.svg) no-repeat; + text-indent: -90000px; + cursor: pointer; +} + +.page-header.fixed .nav-current { + top: 1em; +} + +.menu { + display: none; + position: absolute; + top: 3.6em; + right: 0; + width: 100%; + margin: 0; + padding: 0; + background: #2570ec; + /* Box Shadow */ + -webkit-box-shadow: 5px 5px 8px rgba(0,16,41,0.3); + -moz-box-shadow: 5px 5px 8px rgba(0,16,41,0.3); + box-shadow: 5px 5px 8px rgba(0,16,41,0.3); + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.page-header.fixed .menu { + top: 3.3em; +} + +.menu a { + display: block; + padding: 1em; + border-top: 1px solid rgba(255,255,255,0.2); + color: #fff; + text-decoration: none; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.menu a:hover { + background: #1a65e0; +} + +.menu-icon { + margin-right: 0.5em; +} + +/* + Open Menu + */ + +.main-nav.open .nav-current { + opacity: 0.4; +} + +.main-nav.open .menu { + display: block; +} + + + +/* Home Content + 
========================================================================== */ + +/* + Main Content +*/ + +.main { + font-size: 0.9em; + line-height: 1.8em; +} + +.container { + padding: 2.8em 2em; +} + +.block-title { + margin-top: 0; + font-size: 1.6em; +} + +/* + Hero Banner +*/ + +.hero-banner h1 { + margin: 0 0 0.6em; + font-family: 'Open Sans', sans-serif; + font-size: 3.5em; + font-weight: 300; +} + +.hero-banner p { + margin-bottom: 2.2em; + font-size: 0.9em; + line-height: 1.6em; +} + +.hero-banner h2 { + margin-bottom: 0.2em; + font-size: 1.3em; +} + +.hero-banner pre { + margin: 0; + padding: 1em; + border: none; + background: #2a74ed; +} + +/* + What is it? + */ +.about pre { + font-size: 110%; +} + +/* + Featuring +*/ + +.featuring .block-title { + text-align: center; +} + +.featuring p { + font-size: 0.9em; + line-height: 1.6em; +} + +.featuring-links { + list-style: none; + margin: 0; + margin-bottom: -1em; + padding: 0; +} + +.btn, +.ext-link { + display: block; +} + +.btn { + padding: 1em; + border: none; + /* Border Radius */ + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; + background: #db4437; + color: #fff; + text-decoration: none; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.btn:hover { + background: #f24f41; +} + +.btn img { + margin-right: 0.5em; +} + +.featuring-links .btn { + margin-bottom: 1em; + padding: 1.5em; + font-size: 1.1em; + text-align: center; +} + +.featuring-links .btn img { + width: 2em; +} + +.btn-docs img { + background-color: #fff; + border: 1px solid transparent; + -webkit-border-radius: 80px; + -moz-border-radius: 80px; + border-radius: 80px; + padding: 4px; + width: 40px; + /* Transitions */ + -webkit-transition: all 0.5s ease; + -moz-transition: all 0.5s ease; + -ms-transition: all 0.5s ease; + -o-transition: all 0.5s ease; + transition: all 0.5s ease; +} + +.btn-docs:hover img { + border: 1px solid #ccc; + transform: rotate(135deg); + -webkit-box-shadow: 4px 4px 4px rgba(0, 0, 0, .3); + -moz-box-shadow: 4px 4px 4px rgba(0, 0, 0, .3); + box-shadow: 4px 4px 4px rgba(0, 0, 0, .3); +} + +.btn-docs:active img { + -webkit-box-shadow: 4px 4px 4px rgba(0, 0, 0, .5); + -moz-box-shadow: 4px 4px 4px rgba(0, 0, 0, .5); + box-shadow: 4px 4px 4px rgba(0, 0, 0, .5); +} + +.ext-link { + display: block; + padding: 1em; + border-bottom: 1px solid rgba(0,0,0,0.1); + color: #5d6061; + text-decoration: none; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.featuring-links li:last-child .ext-link { + border-bottom: none; +} + +.ext-link:hover { + background: #f6f6f6; +} + +.ext-link img { + opacity: 0.5; + margin-right: 0.5em; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.ext-link:hover img { + opacity: 0.7; +} + +.pagination { + margin: 2em 0 0; + padding: 0; + list-style: none; + text-decoration: none; + text-align: center; +} + +.pagination li { + display: inline-block; + width: 1em; + height: 1em; + margin: 0 0.2em; +} + +.pagination a { + display: block; + width: 100%; + height: 100%; + border: 1px solid rgba(0,0,0,0.2); + /* Border Radius */ + -webkit-border-radius: 50%; + -moz-border-radius: 50%; + 
border-radius: 50%; + background: #fff; + text-indent: -90000px; +} + +.pagination a:hover { + background: rgba(0,0,0,0.1); +} + +.pagination .current, +.pagination .current:hover { + background: #db4437; + border-color: #db4437; +} + +/* + About +*/ + +.about { + background: #eee; +} + +.about h4 { + margin-bottom: 0; + font-size: 1.2em; + font-weight: bold; + color: #4285f4; +} + +/* + FAQ +*/ + +.faq-btn, +.faq-questions { + max-width: 20em; + margin: 0; + padding: 0; + list-style: none; +} + +.faq-btn { + position: relative; + margin-bottom: 2em; +} + +.faq-btn .current { + background: #e6eefc url(../images/icon-dropdown-faq.svg) 95% 50% no-repeat; +} + +.faq-btn .current, +.faq-questions a { + display: block; + padding: 1em; + border: 1px solid #a7bfe8; + color: #2b70e2; + cursor: pointer; + text-decoration: none; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.faq-questions { + display: none; + position: absolute; + width: 100%; +} + +.faq-questions a { + border-top: none; + background: #e6eefc; +} + +.faq-questions a:hover { + background: #fcfdff; +} + +/* + Open FAQ button + */ + +.faq-btn.open .current { + background-color: #c6d7f6; + /* Box Shadow */ + -webkit-box-shadow: inset 0 0 10px rgba(16,71,163,0.3); + -moz-box-shadow: inset 0 0 10px rgba(16,71,163,0.3); + box-shadow: inset 0 0 10px rgba(16,71,163,0.3); + color: #1555bf; +} + +.faq-btn.open .faq-questions { + display: block; +} + + +/* Docs Content + ========================================================================== */ + +.admonition { + background-color: #f8f8f8; +} + +.admonition p.admonition-title { + background-color: #e5ecf9; + font-weight: bold; + padding: 8px; +} + +.admonition pre { + background-color: #eeeeee !important; +} + +.admonition p { + padding: 0 8px; +} + +.admonition .last { + padding-bottom: 8px; +} + +.docs-header { + position: relative; + padding: 7em 2em 4em; + background: #f8f8f8; + border-bottom: 1px solid rgba(0,0,0,0.05); +} + +.headerlink { + display: none; + padding: 0 7px; + left: -24px; + text-decoration: none; +} + +.headerlink:hover { + color: #505050; + display: block; +} + +h2 .headerlink { + color: #2b70e2; +} + +h2 .headerlink:hover { + color: #4285f4; + display: block; +} + +.highlight pre { + background: #f8f8f8; +} + +.method-heading { + position: relative; +} + +.param-optional .param-types { + font-style: italic; +} + +.param-optional .param-description:before { + content: "optional. 
"; + color: #aaa; + font-style: italic; +} + +.sub-heading { + color: #5d6061; + margin: 0 !important; +} + +.toggler { + float: left; + min-width: 15px; + margin: auto; +} + +.toggle { + cursor: pointer; +} + +.viewcode-link { + float: right; +} + +/* + Page Title +*/ + +.page-title { + margin: 0; + font-family: 'Open Sans', sans-serif; + font-weight: 300; + color: #4285f4; + font-size: 2.4em; + line-height: 1em; +} + +/* + Versions +*/ + +.versions { + display: inline-block; + margin-top: 2em; +} + +.versions span, +.versions a { + display: block; +} + +.v-current { + font-size: 1.2em; + color: #2b70e2; +} + +.v-current i { + font-size: 0.7em; +} + +.v-btn { + padding: 0.5em; + border: 1px solid rgba(0,0,0,0.2); + background: rgba(0,0,0,0.07); + font-size: 0.8em; + color: rgba(0,0,0,0.6); + text-align: center; + text-decoration: none; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.v-btn:hover { + background: rgba(0,0,0,0.02); +} + +.v-btn img { + position: relative; + top: -0.1em; + opacity: 0.3; +} + +#file-issue { + position: absolute; + right: 20px; + top: 20px; + display: none; +} + +#file-issue-secondary { + margin-top: 1em; + display: inline-block; +} + +.v-list { + color: rgba(0,0,0,0.2); +} + +.v-list a { + color: #4285f4; + text-decoration: none; +} + +.v-list a:hover { + text-decoration: underline; +} + +/* + Content + */ + + +.content { + padding: 1em 2em; +} + +.content pre, +.field-list { + border: 0; + margin-bottom: 2em; +} + +.content h2, .content h3, .content h4, .content h5, .content h6 { + margin: 2em 0 0.5em; +} + +.content>h2:first-child { + margin-top: 1em; +} + +/* + Tables + */ + +.field-list { + text-align: left; +} + +.field-list th, +.field-list td { + padding: 0.3em 1em; + border: 1px solid #cfcfcf; +} + +.field-list th[scope="col"] { + border-color: #2264d0; + background: #4285f4; + color: #fff; +} + +.field-name { + background: #f6f6f6; + font-weight: 'bold'; +} + +/* + Side Nav + */ + + .side-nav { + padding-bottom: 3em; + background: #efefef; + } + +.side-nav a { + display: block; + padding: 0.3em 2em; + color: #5d6061; + text-decoration: none; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.side-nav a:hover { + background: rgba(255,255,255,0.7); +} + +.side-nav .current .current, +.side-nav .current .current:hover { + background: #e2e2e2; +} + +.side-nav ul { + margin: 0; + padding: 0; +} + +.side-nav ul ul a { + padding-left: 4em; +} + +.side-nav .external-links { + margin-top: 2em; +} + +.side-nav > ul > li.toctree-l1 { + padding-left: 2em; +} + +p.caption { + padding-left: 2em; +} + +span.caption-text { + font-weight: bold; +} + +.external-links img { + margin-right: 0.3em; + opacity: 0.3; + /* Transitions */ + -webkit-transition: all 0.3s ease; + -moz-transition: all 0.3s ease; + -ms-transition: all 0.3s ease; + -o-transition: all 0.3s ease; + transition: all 0.3s ease; +} + +.external-links a:hover img { + opacity: 0.6; +} + + + +/* ========================================================================== + Helper classes + ========================================================================== */ + +/* + * Image replacement + */ + +.ir { + background-color: transparent; + border: 0; + overflow: hidden; + /* IE 6/7 fallback */ + *text-indent: -9999px; +} + 
+.ir:before { + content: ""; + display: block; + width: 0; + height: 150%; +} + +/* + * Hide from both screenreaders and browsers: h5bp.com/u + */ + +.hidden { + display: none !important; + visibility: hidden; +} + +/* + * Hide only visually, but have it available for screenreaders: h5bp.com/v + */ + +.visuallyhidden { + border: 0; + clip: rect(0 0 0 0); + height: 1px; + margin: -1px; + overflow: hidden; + padding: 0; + position: absolute; + width: 1px; +} + +/* + * Extends the .visuallyhidden class to allow the element to be focusable + * when navigated to via the keyboard: h5bp.com/p + */ + +.visuallyhidden.focusable:active, +.visuallyhidden.focusable:focus { + clip: auto; + height: auto; + margin: 0; + overflow: visible; + position: static; + width: auto; +} + +/* + * Hide visually and from screenreaders, but maintain layout + */ + +.invisible { + visibility: hidden; +} + +/* + * Clearfix: contain floats + * + * For modern browsers + * 1. The space content is one way to avoid an Opera bug when the + * `contenteditable` attribute is included anywhere else in the document. + * Otherwise it causes space to appear at the top and bottom of elements + * that receive the `clearfix` class. + * 2. The use of `table` rather than `block` is only necessary if using + * `:before` to contain the top-margins of child elements. + */ + +.clearfix:before, +.clearfix:after { + content: " "; /* 1 */ + display: table; /* 2 */ +} + +.clearfix:after { + clear: both; +} + +/* + * For IE 6/7 only + * Include this rule to trigger hasLayout and contain floats. + */ + +.clearfix { + *zoom: 1; +} + +/* ========================================================================== + EXAMPLE Media Queries for Responsive Design. + These examples override the primary ('mobile first') styles. + Modify as content requires. 
+ ========================================================================== */ + +@media only screen and (min-width: 37em) { + + /* + Main + */ + + .main { + font-size: 1em; + } + + /* + Featuring + */ + + .featuring-links li:first-child { + display: block; + } + + .featuring-links { + text-align: center; + } + + .featuring-links li { + display: inline-block; + } + + .featuring-links .btn { + display: inline-block; + padding: 1em 2.4em; + } + + .ext-link { + display: inline-block; + padding: 0.8em 1.2em; + border: none; + /* Border Radius */ + -webkit-border-radius: 5px; + -moz-border-radius: 5px; + border-radius: 5px; + } + + .pagination li { + width: 0.6em; + height: 0.6em; + } + +} + +@media only screen and (min-width: 50em) { + + /* + Header + */ + + .page-header { + padding: 1.6em; + } + + .page-header.fixed .logo img { + margin: 0 0.8em; + } + + .page-header.fixed .gcloud { + padding: 0 0 0 1em; + height: 70px; + line-height: 70px; + } + + #file-issue { + display: inline; + } + + #file-issue-secondary { + display: none; + } + + /* + Logo + */ + + .logo { + width: 280px; + } + + /* + Menu + */ + + .main-nav { + position: absolute; + top: 1.2em; + left: 21.5em; + } + + .page-header.fixed .main-nav { + top: 0; + left: 11.5em; + } + + .nav-current { + position: relative; + top: 0; + left: 0; + padding: 0.8em 1.6em; + width: 150px; + height: auto; + border: 1px solid rgba(255,255,255,0.4); + background: url(../images/icon-dropdown.svg) 90% 50% no-repeat; + text-indent: 0; + } + + .page-header.fixed .nav-current { + top: 0; + padding: 0 1.6em; + height: 70px; + border: 1px solid rgba(255,255,255,0.2); + border-top: none; + border-bottom: none; + line-height: 70px; + } + + .nav-current:hover { + background-color: rgba(255,255,255,0.1); + } + + .menu { + top: 3em; + left: 0; + } + + .menu a { + padding: 1.2em 1.5em; + } + + .page-header.fixed .menu { + top: 70px; + } + + /* + Docs Header + */ + + .docs-header { + padding-top: 7.7em; + } + + .versions { + position: absolute; + top: 6em; + right: 2em; + margin: 0; + } + + .v-btn { + font-size: 0.7em; + line-height: normal; + } + + /* + Content + */ + + .container, + .content { + width: 80%; + margin: 0 auto; + padding: 2em 0; + } + + /* + Hero Banner + */ + + .hero-banner { + padding: 2em 0; + } + + .hero-banner h1 { + font-size: 5em; + margin-bottom: 0.8em; + } + + .hero-banner p { + font-size: 1em; + line-height: 2em; + } + + /* + Featuring + */ + + .featuring .block-title { + margin-bottom: 1.4em; + } + +} + +@media only screen and (min-width: 60em) { + + /* + Content + */ + + .container { + width: 90%; + max-width: 1020px; + font-size: 0.9em; + } + + .col { + width: 46%; + } + + .col-left { + float: left; + } + + .col-right { + float: right; + } + + .block-title { + font-size: 2em; + } + + /* + Hero Banner + */ + + .hero-banner { + padding-bottom: 0; + } + + .hero-banner .col-right { + padding-top: 3.6em; + } + + .hero-banner h1 { + font-size: 5.6em; + } + + .hero-banner p { + font-size: 1.1em; + } + + .hero-banner h2 { + font-size: 1.3em; + margin-bottom: 0.4em; + } + + .hero-banner pre { + font-size: 1.1em; + padding: 1em 1.5em; + } + + /* + Featuring + */ + + .featuring { + text-align: center; + } + + .featuring-links li { + font-size: 1em; + } + + .featuring-links li:first-child { + display: inline-block; + } + + .featuring-links .btn { + margin-right: 0.5em; + } + + .ext-link { + padding: 0.5em 1.2em; + } + + .featuring p { + max-width: 80%; + margin: 0 auto; + font-size: 1em; + } + + /* + About + */ + + .about .col-right { + 
padding-top: 2.4em; + } + + /* + FAQ + */ + + .faq .answer { + -moz-column-count: 2; + -moz-column-gap: 50px; + -webkit-column-count: 2; + -webkit-column-gap: 50px; + column-count: 2; + column-gap: 50px; + } + + /* + Docs Page + */ + + .lang-page { + background: url(../images/lang-bg.png) repeat-y; + } + + .docs-header { + margin-left: 240px; + } + + .content { + width: 100%; + max-width: 1070px; + padding-left: 290px; + padding-right: 2em; + /* Box Sizing */ + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; + margin: 0; + font-size: 0.9em; + } + + .side-nav { + position: absolute; + top: 0; + left: 0; + width: 240px; + padding-top: 7.7em; + font-size: 0.9em; + } + + .side-nav a { + padding-left: 2.5em; + } + +} + +@media print, + (-o-min-device-pixel-ratio: 5/4), + (-webkit-min-device-pixel-ratio: 1.25), + (min-resolution: 120dpi) { + /* Style adjustments for high resolution devices */ +} + +/* ========================================================================== + Print styles. + Inlined to avoid required HTTP connection: h5bp.com/r + ========================================================================== */ + +@media print { + * { + background: transparent !important; + color: #000 !important; /* Black prints faster: h5bp.com/s */ + box-shadow: none !important; + text-shadow: none !important; + } + + a, + a:visited { + text-decoration: underline; + } + + a[href]:after { + content: " (" attr(href) ")"; + } + + abbr[title]:after { + content: " (" attr(title) ")"; + } + + /* + * Don't show links for images, or javascript/internal links + */ + + .ir a:after, + a[href^="javascript:"]:after, + a[href^="#"]:after { + content: ""; + } + + pre, + blockquote { + border: 1px solid #999; + page-break-inside: avoid; + } + + thead { + display: table-header-group; /* h5bp.com/t */ + } + + tr, + img { + page-break-inside: avoid; + } + + img { + max-width: 100% !important; + } + + @page { + margin: 0.5cm; + } + + p, + h2, + h3 { + orphans: 3; + widows: 3; + } + + h2, + h3 { + page-break-after: avoid; + } +} diff --git a/0.18.1/_static/css/normalize.css b/0.18.1/_static/css/normalize.css new file mode 100644 index 000000000000..42e24d6880b5 --- /dev/null +++ b/0.18.1/_static/css/normalize.css @@ -0,0 +1,527 @@ +/*! normalize.css v1.1.3 | MIT License | git.io/normalize */ + +/* ========================================================================== + HTML5 display definitions + ========================================================================== */ + +/** + * Correct `block` display not defined in IE 6/7/8/9 and Firefox 3. + */ + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +nav, +section, +summary { + display: block; +} + +/** + * Correct `inline-block` display not defined in IE 6/7/8/9 and Firefox 3. + */ + +audio, +canvas, +video { + display: inline-block; + *display: inline; + *zoom: 1; +} + +/** + * Prevent modern browsers from displaying `audio` without controls. + * Remove excess height in iOS 5 devices. + */ + +audio:not([controls]) { + display: none; + height: 0; +} + +/** + * Address styling not present in IE 7/8/9, Firefox 3, and Safari 4. + * Known issue: no IE 6 support. + */ + +[hidden] { + display: none; +} + +/* ========================================================================== + Base + ========================================================================== */ + +/** + * 1. Correct text resizing oddly in IE 6/7 when body `font-size` is set using + * `em` units. + * 2. 
Prevent iOS text size adjust after orientation change, without disabling + * user zoom. + */ + +html { + font-size: 100%; /* 1 */ + -ms-text-size-adjust: 100%; /* 2 */ + -webkit-text-size-adjust: 100%; /* 2 */ +} + +/** + * Address `font-family` inconsistency between `textarea` and other form + * elements. + */ + +html, +button, +input, +select, +textarea { + font-family: sans-serif; +} + +/** + * Address margins handled incorrectly in IE 6/7. + */ + +body { + margin: 0; +} + +/* ========================================================================== + Links + ========================================================================== */ + +/** + * Address `outline` inconsistency between Chrome and other browsers. + */ + +a:focus { + outline: thin dotted; +} + +/** + * Improve readability when focused and also mouse hovered in all browsers. + */ + +a:active, +a:hover { + outline: 0; +} + +/* ========================================================================== + Typography + ========================================================================== */ + +/** + * Address font sizes and margins set differently in IE 6/7. + * Address font sizes within `section` and `article` in Firefox 4+, Safari 5, + * and Chrome. + */ + +h1 { + font-size: 2em; + margin: 0.67em 0; +} + +h2 { + font-size: 1.5em; + margin: 0.83em 0; +} + +h3 { + font-size: 1.17em; + margin: 1em 0; +} + +h4 { + font-size: 1em; + margin: 1.33em 0; +} + +h5 { + font-size: 0.83em; + margin: 1.67em 0; +} + +h6 { + font-size: 0.67em; + margin: 2.33em 0; +} + +/** + * Address styling not present in IE 7/8/9, Safari 5, and Chrome. + */ + +abbr[title] { + border-bottom: 1px dotted; +} + +/** + * Address style set to `bolder` in Firefox 3+, Safari 4/5, and Chrome. + */ + +b, +strong { + font-weight: bold; +} + +blockquote { + margin: 1em 40px; +} + +/** + * Address styling not present in Safari 5 and Chrome. + */ + +dfn { + font-style: italic; +} + +/** + * Address differences between Firefox and other browsers. + * Known issue: no IE 6/7 normalization. + */ + +hr { + -moz-box-sizing: content-box; + box-sizing: content-box; + height: 0; +} + +/** + * Address styling not present in IE 6/7/8/9. + */ + +mark { + background: #ff0; + color: #000; +} + +/** + * Address margins set differently in IE 6/7. + */ + +p, +pre { + margin: 1em 0; +} + +/** + * Correct font family set oddly in IE 6, Safari 4/5, and Chrome. + */ + +code, +kbd, +pre, +samp { + font-family: monospace, serif; + _font-family: 'courier new', monospace; + font-size: 1em; +} + +/** + * Improve readability of pre-formatted text in all browsers. + */ + +pre { + white-space: pre; + white-space: pre-wrap; + word-wrap: break-word; +} + +/** + * Address CSS quotes not supported in IE 6/7. + */ + +q { + quotes: none; +} + +/** + * Address `quotes` property not supported in Safari 4. + */ + +q:before, +q:after { + content: ''; + content: none; +} + +/** + * Address inconsistent and variable font size in all browsers. + */ + +small { + font-size: 80%; +} + +/** + * Prevent `sub` and `sup` affecting `line-height` in all browsers. + */ + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} + +sup { + top: -0.5em; +} + +sub { + bottom: -0.25em; +} + +/* ========================================================================== + Lists + ========================================================================== */ + +/** + * Address margins set differently in IE 6/7. 
+ */ + +dl, +menu, +ol, +ul { + margin: 1em 0; +} + +dd { + margin: 0 0 0 40px; +} + +/** + * Address paddings set differently in IE 6/7. + */ + +menu, +ol, +ul { + padding: 0 0 0 40px; +} + +/** + * Correct list images handled incorrectly in IE 7. + */ + +nav ul, +nav ol { + list-style: none; + list-style-image: none; +} + +/* ========================================================================== + Embedded content + ========================================================================== */ + +/** + * 1. Remove border when inside `a` element in IE 6/7/8/9 and Firefox 3. + * 2. Improve image quality when scaled in IE 7. + */ + +img { + border: 0; /* 1 */ + -ms-interpolation-mode: bicubic; /* 2 */ +} + +/** + * Correct overflow displayed oddly in IE 9. + */ + +svg:not(:root) { + overflow: hidden; +} + +/* ========================================================================== + Figures + ========================================================================== */ + +/** + * Address margin not present in IE 6/7/8/9, Safari 5, and Opera 11. + */ + +figure { + margin: 0; +} + +/* ========================================================================== + Forms + ========================================================================== */ + +/** + * Correct margin displayed oddly in IE 6/7. + */ + +form { + margin: 0; +} + +/** + * Define consistent border, margin, and padding. + */ + +fieldset { + border: 1px solid #c0c0c0; + margin: 0 2px; + padding: 0.35em 0.625em 0.75em; +} + +/** + * 1. Correct color not being inherited in IE 6/7/8/9. + * 2. Correct text not wrapping in Firefox 3. + * 3. Correct alignment displayed oddly in IE 6/7. + */ + +legend { + border: 0; /* 1 */ + padding: 0; + white-space: normal; /* 2 */ + *margin-left: -7px; /* 3 */ +} + +/** + * 1. Correct font size not being inherited in all browsers. + * 2. Address margins set differently in IE 6/7, Firefox 3+, Safari 5, + * and Chrome. + * 3. Improve appearance and consistency in all browsers. + */ + +button, +input, +select, +textarea { + font-size: 100%; /* 1 */ + margin: 0; /* 2 */ + vertical-align: baseline; /* 3 */ + *vertical-align: middle; /* 3 */ +} + +/** + * Address Firefox 3+ setting `line-height` on `input` using `!important` in + * the UA stylesheet. + */ + +button, +input { + line-height: normal; +} + +/** + * Address inconsistent `text-transform` inheritance for `button` and `select`. + * All other form control elements do not inherit `text-transform` values. + * Correct `button` style inheritance in Chrome, Safari 5+, and IE 6+. + * Correct `select` style inheritance in Firefox 4+ and Opera. + */ + +button, +select { + text-transform: none; +} + +/** + * 1. Avoid the WebKit bug in Android 4.0.* where (2) destroys native `audio` + * and `video` controls. + * 2. Correct inability to style clickable `input` types in iOS. + * 3. Improve usability and consistency of cursor style between image-type + * `input` and others. + * 4. Remove inner spacing in IE 7 without affecting normal text inputs. + * Known issue: inner spacing remains in IE 6. + */ + +button, +html input[type="button"], /* 1 */ +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; /* 2 */ + cursor: pointer; /* 3 */ + *overflow: visible; /* 4 */ +} + +/** + * Re-set default cursor for disabled elements. + */ + +button[disabled], +html input[disabled] { + cursor: default; +} + +/** + * 1. Address box sizing set to content-box in IE 8/9. + * 2. Remove excess padding in IE 8/9. + * 3. Remove excess padding in IE 7. 
+ * Known issue: excess padding remains in IE 6. + */ + +input[type="checkbox"], +input[type="radio"] { + box-sizing: border-box; /* 1 */ + padding: 0; /* 2 */ + *height: 13px; /* 3 */ + *width: 13px; /* 3 */ +} + +/** + * 1. Address `appearance` set to `searchfield` in Safari 5 and Chrome. + * 2. Address `box-sizing` set to `border-box` in Safari 5 and Chrome + * (include `-moz` to future-proof). + */ + +input[type="search"] { + -webkit-appearance: textfield; /* 1 */ + -moz-box-sizing: content-box; + -webkit-box-sizing: content-box; /* 2 */ + box-sizing: content-box; +} + +/** + * Remove inner padding and search cancel button in Safari 5 and Chrome + * on OS X. + */ + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; +} + +/** + * Remove inner padding and border in Firefox 3+. + */ + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; +} + +/** + * 1. Remove default vertical scrollbar in IE 6/7/8/9. + * 2. Improve readability and alignment in all browsers. + */ + +textarea { + overflow: auto; /* 1 */ + vertical-align: top; /* 2 */ +} + +/* ========================================================================== + Tables + ========================================================================== */ + +/** + * Remove most spacing between table cells. + */ + +table { + border-collapse: collapse; + border-spacing: 0; +} diff --git a/0.18.1/_static/custom.css b/0.18.1/_static/custom.css new file mode 100644 index 000000000000..2a924f1d6a8b --- /dev/null +++ b/0.18.1/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/0.18.1/_static/doctools.js b/0.18.1/_static/doctools.js new file mode 100644 index 000000000000..816349563588 --- /dev/null +++ b/0.18.1/_static/doctools.js @@ -0,0 +1,287 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for all documentation. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/** + * select a different prefix for underscore + */ +$u = _.noConflict(); + +/** + * make the code below compatible with browsers without + * an installed firebug like debugger +if (!window.console || !console.firebug) { + var names = ["log", "debug", "info", "warn", "error", "assert", "dir", + "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", + "profile", "profileEnd"]; + window.console = {}; + for (var i = 0; i < names.length; ++i) + window.console[names[i]] = function() {}; +} + */ + +/** + * small helper function to urldecode strings + */ +jQuery.urldecode = function(x) { + return decodeURIComponent(x).replace(/\+/g, ' '); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. 
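+ * For example (an illustrative call, not in the original source): + * jQuery.getQueryParameters('?highlight=foo&highlight=bar') + * returns {highlight: ['foo', 'bar']}.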
 + */ +jQuery.getQueryParameters = function(s) { + if (typeof s == 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. + */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node) { + if (node.nodeType == 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) { + var span = document.createElement("span"); + span.className = className; + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this); + }); + } + } + return this.each(function() { + highlight(this); + }); +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. + */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} + +/** + * Small JavaScript module for the documentation. + */ +var Documentation = { + + init : function() { + this.fixFirefoxAnchorBug(); + this.highlightSearchWords(); + this.initIndexTable(); + + }, + + /** + * i18n support + */ + TRANSLATIONS : {}, + PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; }, + LOCALE : 'unknown', + + // gettext and ngettext don't access this so that the functions + // can safely be bound to a different name (_ = Documentation.gettext) + gettext : function(string) { + var translated = Documentation.TRANSLATIONS[string]; + if (typeof translated == 'undefined') + return string; + return (typeof translated == 'string') ? translated : translated[0]; + }, + + ngettext : function(singular, plural, n) { + var translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated == 'undefined') + return (n == 1) ? singular : plural; + return translated[Documentation.PLURAL_EXPR(n)]; + }, + + addTranslations : function(catalog) { + for (var key in catalog.messages) + this.TRANSLATIONS[key] = catalog.messages[key]; + this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); + this.LOCALE = catalog.locale; + }, + 
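+ // A hypothetical catalog, for illustration only (its shape is inferred from + // addTranslations and gettext above; Sphinx normally loads this from a + // generated translations file): + // Documentation.addTranslations({ + //   messages: {'Hide Search Matches': 'Suchtreffer ausblenden'}, + //   plural_expr: '(n != 1)', + //   locale: 'de' + // }); + 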
+ /** + * add context elements like header anchor links + */ + addContextElements : function() { + $('div[id] > :header:first').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this headline')). + appendTo(this); + }); + $('dt[id]').each(function() { + $('<a class="headerlink">\u00B6</a>'). + attr('href', '#' + this.id). + attr('title', _('Permalink to this definition')). + appendTo(this); + }); + }, + + /** + * workaround a firefox stupidity + * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 + */ + fixFirefoxAnchorBug : function() { + if (document.location.hash) + window.setTimeout(function() { + document.location.href += ''; + }, 10); + }, + + /** + * highlight the search words provided in the url in the text + */ + highlightSearchWords : function() { + var params = $.getQueryParameters(); + var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; + if (terms.length) { + var body = $('div.body'); + if (!body.length) { + body = $('body'); + } + window.setTimeout(function() { + $.each(terms, function() { + body.highlightText(this.toLowerCase(), 'highlighted'); + }); + }, 10); + $('<p class="highlight-link"><a href="javascript:Documentation.hideSearchWords()">' + _('Hide Search Matches') + '</a></p>') + .appendTo($('#searchbox')); + } + }, + + /** + * init the domain index toggle buttons + */ + initIndexTable : function() { + var togglers = $('img.toggler').click(function() { + var src = $(this).attr('src'); + var idnum = $(this).attr('id').substr(7); + $('tr.cg-' + idnum).toggle(); + if (src.substr(-9) == 'minus.png') + $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); + else + $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); + }).css('display', ''); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { + togglers.click(); + } + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords : function() { + $('#searchbox .highlight-link').fadeOut(300); + $('span.highlighted').removeClass('highlighted'); + }, + + /** + * make the url absolute + */ + makeURL : function(relativeURL) { + return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; + }, + + /** + * get the current relative url + */ + getCurrentURL : function() { + var path = document.location.pathname; + var parts = path.split(/\//); + $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { + if (this == '..') + parts.pop(); + }); + var url = parts.join('/'); + return path.substring(url.lastIndexOf('/') + 1, path.length - 1); + }, + + initOnKeyListeners: function() { + $(document).keyup(function(event) { + var activeElementType = document.activeElement.tagName; + // don't navigate when in search box or textarea + if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { + switch (event.keyCode) { + case 37: // left + var prevHref = $('link[rel="prev"]').prop('href'); + if (prevHref) { + window.location.href = prevHref; + return false; + } + break; // without this, "left" with no previous page would fall through and navigate right + case 39: // right + var nextHref = $('link[rel="next"]').prop('href'); + if (nextHref) { + window.location.href = nextHref; + return false; + } + } + } + }); + } +}; + +// quick alias for translations +_ = Documentation.gettext; + +$(document).ready(function() { + Documentation.init(); +}); \ No newline at end of file diff --git a/0.18.1/_static/down-pressed.png b/0.18.1/_static/down-pressed.png new file mode 100644 index 000000000000..7c30d004b71b Binary files /dev/null and b/0.18.1/_static/down-pressed.png differ diff --git a/0.18.1/_static/down.png b/0.18.1/_static/down.png new file mode 100644 index 000000000000..f48098a43b0c Binary files /dev/null and b/0.18.1/_static/down.png differ diff --git a/0.18.1/_static/favicon.ico b/0.18.1/_static/favicon.ico new file mode 100644 index 000000000000..23c553a2966c Binary files /dev/null and b/0.18.1/_static/favicon.ico differ diff --git a/0.18.1/_static/file.png b/0.18.1/_static/file.png new file mode 100644 index 000000000000..254c60bfbe27 Binary files 
/dev/null and b/0.18.1/_static/file.png differ diff --git a/0.18.1/_static/images/datastore-logo.png b/0.18.1/_static/images/datastore-logo.png new file mode 100644 index 000000000000..a0fc0a0a0e9a Binary files /dev/null and b/0.18.1/_static/images/datastore-logo.png differ diff --git a/0.18.1/_static/images/favicon.ico b/0.18.1/_static/images/favicon.ico new file mode 100644 index 000000000000..23c553a2966c Binary files /dev/null and b/0.18.1/_static/images/favicon.ico differ diff --git a/0.18.1/_static/images/icon-arrow-bullet.svg b/0.18.1/_static/images/icon-arrow-bullet.svg new file mode 100644 index 000000000000..dd9acb566eb8 --- /dev/null +++ b/0.18.1/_static/images/icon-arrow-bullet.svg @@ -0,0 +1,7 @@ + + + + + + diff --git a/0.18.1/_static/images/icon-dropdown-faq.svg b/0.18.1/_static/images/icon-dropdown-faq.svg new file mode 100644 index 000000000000..786bcdc7d131 --- /dev/null +++ b/0.18.1/_static/images/icon-dropdown-faq.svg @@ -0,0 +1,7 @@ + + + + + + diff --git a/0.18.1/_static/images/icon-dropdown.svg b/0.18.1/_static/images/icon-dropdown.svg new file mode 100644 index 000000000000..3642565ff6b5 --- /dev/null +++ b/0.18.1/_static/images/icon-dropdown.svg @@ -0,0 +1,7 @@ + + + + + + diff --git a/0.18.1/_static/images/icon-lang-dotnet.svg b/0.18.1/_static/images/icon-lang-dotnet.svg new file mode 100644 index 000000000000..490a7f7101c1 --- /dev/null +++ b/0.18.1/_static/images/icon-lang-dotnet.svg @@ -0,0 +1,18 @@ + + diff --git a/0.18.1/_static/images/icon-lang-java-duke.svg b/0.18.1/_static/images/icon-lang-java-duke.svg new file mode 100644 index 000000000000..e91633e9cc08 --- /dev/null +++ b/0.18.1/_static/images/icon-lang-java-duke.svg @@ -0,0 +1,49 @@ + + + + + + + + + + + + + + + diff --git a/0.18.1/_static/images/icon-lang-nodejs.svg b/0.18.1/_static/images/icon-lang-nodejs.svg new file mode 100644 index 000000000000..24a4addc3c57 --- /dev/null +++ b/0.18.1/_static/images/icon-lang-nodejs.svg @@ -0,0 +1,16 @@ + + + + + + + + + + + + diff --git a/0.18.1/_static/images/icon-lang-php.svg b/0.18.1/_static/images/icon-lang-php.svg new file mode 100644 index 000000000000..8700fd8be354 --- /dev/null +++ b/0.18.1/_static/images/icon-lang-php.svg @@ -0,0 +1,47 @@ + + + + + + + + + + \ No newline at end of file diff --git a/0.18.1/_static/images/icon-lang-python.svg b/0.18.1/_static/images/icon-lang-python.svg new file mode 100644 index 000000000000..bc4737703c35 --- /dev/null +++ b/0.18.1/_static/images/icon-lang-python.svg @@ -0,0 +1,21 @@ + + + + + + + + + + + diff --git a/0.18.1/_static/images/icon-lang-ruby.svg b/0.18.1/_static/images/icon-lang-ruby.svg new file mode 100644 index 000000000000..5f4e5a25d893 --- /dev/null +++ b/0.18.1/_static/images/icon-lang-ruby.svg @@ -0,0 +1,15 @@ + + + + + + diff --git a/0.18.1/_static/images/icon-link-github.svg b/0.18.1/_static/images/icon-link-github.svg new file mode 100644 index 000000000000..2404f8b0be06 --- /dev/null +++ b/0.18.1/_static/images/icon-link-github.svg @@ -0,0 +1,19 @@ + + + + + + diff --git a/0.18.1/_static/images/icon-link-package-manager.svg b/0.18.1/_static/images/icon-link-package-manager.svg new file mode 100644 index 000000000000..3a12655fe6f9 --- /dev/null +++ b/0.18.1/_static/images/icon-link-package-manager.svg @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + diff --git a/0.18.1/_static/images/icon-link-stackoverflow.svg b/0.18.1/_static/images/icon-link-stackoverflow.svg new file mode 100644 index 000000000000..e1a1f789a897 --- /dev/null +++ b/0.18.1/_static/images/icon-link-stackoverflow.svg @@ -0,0 
diff --git a/0.18.1/_static/jquery-1.11.1.js b/0.18.1/_static/jquery-1.11.1.js
new file mode 100644
index 000000000000..d4b67f7e6c1a
--- /dev/null
+++ b/0.18.1/_static/jquery-1.11.1.js
@@ -0,0 +1,10308 @@
+/*!
+ * jQuery JavaScript Library v1.11.1
+ * http://jquery.com/
+ *
+ * Includes Sizzle.js
+ * http://sizzlejs.com/
+ *
+ * Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
+ * Released under the MIT license
+ * http://jquery.org/license
+ *
+ * Date: 2014-05-01T17:42Z
+ */
+
+(function( global, factory ) {
+
+	if ( typeof module === "object" && typeof module.exports === "object" ) {
+		// For CommonJS and CommonJS-like environments where a proper window is present,
+		// execute the factory and get jQuery
+		// For environments that do not inherently possess a window with a document
+		// (such as Node.js), expose a jQuery-making factory as module.exports
+		// This accentuates the need for the creation of a real window
+		// e.g. var jQuery = require("jquery")(window);
+		// See ticket #14549 for more info
+		module.exports = global.document ?
+			factory( global, true ) :
+			function( w ) {
+				if ( !w.document ) {
+					throw new Error( "jQuery requires a window with a document" );
+				}
+				return factory( w );
+			};
+	} else {
+		factory( global );
+	}
+
+// Pass this if window is not defined yet
+}(typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
+
+// Can't do this because several apps including ASP.NET trace
+// the stack via arguments.caller.callee and Firefox dies if
+// you try to trace through "use strict" call chains.
(#13335) +// Support: Firefox 18+ +// + +var deletedIds = []; + +var slice = deletedIds.slice; + +var concat = deletedIds.concat; + +var push = deletedIds.push; + +var indexOf = deletedIds.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var support = {}; + + + +var + version = "1.11.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android<4.1, IE<9 + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([\da-z])/gi, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // Start with an empty selector + selector: "", + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num != null ? + + // Return just the one element from the set + ( num < 0 ? this[ num + this.length ] : this[ num ] ) : + + // Return all the elements in a clean array + slice.call( this ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + ret.context = this.context; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. 
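// Illustrative sketch (not part of the jQuery source): pushStack()/prevObject
// is what makes the traversal methods above reversible via .end():
//
//   var items = jQuery( "li" );   // original matched set
//   var first = items.eq( 0 );    // eq() calls pushStack(), linking back to items
//   first.prevObject === items;   // true
//   first.end() === items;        // true -- end() pops back to the previous set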
+ push: push, + sort: deletedIds.sort, + splice: deletedIds.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var src, copyIsArray, copy, name, options, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + // See test/unit/core.js for details concerning isFunction. + // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + isWindow: function( obj ) { + /* jshint eqeqeq: false */ + return obj != null && obj == obj.window; + }, + + isNumeric: function( obj ) { + // parseFloat NaNs numeric-cast false positives (null|true|false|"") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + return !jQuery.isArray( obj ) && obj - parseFloat( obj ) >= 0; + }, + + isEmptyObject: function( obj ) { + var name; + for ( name in obj ) { + return false; + } + return true; + }, + + isPlainObject: function( obj ) { + var key; + + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !hasOwn.call(obj, "constructor") && + !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Support: IE<9 + // Handle iteration over inherited properties before own properties. 
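// Illustrative expectations for this plain-object test (sketch, not jQuery source):
//   jQuery.isPlainObject( {} )            // true
//   jQuery.isPlainObject( { a: 1 } )      // true
//   jQuery.isPlainObject( new Date() )    // false -- own constructor is not Object
//   jQuery.isPlainObject( window )        // false -- rejected by isWindow() above
//   jQuery.isPlainObject( document.body ) // false -- rejected by the nodeType check
// The support.ownLast branch below covers old-IE property enumeration order: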
+ if ( support.ownLast ) { + for ( key in obj ) { + return hasOwn.call( obj, key ); + } + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + for ( key in obj ) {} + + return key === undefined || hasOwn.call( obj, key ); + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + return typeof obj === "object" || typeof obj === "function" ? + class2type[ toString.call(obj) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && jQuery.trim( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + // args is for internal usage only + each: function( obj, callback, args ) { + var value, + i = 0, + length = obj.length, + isArray = isArraylike( obj ); + + if ( args ) { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } + } + + return obj; + }, + + // Support: Android<4.1, IE<9 + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArraylike( Object(arr) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + var len; + + if ( arr ) { + if ( indexOf ) { + return indexOf.call( arr, elem, i ); + } + + len = arr.length; + i = i ? i < 0 ? 
Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in arr && arr[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + while ( j < len ) { + first[ i++ ] = second[ j++ ]; + } + + // Support: IE<9 + // Workaround casting of .length to NaN on otherwise arraylike objects (e.g., NodeLists) + if ( len !== len ) { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, + i = 0, + length = elems.length, + isArray = isArraylike( elems ), + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var args, proxy, tmp; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: function() { + return +( new Date() ); + }, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +}); + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +function isArraylike( obj ) { + var length = obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + if ( obj.nodeType === 1 && length ) { + return true; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v1.10.19 + * http://sizzlejs.com/ + * + * Copyright 2013 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-04-18 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + -(new Date()), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // General-purpose constants + strundefined = typeof undefined, + MAX_NEGATIVE = 1 << 31, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf if we can't use a native one + indexOf = arr.indexOf || function( elem ) { + var i = 0, + len = this.length; + for ( ; i < len; i++ ) { + if ( this[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + // http://www.w3.org/TR/css3-syntax/#characters + characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", + + // Loosely modeled on CSS identifier characters + // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors + // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = characterEncoding.replace( "w", "w#" ), + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + characterEncoding + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + characterEncoding + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + characterEncoding + ")" ), + "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), + "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + rescape = /'|\\/g, + + // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }; + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var match, elem, m, nodeType, + // QSA vars + i, groups, old, nid, newContext, newSelector; + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + + context = context || document; + results = results || []; + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { + return []; + } + + if ( documentIsHTML && !seed ) { + + // Shortcuts + if ( (match = rquickExpr.exec( selector )) ) { + // Speed-up: Sizzle("#ID") + if ( (m = match[1]) ) { + if ( nodeType === 9 ) { + elem = context.getElementById( m ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document (jQuery #6963) + if ( elem && elem.parentNode ) { + // Handle the case where IE, Opera, and Webkit return items + // by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + } else { + // Context is not a document + if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && + contains( context, elem ) && elem.id === m ) { + results.push( elem ); + return results; + } + } + + // Speed-up: Sizzle("TAG") + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Speed-up: Sizzle(".CLASS") + } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // QSA path + if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + nid = old = expando; + newContext = context; + newSelector = nodeType === 9 && selector; + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + groups = tokenize( selector ); + + if ( (old = context.getAttribute("id")) ) { + nid = old.replace( rescape, "\\$&" ); + } else { + context.setAttribute( "id", nid ); + } + nid = "[id='" + nid + "'] "; + + i = groups.length; + while ( i-- ) { + groups[i] = nid + toSelector( groups[i] ); + } + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || context; + newSelector = groups.join(","); + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch(qsaError) { + } finally { + if ( !old ) { + context.removeAttribute("id"); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {Function(string, Object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support 
testing using an element + * @param {Function} fn Passed the created div and expects a boolean result + */ +function assert( fn ) { + var div = document.createElement("div"); + + try { + return !!fn( div ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( div.parentNode ) { + div.parentNode.removeChild( div ); + } + // release memory in IE + div = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = attrs.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + ( ~b.sourceIndex || MAX_NEGATIVE ) - + ( ~a.sourceIndex || MAX_NEGATIVE ); + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== strundefined && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? 
documentElement.nodeName !== "HTML" : false;
+};
+
+/**
+ * Sets document-related variables once based on the current document
+ * @param {Element|Object} [doc] An element or document object to use to set the document
+ * @returns {Object} Returns the current document
+ */
+setDocument = Sizzle.setDocument = function( node ) {
+	var hasCompare,
+		doc = node ? node.ownerDocument || node : preferredDoc,
+		parent = doc.defaultView;
+
+	// If no document and documentElement is available, return
+	if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) {
+		return document;
+	}
+
+	// Set our document
+	document = doc;
+	docElem = doc.documentElement;
+
+	// Support tests
+	documentIsHTML = !isXML( doc );
+
+	// Support: IE>8
+	// If iframe document is assigned to "document" variable and if iframe has been reloaded,
+	// IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936
+	// IE6-8 do not support the defaultView property so parent will be undefined
+	if ( parent && parent !== parent.top ) {
+		// IE11 does not have attachEvent, so all must suffer
+		if ( parent.addEventListener ) {
+			parent.addEventListener( "unload", function() {
+				setDocument();
+			}, false );
+		} else if ( parent.attachEvent ) {
+			parent.attachEvent( "onunload", function() {
+				setDocument();
+			});
+		}
+	}
+
+	/* Attributes
+	---------------------------------------------------------------------- */
+
+	// Support: IE<8
+	// Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans)
+	support.attributes = assert(function( div ) {
+		div.className = "i";
+		return !div.getAttribute("className");
+	});
+
+	/* getElement(s)By*
+	---------------------------------------------------------------------- */
+
+	// Check if getElementsByTagName("*") returns only elements
+	support.getElementsByTagName = assert(function( div ) {
+		div.appendChild( doc.createComment("") );
+		return !div.getElementsByTagName("*").length;
+	});
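// Illustrative pattern (sketch, not part of the Sizzle source): every support
// flag in this section probes a detached <div> through assert(), which returns
// false when the probe throws and detaches/nulls the div afterwards, e.g.:
//
//   var classAttrIsReal = assert(function( div ) {
//     div.className = "i";
//     return !div.getAttribute("className"); // true when attributes and properties differ
//   });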
"; + + // Support: Safari<4 + // Catch class over-caching + div.firstChild.className = "i"; + // Support: Opera<10 + // Catch gEBCN failure to find non-leading classes + return div.getElementsByClassName("i").length === 2; + }); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( div ) { + docElem.appendChild( div ).id = expando; + return !doc.getElementsByName || !doc.getElementsByName( expando ).length; + }); + + // ID find and filter + if ( support.getById ) { + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== strundefined && documentIsHTML ) { + var m = context.getElementById( id ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? [ m ] : []; + } + }; + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + } else { + // Support: IE6/7 + // getElementById is not reliable as a find shortcut + delete Expr.find["ID"]; + + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? + function( tag, context ) { + if ( typeof context.getElementsByTagName !== strundefined ) { + return context.getElementsByTagName( tag ); + } + } : + function( tag, context ) { + var elem, + tmp = [], + i = 0, + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See http://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( div ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // http://bugs.jquery.com/ticket/12359 + div.innerHTML = ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // http://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( 
div.querySelectorAll("[msallowclip^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !div.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + }); + + assert(function( div ) { + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = doc.createElement("input"); + input.setAttribute( "type", "hidden" ); + div.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( div.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":enabled").length ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + div.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( div ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( div, "div" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( div, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully does not implement inclusive descendent + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? 
+ a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === doc || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === doc || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === doc ? -1 : + b === doc ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return doc; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch(e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, outerCache, node, diff, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? 
parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + // Seek `elem` from a previously-cached index + outerCache = parent[ expando ] || (parent[ expando ] = {}); + cache = outerCache[ type ] || []; + nodeIndex = cache[0] === dirruns && cache[1]; + diff = cache[0] === dirruns && cache[2]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + outerCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + // Use previously-cached element index if available + } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { + diff = cache[1]; + + // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) + } else { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { + // Cache the index of each encountered element + if ( useCache ) { + (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? + markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf.call( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? 
+ markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": function( elem ) { + return elem.disabled === false; + }, + + "disabled": function( elem ) { + return elem.disabled === true; + }, + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
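// Illustrative cases (sketch, not part of the Sizzle source):
//   <p></p>              matches :empty
//   <p><!-- note --></p> matches :empty (comment nodes, type 8, don't count)
//   <p>text</p>          no match (text node, type 3, counts as content)
//   <p><span></span></p> no match (element child, type 1, counts as content)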
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + checkNonElements = base && dir === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + if ( (oldCache = outerCache[ dir ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + outerCache[ dir ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? + + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? 
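+ // Editor's note (added): condense() above keeps only elements that pass the filter and records their original indexes; sketch with hypothetical values: + //   condense( [ a, null, b ], map = [] ) -> [ a, b ] with map set to [ 0, 2 ], + //   so callers can write results back to the correct seed slots later.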
+ matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf.call( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 
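+ // Editor's note (added, illustrative): for a positional selector such as "li:first > a", matcherFromTokens() above compiles "li" into ordinary element matchers, then hands the expando-marked ":first" and the trailing "> a" to setMatcher(), which is why positional pseudos are evaluated against the candidate set as a whole rather than element-by-element.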
1 : Math.random() || 0.1), + len = elems.length; + + if ( outermost ) { + outermostContext = context !== document && context; + } + + // Add elements passing elementMatchers directly to results + // Keep `i` a string if there are no elements so `matchedCount` will be "00" below + // Support: IE<9, Safari + // Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id + for ( ; i !== len && (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + j = 0; + while ( (matcher = elementMatchers[j++]) ) { + if ( matcher( elem, context, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // Apply set filters to unmatched elements + matchedCount += i; + if ( bySet && i !== matchedCount ) { + j = 0; + while ( (matcher = setMatchers[j++]) ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is no seed and only one group + if ( match.length === 1 ) { + + // Take a shortcut and set the context if the root selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + support.getById && context.nodeType === 9 && documentIsHTML && + Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
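+ // Editor's note (added): a sketch of the compile/select pairing documented above (the selector is illustrative): + //   var matcher = Sizzle.compile( "div.note > p" ); // tokenized once, then cached + //   Sizzle.select( matcher, document.body, [] );    // reuses matcher.selector, no retokenizing + // Passing the raw string instead tokenizes on first use and hits compilerCache afterwards.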
0 : tokens.length; + while ( i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( runescape, funescape ), + rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && toSelector( tokens ); + if ( !selector ) { + push.apply( results, seed ); + return results; + } + + break; + } + } + } + } + + // Compile and execute a filtering function if one is not provided + // Provide `match` to avoid retokenization if we modified the selector above + ( compiled || compile( selector, match ) )( + seed, + context, + !documentIsHTML, + results, + rsibling.test( selector ) && testContext( context.parentNode ) || context + ); + return results; +}; + +// One-time assignments + +// Sort stability +support.sortStable = expando.split("").sort( sortOrder ).join("") === expando; + +// Support: Chrome<14 +// Always assume duplicates if they aren't passed to the comparison function +support.detectDuplicates = !!hasDuplicate; + +// Initialize against the default document +setDocument(); + +// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27) +// Detached nodes confoundingly follow *each other* +support.sortDetached = assert(function( div1 ) { + // Should return 1, but returns 4 (following) + return div1.compareDocumentPosition( document.createElement("div") ) & 1; +}); + +// Support: IE<8 +// Prevent attribute/property "interpolation" +// http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx +if ( !assert(function( div ) { + div.innerHTML = "<a href='#'></a>"; + return div.firstChild.getAttribute("href") === "#" ; +}) ) { + addHandle( "type|href|height|width", function( elem, name, isXML ) { + if ( !isXML ) { + return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 ); + } + }); +} + +// Support: IE<9 +// Use defaultValue in place of getAttribute("value") +if ( !support.attributes || !assert(function( div ) { + div.innerHTML = "<input/>"; + div.firstChild.setAttribute( "value", "" ); + return div.firstChild.getAttribute( "value" ) === ""; +}) ) { + addHandle( "value", function( elem, name, isXML ) { + if ( !isXML && elem.nodeName.toLowerCase() === "input" ) { + return elem.defaultValue; + } + }); +} + +// Support: IE<9 +// Use getAttributeNode to fetch booleans when getAttribute lies +if ( !assert(function( div ) { + return div.getAttribute("disabled") == null; +}) ) { + addHandle( booleans, function( elem, name, isXML ) { + var val; + if ( !isXML ) { + return elem[ name ] === true ? name.toLowerCase() : + (val = elem.getAttributeNode( name )) && val.specified ? 
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; +jQuery.expr[":"] = jQuery.expr.pseudos; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; + + + +var rneedsContext = jQuery.expr.match.needsContext; + +var rsingleTag = (/^<(\w+)\s*\/?>(?:<\/\1>|)$/); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + /* jshint -W018 */ + return !!qualifier.call( elem, i, elem ) !== not; + }); + + } + + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + }); + + } + + if ( typeof qualifier === "string" ) { + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + qualifier = jQuery.filter( qualifier, elements ); + } + + return jQuery.grep( elements, function( elem ) { + return ( jQuery.inArray( elem, qualifier ) >= 0 ) !== not; + }); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return elems.length === 1 && elem.nodeType === 1 ? + jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [] : + jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + })); +}; + +jQuery.fn.extend({ + find: function( selector ) { + var i, + ret = [], + self = this, + len = self.length; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + }) ); + } + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + // Needed because $( selector, context ) becomes $( context ).find( selector ) + ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); + ret.selector = this.selector ? this.selector + " " + selector : selector; + return ret; + }, + filter: function( selector ) { + return this.pushStack( winnow(this, selector || [], false) ); + }, + not: function( selector ) { + return this.pushStack( winnow(this, selector || [], true) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
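+ // Editor's note (added): behavior sketch for the winnow()-backed API above (markup assumed): + //   $( "li" ).filter( ".active" )                    // keep elements matching the qualifier + //   $( "li" ).not( ".active" )                       // drop elements matching the qualifier + //   $( "li" ).is(function( i ) { return i > 2; })    // function qualifier; true if any index passes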
+ jQuery( selector ) : + selector || [], + false + ).length; + } +}); + + +// Initialize a jQuery object + + +// A central reference to the root jQuery(document) +var rootjQuery, + + // Use the correct document in accordance with the window argument (sandbox) + document = window.document, + + // A simple way to check for HTML strings + // Prioritize #id over <tag> to avoid XSS via location.hash (#9521) + // Strict HTML recognition (#11290: must start with <) + rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/, + + init = jQuery.fn.init = function( selector, context ) { + var match, elem; + + // HANDLE: $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && (match[1] || !context) ) { + + // HANDLE: $(html) -> $(array) + if ( match[1] ) { + context = context instanceof jQuery ? context[0] : context; + + // scripts is true for back-compat + // Intentionally let the error be thrown if parseHTML is not present + jQuery.merge( this, jQuery.parseHTML( + match[1], + context && context.nodeType ? context.ownerDocument || context : document, + true + ) ); + + // HANDLE: $(html, props) + if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { + for ( match in context ) { + // Properties of context are called as methods if possible + if ( jQuery.isFunction( this[ match ] ) ) { + this[ match ]( context[ match ] ); + + // ...and otherwise set as attributes + } else { + this.attr( match, context[ match ] ); + } + } + } + + return this; + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[2] ); + + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id !== match[2] ) { + return rootjQuery.find( selector ); + } + + // Otherwise, we inject the element directly into the jQuery object + this.length = 1; + this[0] = elem; + } + + this.context = document; + this.selector = selector; + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || rootjQuery ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr)) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(DOMElement) + } else if ( selector.nodeType ) { + this.context = this[0] = selector; + this.length = 1; + return this; + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return typeof rootjQuery.ready !== "undefined" ? 
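+ // Editor's note (added): the branches above map onto the public call forms; sketch: + //   $( "<p>hi</p>" )          -> parseHTML path + //   $( "#main" )              -> getElementById fast path + //   $( ".item", container )   -> rewritten as $( container ).find( ".item" ) + //   $( node )                 -> wraps an existing element + //   $( fn )                   -> shorthand for the document-ready handler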
+ rootjQuery.ready( selector ) : + // Execute immediately if ready is not present + selector( jQuery ); + } + + if ( selector.selector !== undefined ) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + // methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.extend({ + dir: function( elem, dir, until ) { + var matched = [], + cur = elem[ dir ]; + + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); + +jQuery.fn.extend({ + has: function( target ) { + var i, + targets = jQuery( target, this ), + len = targets.length; + + return this.filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? + jQuery( selectors, context || this.context ) : + 0; + + for ( ; i < l; i++ ) { + for ( cur = this[i]; cur && cur !== context; cur = cur.parentNode ) { + // Always skip document fragments + if ( cur.nodeType < 11 && (pos ? + pos.index(cur) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector(cur, selectors)) ) { + + matched.push( cur ); + break; + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.unique( matched ) : matched ); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; + } + + // index in selector + if ( typeof elem === "string" ) { + return jQuery.inArray( this[0], jQuery( elem ) ); + } + + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.unique( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter(selector) + ); + } +}); + +function sibling( cur, dir ) { + do { + cur = cur[ dir ]; + } while ( cur && cur.nodeType !== 1 ); + + return cur; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
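+ // Editor's note (added): usage sketch for the traversal helpers defined here (markup assumed): + //   $( "span" ).closest( "ul" )            // nearest ancestor (or self) matching "ul" + //   $( "#item" ).index()                   // position among siblings, via prevAll().length + //   $( "li" ).first().nextAll().addBack()  // matched set plus the set it was derived from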
parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + if ( this.length > 1 ) { + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + ret = jQuery.unique( ret ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + } + + return this.pushStack( ret ); + }; +}); +var rnotwhite = (/\S+/g); + + + +// String to Object options format cache +var optionsCache = {}; + +// Convert String-formatted options into Object-formatted ones and store in cache +function createOptions( options ) { + var object = optionsCache[ options ] = {}; + jQuery.each( options.match( rnotwhite ) || [], function( _, flag ) { + object[ flag ] = true; + }); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
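+ // Editor's note (added): a minimal sketch of the flag combinations documented above: + //   var cb = jQuery.Callbacks( "once memory" ); // Deferred-like semantics + //   cb.fire( 1 );                               // each callback runs at most once + //   cb.add(function( v ) { /* called immediately with the memorized value 1 */ });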
+ ( optionsCache[ options ] || createOptions( options ) ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + // Last fire value (for non-forgettable lists) + memory, + // Flag to know if list was already fired + fired, + // End of the loop when firing + firingLength, + // Index of currently firing callback (modified by remove if needed) + firingIndex, + // First callback to fire (used internally by add and fireWith) + firingStart, + // Actual callback list + list = [], + // Stack of fire calls for repeatable lists + stack = !options.once && [], + // Fire callbacks + fire = function( data ) { + memory = options.memory && data; + fired = true; + firingIndex = firingStart || 0; + firingStart = 0; + firingLength = list.length; + firing = true; + for ( ; list && firingIndex < firingLength; firingIndex++ ) { + if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { + memory = false; // To prevent further calls using add + break; + } + } + firing = false; + if ( list ) { + if ( stack ) { + if ( stack.length ) { + fire( stack.shift() ); + } + } else if ( memory ) { + list = []; + } else { + self.disable(); + } + } + }, + // Actual Callbacks object + self = { + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + // First, we save the current length + var start = list.length; + (function add( args ) { + jQuery.each( args, function( _, arg ) { + var type = jQuery.type( arg ); + if ( type === "function" ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && type !== "string" ) { + // Inspect recursively + add( arg ); + } + }); + })( arguments ); + // Do we need to add the callbacks to the + // current firing batch? + if ( firing ) { + firingLength = list.length; + // With memory, if we're not firing then + // we should call right away + } else if ( memory ) { + firingStart = start; + fire( memory ); + } + } + return this; + }, + // Remove a callback from the list + remove: function() { + if ( list ) { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + // Handle firing indexes + if ( firing ) { + if ( index <= firingLength ) { + firingLength--; + } + if ( index <= firingIndex ) { + firingIndex--; + } + } + } + }); + } + return this; + }, + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); + }, + // Remove all callbacks from the list + empty: function() { + list = []; + firingLength = 0; + return this; + }, + // Have the list do nothing anymore + disable: function() { + list = stack = memory = undefined; + return this; + }, + // Is it disabled? + disabled: function() { + return !list; + }, + // Lock the list in its current state + lock: function() { + stack = undefined; + if ( !memory ) { + self.disable(); + } + return this; + }, + // Is it locked? + locked: function() { + return !stack; + }, + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( list && ( !fired || stack ) ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + if ( firing ) { + stack.push( args ); + } else { + fire( args ); + } + } + return this; + }, + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +jQuery.extend({ + + Deferred: function( func ) { + var tuples = [ + // action, add listener, listener list, final state + [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], + [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], + [ "notify", "progress", jQuery.Callbacks("memory") ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + then: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + return jQuery.Deferred(function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + var fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; + // deferred[ done | fail | progress ] for forwarding actions to newDefer + deferred[ tuple[1] ](function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .done( newDefer.resolve ) + .fail( newDefer.reject ) + .progress( newDefer.notify ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments ); + } + }); + }); + fns = null; + }).promise(); + }, + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Keep pipe for back-compat + promise.pipe = promise.then; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 3 ]; + + // promise[ done | fail | progress ] = list.add + promise[ tuple[1] ] = list.add; + + // Handle state + if ( stateString ) { + list.add(function() { + // state = [ resolved | rejected ] + state = stateString; + + // [ reject_list | resolve_list ].disable; progress_list.lock + }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); + } + + // deferred[ resolve | reject | notify ] + deferred[ tuple[0] ] = function() { + deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); + return this; + }; + deferred[ tuple[0] + "With" ] = list.fireWith; + }); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( subordinate /* , ..., subordinateN */ ) { + var i = 0, + resolveValues = slice.call( arguments ), + length = resolveValues.length, + + // the count of uncompleted subordinates + remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, + + // the master Deferred. If resolveValues consist of only a single Deferred, just use that. + deferred = remaining === 1 ? subordinate : jQuery.Deferred(), + + // Update function for both resolve and progress values + updateFunc = function( i, contexts, values ) { + return function( value ) { + contexts[ i ] = this; + values[ i ] = arguments.length > 1 ? 
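+ // Editor's note (added): usage sketch for Deferred/when above (identifiers illustrative): + //   var d = jQuery.Deferred(); + //   d.then(function( v ) { return v + 1; }).done(function( v ) { /* v === 2 */ }); + //   d.resolve( 1 ); + //   jQuery.when( d, anotherDeferred ).done(function() { /* runs once both resolve */ });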
slice.call( arguments ) : value; + if ( values === progressValues ) { + deferred.notifyWith( contexts, values ); + + } else if ( !(--remaining) ) { + deferred.resolveWith( contexts, values ); + } + }; + }, + + progressValues, progressContexts, resolveContexts; + + // add listeners to Deferred subordinates; treat others as resolved + if ( length > 1 ) { + progressValues = new Array( length ); + progressContexts = new Array( length ); + resolveContexts = new Array( length ); + for ( ; i < length; i++ ) { + if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { + resolveValues[ i ].promise() + .done( updateFunc( i, resolveContexts, resolveValues ) ) + .fail( deferred.reject ) + .progress( updateFunc( i, progressContexts, progressValues ) ); + } else { + --remaining; + } + } + } + + // if we're not waiting on anything, resolve the master + if ( !remaining ) { + deferred.resolveWith( resolveContexts, resolveValues ); + } + + return deferred.promise(); + } +}); + + +// The deferred used on DOM ready +var readyList; + +jQuery.fn.ready = function( fn ) { + // Add the callback + jQuery.ready.promise().done( fn ); + + return this; +}; + +jQuery.extend({ + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( !document.body ) { + return setTimeout( jQuery.ready ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + + // Trigger any bound ready events + if ( jQuery.fn.triggerHandler ) { + jQuery( document ).triggerHandler( "ready" ); + jQuery( document ).off( "ready" ); + } + } +}); + +/** + * Clean-up method for dom ready events + */ +function detach() { + if ( document.addEventListener ) { + document.removeEventListener( "DOMContentLoaded", completed, false ); + window.removeEventListener( "load", completed, false ); + + } else { + document.detachEvent( "onreadystatechange", completed ); + window.detachEvent( "onload", completed ); + } +} + +/** + * The ready event handler and self cleanup method + */ +function completed() { + // readyState === "complete" is good enough for us to call the dom ready in oldIE + if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { + detach(); + jQuery.ready(); + } +} + +jQuery.ready.promise = function( obj ) { + if ( !readyList ) { + + readyList = jQuery.Deferred(); + + // Catch cases where $(document).ready() is called after the browser event has already occurred. 
+ // we once tried to use readyState "interactive" here, but it caused issues like the one + // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 + if ( document.readyState === "complete" ) { + // Handle it asynchronously to allow scripts the opportunity to delay ready + setTimeout( jQuery.ready ); + + // Standards-based browsers support DOMContentLoaded + } else if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed, false ); + + // If IE event model is used + } else { + // Ensure firing before onload, maybe late but safe also for iframes + document.attachEvent( "onreadystatechange", completed ); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", completed ); + + // If IE and not a frame + // continually check to see if the document is ready + var top = false; + + try { + top = window.frameElement == null && document.documentElement; + } catch(e) {} + + if ( top && top.doScroll ) { + (function doScrollCheck() { + if ( !jQuery.isReady ) { + + try { + // Use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + top.doScroll("left"); + } catch(e) { + return setTimeout( doScrollCheck, 50 ); + } + + // detach all dom ready events + detach(); + + // and execute any waiting functions + jQuery.ready(); + } + })(); + } + } + } + return readyList.promise( obj ); +}; + + +var strundefined = typeof undefined; + + + +// Support: IE<9 +// Iteration over object's inherited properties before its own +var i; +for ( i in jQuery( support ) ) { + break; +} +support.ownLast = i !== "0"; + +// Note: most support tests are defined in their respective modules. +// false until the test is run +support.inlineBlockNeedsLayout = false; + +// Execute ASAP in case we need to set body.style.zoom +jQuery(function() { + // Minified: var a,b,c,d + var val, div, body, container; + + body = document.getElementsByTagName( "body" )[ 0 ]; + if ( !body || !body.style ) { + // Return for frameset docs that don't have a body + return; + } + + // Setup + div = document.createElement( "div" ); + container = document.createElement( "div" ); + container.style.cssText = "position:absolute;border:0;width:0;height:0;top:0;left:-9999px"; + body.appendChild( container ).appendChild( div ); + + if ( typeof div.style.zoom !== strundefined ) { + // Support: IE<8 + // Check if natively block-level elements act like inline-block + // elements when setting their display to 'inline' and giving + // them layout + div.style.cssText = "display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1"; + + support.inlineBlockNeedsLayout = val = div.offsetWidth === 3; + if ( val ) { + // Prevent IE 6 from affecting layout for positioned elements #11048 + // Prevent IE from shrinking the body in IE 7 mode #12869 + // Support: IE<8 + body.style.zoom = 1; + } + } + + body.removeChild( container ); +}); + + + + +(function() { + var div = document.createElement( "div" ); + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } + + // Null elements to avoid leaks in IE. 
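+ // Editor's note (added): usage sketch for the ready machinery above: + //   jQuery.ready.promise().done(function() { /* DOM is ready */ }); + //   $(function() { /* classic shortcut; routed through the same promise */ }); + //   jQuery.holdReady( true );  // defer ready callbacks until holdReady( false )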
+ div = null; +})(); + + +/** + * Determines whether an object can have data + */ +jQuery.acceptData = function( elem ) { + var noData = jQuery.noData[ (elem.nodeName + " ").toLowerCase() ], + nodeType = +elem.nodeType || 1; + + // Do not set data on non-element DOM nodes because it will not be cleared (#8335). + return nodeType !== 1 && nodeType !== 9 ? + false : + + // Nodes accept data unless otherwise specified; rejection can be conditional + !noData || noData !== true && elem.getAttribute("classid") === noData; +}; + + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /([A-Z])/g; + +function dataAttr( elem, key, data ) { + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + + var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); + + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = data === "true" ? true : + data === "false" ? false : + data === "null" ? null : + // Only convert to a number if it doesn't change the string + +data + "" === data ? +data : + rbrace.test( data ) ? jQuery.parseJSON( data ) : + data; + } catch( e ) {} + + // Make sure we set the data so it isn't changed later + jQuery.data( elem, key, data ); + + } else { + data = undefined; + } + } + + return data; +} + +// checks a cache object for emptiness +function isEmptyDataObject( obj ) { + var name; + for ( name in obj ) { + + // if the public data object is empty, the private is still empty + if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { + continue; + } + if ( name !== "toJSON" ) { + return false; + } + } + + return true; +} + +function internalData( elem, name, data, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var ret, thisCache, + internalKey = jQuery.expando, + + // We have to handle DOM nodes and JS objects differently because IE6-7 + // can't GC object references properly across the DOM-JS boundary + isNode = elem.nodeType, + + // Only DOM nodes need the global jQuery cache; JS object data is + // attached directly to the object so GC can occur automatically + cache = isNode ? jQuery.cache : elem, + + // Only defining an ID for JS objects if its cache already exists allows + // the code to shortcut on the same path as a DOM node with no cache + id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; + + // Avoid doing any more work than we need to when trying to get data on an + // object that has no data at all + if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && data === undefined && typeof name === "string" ) { + return; + } + + if ( !id ) { + // Only DOM nodes need a new unique ID for each element since their data + // ends up in the global cache + if ( isNode ) { + id = elem[ internalKey ] = deletedIds.pop() || jQuery.guid++; + } else { + id = internalKey; + } + } + + if ( !cache[ id ] ) { + // Avoid exposing jQuery metadata on plain JS objects when the object + // is serialized using JSON.stringify + cache[ id ] = isNode ? 
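+ // Editor's note (added): dataAttr() above coerces HTML5 data-* strings on first read; sketch (markup assumed): + //   <div id="box" data-count="4" data-opts='{"x":1}'> + //   $( "#box" ).data( "count" )  -> 4 (number)      $( "#box" ).data( "opts" ) -> { x: 1 } (parsed JSON) + //   values failing the number/JSON checks come back as raw strings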
{} : { toJSON: jQuery.noop }; + } + + // An object can be passed to jQuery.data instead of a key/value pair; this gets + // shallow copied over onto the existing cache + if ( typeof name === "object" || typeof name === "function" ) { + if ( pvt ) { + cache[ id ] = jQuery.extend( cache[ id ], name ); + } else { + cache[ id ].data = jQuery.extend( cache[ id ].data, name ); + } + } + + thisCache = cache[ id ]; + + // jQuery data() is stored in a separate object inside the object's internal data + // cache in order to avoid key collisions between internal data and user-defined + // data. + if ( !pvt ) { + if ( !thisCache.data ) { + thisCache.data = {}; + } + + thisCache = thisCache.data; + } + + if ( data !== undefined ) { + thisCache[ jQuery.camelCase( name ) ] = data; + } + + // Check for both converted-to-camel and non-converted data property names + // If a data property was specified + if ( typeof name === "string" ) { + + // First Try to find as-is property data + ret = thisCache[ name ]; + + // Test for null|undefined property data + if ( ret == null ) { + + // Try to find the camelCased property + ret = thisCache[ jQuery.camelCase( name ) ]; + } + } else { + ret = thisCache; + } + + return ret; +} + +function internalRemoveData( elem, name, pvt ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, i, + isNode = elem.nodeType, + + // See jQuery.data for more information + cache = isNode ? jQuery.cache : elem, + id = isNode ? elem[ jQuery.expando ] : jQuery.expando; + + // If there is already no cache entry for this object, there is no + // purpose in continuing + if ( !cache[ id ] ) { + return; + } + + if ( name ) { + + thisCache = pvt ? cache[ id ] : cache[ id ].data; + + if ( thisCache ) { + + // Support array or space separated string names for data keys + if ( !jQuery.isArray( name ) ) { + + // try the string as a key before any manipulation + if ( name in thisCache ) { + name = [ name ]; + } else { + + // split the camel cased version by spaces unless a key with the spaces exists + name = jQuery.camelCase( name ); + if ( name in thisCache ) { + name = [ name ]; + } else { + name = name.split(" "); + } + } + } else { + // If "name" is an array of keys... + // When data is initially created, via ("key", "val") signature, + // keys will be converted to camelCase. + // Since there is no way to tell _how_ a key was added, remove + // both plain key and camelCase key. #12786 + // This will only penalize the array argument path. + name = name.concat( jQuery.map( name, jQuery.camelCase ) ); + } + + i = name.length; + while ( i-- ) { + delete thisCache[ name[i] ]; + } + + // If there is no data left in the cache, we want to continue + // and let the cache object itself get destroyed + if ( pvt ? 
!isEmptyDataObject(thisCache) : !jQuery.isEmptyObject(thisCache) ) { + return; + } + } + } + + // See jQuery.data for more information + if ( !pvt ) { + delete cache[ id ].data; + + // Don't destroy the parent cache unless the internal data object + // had been the only thing left in it + if ( !isEmptyDataObject( cache[ id ] ) ) { + return; + } + } + + // Destroy the cache + if ( isNode ) { + jQuery.cleanData( [ elem ], true ); + + // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) + /* jshint eqeqeq: false */ + } else if ( support.deleteExpando || cache != cache.window ) { + /* jshint eqeqeq: true */ + delete cache[ id ]; + + // When all else fails, null + } else { + cache[ id ] = null; + } +} + +jQuery.extend({ + cache: {}, + + // The following elements (space-suffixed to avoid Object.prototype collisions) + // throw uncatchable exceptions if you attempt to set expando properties + noData: { + "applet ": true, + "embed ": true, + // ...but Flash objects (which have this classid) *can* handle expandos + "object ": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" + }, + + hasData: function( elem ) { + elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; + return !!elem && !isEmptyDataObject( elem ); + }, + + data: function( elem, name, data ) { + return internalData( elem, name, data ); + }, + + removeData: function( elem, name ) { + return internalRemoveData( elem, name ); + }, + + // For internal use only. + _data: function( elem, name, data ) { + return internalData( elem, name, data, true ); + }, + + _removeData: function( elem, name ) { + return internalRemoveData( elem, name, true ); + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + var i, name, data, + elem = this[0], + attrs = elem && elem.attributes; + + // Special expectations of .data basically thwart jQuery.access, + // so implement the relevant behavior ourselves + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = jQuery.data( elem ); + + if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE11+ + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice(5) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + jQuery._data( elem, "parsedAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + return arguments.length > 1 ? + + // Sets one value + this.each(function() { + jQuery.data( this, key, value ); + }) : + + // Gets one value + // Try to fetch any internally stored data first + elem ? 
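+ // Editor's note (added): key-handling sketch for the data cache above: + //   jQuery.data( el, "the-answer", 42 );    // stored camelCased, as theAnswer + //   jQuery.data( el, "the-answer" )         // -> 42, found via the camelCase fallback + //   jQuery.removeData( el, "the-answer" );  // removal tries both spellings too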
dataAttr( elem, key, jQuery.data( elem, key ) ) : undefined; + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); + + +jQuery.extend({ + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = jQuery._data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || jQuery.isArray(data) ) { + queue = jQuery._data( elem, type, jQuery.makeArray(data) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // not intended for public consumption - generates a queueHooks object, or returns the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return jQuery._data( elem, key ) || jQuery._data( elem, key, { + empty: jQuery.Callbacks("once memory").add(function() { + jQuery._removeData( elem, type + "queue" ); + jQuery._removeData( elem, key ); + }) + }); + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[0], type ); + } + + return data === undefined ? 
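+ // Editor's note (added): usage sketch for the queue API above ("fx" is the default queue): + //   $( "#box" ).queue(function( next ) { /* one step */ next(); }); // fx auto-dequeues + //   jQuery.queue( el, "fx" )      // -> array of pending functions + //   $( "#box" ).clearQueue();     // empty it; .promise() resolves once the queue drains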
+ this : + this.each(function() { + var queue = jQuery.queue( this, type, data ); + + // ensure hooks exist for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = jQuery._data( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +}); +var pnum = (/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/).source; + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHidden = function( elem, el ) { + // isHidden might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); + }; + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = jQuery.access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + length = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < length; i++ ) { + fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); + } + } + } + + return chainable ? + elems : + + // Gets + bulk ? + fn.call( elems ) : + length ? fn( elems[0], key ) : emptyGet; +}; +var rcheckableType = (/^(?:checkbox|radio)$/i); + + + +(function() { + // Minified: var a,b,c + var input = document.createElement( "input" ), + div = document.createElement( "div" ), + fragment = document.createDocumentFragment(); + + // Setup + div.innerHTML = "  <link/><table></table><a href='/a'>a</a><input type='checkbox'/>";
+ // IE strips leading whitespace when .innerHTML is used + support.leadingWhitespace = div.firstChild.nodeType === 3; + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + support.tbody = !div.getElementsByTagName( "tbody" ).length; + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + support.htmlSerialize = !!div.getElementsByTagName( "link" ).length; + + // Makes sure cloning an html5 element does not cause problems + // Where outerHTML is undefined, this still works + support.html5Clone = + document.createElement( "nav" ).cloneNode( true ).outerHTML !== "<:nav>"; + + // Check if a disconnected checkbox will retain its checked + // value of true after appended to the DOM (IE6/7) + input.type = "checkbox"; + input.checked = true; + fragment.appendChild( input ); + support.appendChecked = input.checked; + + // Make sure textarea (and checkbox) defaultValue is properly cloned + // Support: IE6-IE11+ + div.innerHTML = "<textarea>x</textarea>"; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // #11217 - WebKit loses check when the name is after the checked attribute + fragment.appendChild( div ); + div.innerHTML = "<input type='radio' checked='checked' name='t'/>"; + + // Support: Safari 5.1, iOS 5.1, Android 4.x, Android 2.3 + // old WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE<9 + // Opera does not clone events (and typeof div.attachEvent === undefined). + // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() + support.noCloneEvent = true; + if ( div.attachEvent ) { + div.attachEvent( "onclick", function() { + support.noCloneEvent = false; + }); + + div.cloneNode( true ).click(); + } + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } +})(); + + +(function() { + var i, eventName, + div = document.createElement( "div" ); + + // Support: IE<9 (lack submit/change bubble), Firefox 23+ (lack focusin event) + for ( i in { submit: true, change: true, focusin: true }) { + eventName = "on" + i; + + if ( !(support[ i + "Bubbles" ] = eventName in window) ) { + // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP) + div.setAttribute( eventName, "t" ); + support[ i + "Bubbles" ] = div.attributes[ eventName ].expando === false; + } + } + + // Null elements to avoid leaks in IE. + div = null; +})(); + + +var rformElems = /^(?:input|select|textarea)$/i, + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu)|click/, + rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)$/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + var tmp, events, t, handleObjIn, + special, eventHandle, handleObj, + handlers, type, namespaces, origType, + elemData = jQuery._data( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !(events = elemData.events) ) { + events = elemData.events = {}; + } + if ( !(eventHandle = elemData.handle) ) { + eventHandle = elemData.handle = function( e ) { + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== strundefined && (!e || jQuery.event.triggered !== e.type) ? + jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : + undefined; + }; + // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events + eventHandle.elem = elem; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? 
special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend({ + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join(".") + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !(handlers = events[ type ]) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener/attachEvent if the special events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + var j, handleObj, tmp, + origCount, t, events, + special, handlers, type, + namespaces, origType, + elemData = jQuery.hasData( elem ) && jQuery._data( elem ); + + if ( !elemData || !(events = elemData.events) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[2] && new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + delete elemData.handle; + + // removeData also checks for emptiness and clears the expando if empty + // so use it instead of delete + jQuery._removeData( elem, "events" ); + } + }, + + trigger: function( event, data, elem, onlyHandlers ) { + var handle, ontype, cur, + bubbleType, special, tmp, i, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf(".") >= 0 ) { + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split("."); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf(":") < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join("."); + event.namespace_re = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? 
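+	/* Handlers receive the event first, then any extra trigger data, e.g.
+	   (illustrative): jQuery( el ).trigger( "custom", [ "a", "b" ] ) invokes
+	   handler( event, "a", "b" ). */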
+ [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === (elem.ownerDocument || document) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && jQuery.acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( (!special._default || special._default.apply( eventPath.pop(), data ) === false) && + jQuery.acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name name as the event. + // Can't use an .isFunction() check here because IE6/7 fails that test. 
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + try { + elem[ type ](); + } catch ( e ) { + // IE<9 dies on focus/blur to hidden element (#1486,#12518) + // only reproducible on winXP IE8 native, not IE9 in IE8 mode + } + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + dispatch: function( event ) { + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( event ); + + var i, ret, handleObj, matched, j, + handlerQueue = [], + args = slice.call( arguments ), + handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[0] = event; + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or + // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). + if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) + .apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( (event.result = ret) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var sel, handleObj, matches, i, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + // Black-hole SVG instance trees (#13180) + // Avoid non-left-click bubbling in Firefox (#3861) + if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { + + /* jshint eqeqeq: false */ + for ( ; cur != this; cur = cur.parentNode || this ) { + /* jshint eqeqeq: true */ + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { + matches = []; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matches[ sel ] === undefined ) { + matches[ sel ] = handleObj.needsContext ? 
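+						/* Positional selectors (e.g. "li:first") need the full
+						   context to match, so they take the slower
+						   jQuery( sel, this ) path; ordinary selectors test
+						   `cur` directly via jQuery.find. */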
+ jQuery( sel, this ).index( cur ) >= 0 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matches[ sel ] ) { + matches.push( handleObj ); + } + } + if ( matches.length ) { + handlerQueue.push({ elem: cur, handlers: matches }); + } + } + } + } + + // Add the remaining (directly-bound) handlers + if ( delegateCount < handlers.length ) { + handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); + } + + return handlerQueue; + }, + + fix: function( event ) { + if ( event[ jQuery.expando ] ) { + return event; + } + + // Create a writable copy of the event object and normalize some properties + var i, prop, copy, + type = event.type, + originalEvent = event, + fixHook = this.fixHooks[ type ]; + + if ( !fixHook ) { + this.fixHooks[ type ] = fixHook = + rmouseEvent.test( type ) ? this.mouseHooks : + rkeyEvent.test( type ) ? this.keyHooks : + {}; + } + copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; + + event = new jQuery.Event( originalEvent ); + + i = copy.length; + while ( i-- ) { + prop = copy[ i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Support: IE<9 + // Fix target property (#1925) + if ( !event.target ) { + event.target = originalEvent.srcElement || document; + } + + // Support: Chrome 23+, Safari? + // Target should not be a text node (#504, #13143) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // Support: IE<9 + // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) + event.metaKey = !!event.metaKey; + + return fixHook.filter ? fixHook.filter( event, originalEvent ) : event; + }, + + // Includes some event props shared by KeyEvent and MouseEvent + props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), + + fixHooks: {}, + + keyHooks: { + props: "char charCode key keyCode".split(" "), + filter: function( event, original ) { + + // Add which for key events + if ( event.which == null ) { + event.which = original.charCode != null ? original.charCode : original.keyCode; + } + + return event; + } + }, + + mouseHooks: { + props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), + filter: function( event, original ) { + var body, eventDoc, doc, + button = original.button, + fromElement = original.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && original.clientX != null ) { + eventDoc = event.target.ownerDocument || document; + doc = eventDoc.documentElement; + body = eventDoc.body; + + event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); + event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && fromElement ) { + event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && button !== undefined ) { + event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
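+			/* DOM `button` is a bitmask (1 = left, 2 = right, 4 = middle) while
+			   `which` is ordinal (1 = left, 2 = middle, 3 = right), hence this
+			   remapping. */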
2 : 0 ) ) ); + } + + return event; + } + }, + + special: { + load: { + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + try { + this.focus(); + return false; + } catch ( e ) { + // Support: IE<9 + // If we error on focus to hidden element (#1486, #12518), + // let .trigger() run the handlers + } + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return jQuery.nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + }, + + simulate: function( type, elem, event, bubble ) { + // Piggyback on a donor event to simulate a different one. + // Fake originalEvent to avoid donor's stopPropagation, but if the + // simulated event prevents default then we do the same on the donor. + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true, + originalEvent: {} + } + ); + if ( bubble ) { + jQuery.event.trigger( e, null, elem ); + } else { + jQuery.event.dispatch.call( elem, e ); + } + if ( e.isDefaultPrevented() ) { + event.preventDefault(); + } + } +}; + +jQuery.removeEvent = document.removeEventListener ? + function( elem, type, handle ) { + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle, false ); + } + } : + function( elem, type, handle ) { + var name = "on" + type; + + if ( elem.detachEvent ) { + + // #8545, #7054, preventing memory leaks for custom events in IE6-8 + // detachEvent needed property on element, by name of that event, to properly expose it to GC + if ( typeof elem[ name ] === strundefined ) { + elem[ name ] = null; + } + + elem.detachEvent( name, handle ); + } + }; + +jQuery.Event = function( src, props ) { + // Allow instantiation without the 'new' keyword + if ( !(this instanceof jQuery.Event) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + // Support: IE < 9, Android < 4.0 + src.returnValue === false ? 
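+		/* Old IE and Android < 4.0 expose returnValue instead of
+		   defaultPrevented, so returnValue === false is read as "a handler
+		   lower down already called preventDefault()". */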
+ returnTrue : + returnFalse; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + if ( !e ) { + return; + } + + // If preventDefault exists, run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + + // Support: IE + // Otherwise set the returnValue property of the original event to false + } else { + e.returnValue = false; + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + if ( !e ) { + return; + } + // If stopPropagation exists, run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + + // Support: IE + // Set the cancelBubble property of the original event to true + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && e.stopImmediatePropagation ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Create mouseenter/leave events using mouseover/out and event-time checks +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mousenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || (related !== target && !jQuery.contains( target, related )) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +}); + +// IE submit delegation +if ( !support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Lazy-add a submit handler when a descendant form may potentially be submitted + jQuery.event.add( this, "click._submit keypress._submit", function( e ) { + // Node name check avoids a VML-related crash in IE (#9807) + var elem = e.target, + form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? 
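+				/* Only form controls expose a .form back-reference; this is what
+				   lets a delegated handler such as (illustrative)
+				   jQuery( "#app" ).on( "submit", "form", fn ) work in oldIE,
+				   where submit does not bubble natively. */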
elem.form : undefined; + if ( form && !jQuery._data( form, "submitBubbles" ) ) { + jQuery.event.add( form, "submit._submit", function( event ) { + event._submit_bubble = true; + }); + jQuery._data( form, "submitBubbles", true ); + } + }); + // return undefined since we don't need an event listener + }, + + postDispatch: function( event ) { + // If form was submitted by the user, bubble the event up the tree + if ( event._submit_bubble ) { + delete event._submit_bubble; + if ( this.parentNode && !event.isTrigger ) { + jQuery.event.simulate( "submit", this.parentNode, event, true ); + } + } + }, + + teardown: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Remove delegated handlers; cleanData eventually reaps submit handlers attached above + jQuery.event.remove( this, "._submit" ); + } + }; +} + +// IE change delegation and checkbox/radio fix +if ( !support.changeBubbles ) { + + jQuery.event.special.change = { + + setup: function() { + + if ( rformElems.test( this.nodeName ) ) { + // IE doesn't fire change on a check/radio until blur; trigger it on click + // after a propertychange. Eat the blur-change in special.change.handle. + // This still fires onchange a second time for check/radio after blur. + if ( this.type === "checkbox" || this.type === "radio" ) { + jQuery.event.add( this, "propertychange._change", function( event ) { + if ( event.originalEvent.propertyName === "checked" ) { + this._just_changed = true; + } + }); + jQuery.event.add( this, "click._change", function( event ) { + if ( this._just_changed && !event.isTrigger ) { + this._just_changed = false; + } + // Allow triggered, simulated change events (#11500) + jQuery.event.simulate( "change", this, event, true ); + }); + } + return false; + } + // Delegated event; lazy-add a change handler on descendant inputs + jQuery.event.add( this, "beforeactivate._change", function( e ) { + var elem = e.target; + + if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { + jQuery.event.add( elem, "change._change", function( event ) { + if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { + jQuery.event.simulate( "change", this.parentNode, event, true ); + } + }); + jQuery._data( elem, "changeBubbles", true ); + } + }); + }, + + handle: function( event ) { + var elem = event.target; + + // Swallow native change events from checkbox/radio, we already triggered them above + if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { + return event.handleObj.handler.apply( this, arguments ); + } + }, + + teardown: function() { + jQuery.event.remove( this, "._change" ); + + return !rformElems.test( this.nodeName ); + } + }; +} + +// Create "bubbling" focus and blur events +if ( !support.focusinBubbles ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + jQuery._data( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ) - 1; 
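+
+				// The count above reference-counts the focusin/focusout
+				// emulation: a single capturing listener per document serves
+				// every bound handler (illustrative: each .on( "focusin", ... )
+				// bumps it; the last matching .off() removes the listener).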
+
+				if ( !attaches ) {
+					doc.removeEventListener( orig, handler, true );
+					jQuery._removeData( doc, fix );
+				} else {
+					jQuery._data( doc, fix, attaches );
+				}
+			}
+		};
+	});
+}
+
+jQuery.fn.extend({
+
+	on: function( types, selector, data, fn, /*INTERNAL*/ one ) {
+		var type, origFn;
+
+		// Types can be a map of types/handlers
+		if ( typeof types === "object" ) {
+			// ( types-Object, selector, data )
+			if ( typeof selector !== "string" ) {
+				// ( types-Object, data )
+				data = data || selector;
+				selector = undefined;
+			}
+			for ( type in types ) {
+				this.on( type, selector, data, types[ type ], one );
+			}
+			return this;
+		}
+
+		if ( data == null && fn == null ) {
+			// ( types, fn )
+			fn = selector;
+			data = selector = undefined;
+		} else if ( fn == null ) {
+			if ( typeof selector === "string" ) {
+				// ( types, selector, fn )
+				fn = data;
+				data = undefined;
+			} else {
+				// ( types, data, fn )
+				fn = data;
+				data = selector;
+				selector = undefined;
+			}
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		} else if ( !fn ) {
+			return this;
+		}
+
+		if ( one === 1 ) {
+			origFn = fn;
+			fn = function( event ) {
+				// Can use an empty set, since event contains the info
+				jQuery().off( event );
+				return origFn.apply( this, arguments );
+			};
+			// Use same guid so caller can remove using origFn
+			fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+		}
+		return this.each( function() {
+			jQuery.event.add( this, types, fn, data, selector );
+		});
+	},
+	one: function( types, selector, data, fn ) {
+		return this.on( types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+			// ( event ) dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each(function() {
+			jQuery.event.remove( this, types, fn, selector );
+		});
+	},
+
+	trigger: function( type, data ) {
+		return this.each(function() {
+			jQuery.event.trigger( type, data, this );
+		});
+	},
+	triggerHandler: function( type, data ) {
+		var elem = this[0];
+		if ( elem ) {
+			return jQuery.event.trigger( type, data, elem, true );
+		}
+	}
+});
+
+
+function createSafeFragment( document ) {
+	var list = nodeNames.split( "|" ),
+		safeFrag = document.createDocumentFragment();
+
+	if ( safeFrag.createElement ) {
+		while ( list.length ) {
+			safeFrag.createElement(
+				list.pop()
+			);
+		}
+	}
+	return safeFrag;
+}
+
+var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" +
+		"header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",
+	rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g,
+	rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"),
+	rleadingWhitespace = /^\s+/,
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,
+	rtagName = /<([\w:]+)/,
+	rtbody = /<tbody/i,
+	rhtml = /<|&#?\w+;/,
+	rnoInnerhtml = /<(?:script|style|link)/i,
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+	rscriptType = /^$|\/(?:java|ecma)script/i,
+	rscriptTypeMasked = /^true\/(.*)/,
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,
+
+	// We have to close these tags to support XHTML (#13200)
+	wrapMap = {
+		option: [ 1, "<select multiple='multiple'>", "</select>" ],
", "
" ], + area: [ 1, "", "" ], + param: [ 1, "", "" ], + thead: [ 1, "", "
" ], + tr: [ 2, "", "
" ], + col: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, + // unless wrapped in a div with non-breaking characters in front of it. + _default: support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X
", "
" ] + }, + safeFragment = createSafeFragment( document ), + fragmentDiv = safeFragment.appendChild( document.createElement("div") ); + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +function getAll( context, tag ) { + var elems, elem, + i = 0, + found = typeof context.getElementsByTagName !== strundefined ? context.getElementsByTagName( tag || "*" ) : + typeof context.querySelectorAll !== strundefined ? context.querySelectorAll( tag || "*" ) : + undefined; + + if ( !found ) { + for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { + if ( !tag || jQuery.nodeName( elem, tag ) ) { + found.push( elem ); + } else { + jQuery.merge( found, getAll( elem, tag ) ); + } + } + } + + return tag === undefined || tag && jQuery.nodeName( context, tag ) ? + jQuery.merge( [ context ], found ) : + found; +} + +// Used in buildFragment, fixes the defaultChecked property +function fixDefaultChecked( elem ) { + if ( rcheckableType.test( elem.type ) ) { + elem.defaultChecked = elem.checked; + } +} + +// Support: IE<8 +// Manipulating tables requires a tbody +function manipulationTarget( elem, content ) { + return jQuery.nodeName( elem, "table" ) && + jQuery.nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ? + + elem.getElementsByTagName("tbody")[0] || + elem.appendChild( elem.ownerDocument.createElement("tbody") ) : + elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = (jQuery.find.attr( elem, "type" ) !== null) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + if ( match ) { + elem.type = match[1]; + } else { + elem.removeAttribute("type"); + } + return elem; +} + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var elem, + i = 0; + for ( ; (elem = elems[i]) != null; i++ ) { + jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); + } +} + +function cloneCopyEvent( src, dest ) { + + if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { + return; + } + + var type, i, l, + oldData = jQuery._data( src ), + curData = jQuery._data( dest, oldData ), + events = oldData.events; + + if ( events ) { + delete curData.handle; + curData.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + + // make the cloned public data object a copy from the original + if ( curData.data ) { + curData.data = jQuery.extend( {}, curData.data ); + } +} + +function fixCloneNodeIssues( src, dest ) { + var nodeName, e, data; + + // We do not need to do anything for non-Elements + if ( dest.nodeType !== 1 ) { + return; + } + + nodeName = dest.nodeName.toLowerCase(); + + // IE6-8 copies events bound via attachEvent when using cloneNode. 
+ if ( !support.noCloneEvent && dest[ jQuery.expando ] ) { + data = jQuery._data( dest ); + + for ( e in data.events ) { + jQuery.removeEvent( dest, e, data.handle ); + } + + // Event data gets referenced instead of copied if the expando gets copied too + dest.removeAttribute( jQuery.expando ); + } + + // IE blanks contents when cloning scripts, and tries to evaluate newly-set text + if ( nodeName === "script" && dest.text !== src.text ) { + disableScript( dest ).text = src.text; + restoreScript( dest ); + + // IE6-10 improperly clones children of object elements using classid. + // IE10 throws NoModificationAllowedError if parent is null, #12132. + } else if ( nodeName === "object" ) { + if ( dest.parentNode ) { + dest.outerHTML = src.outerHTML; + } + + // This path appears unavoidable for IE9. When cloning an object + // element in IE9, the outerHTML strategy above is not sufficient. + // If the src has innerHTML and the destination does not, + // copy the src.innerHTML into the dest.innerHTML. #10324 + if ( support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { + dest.innerHTML = src.innerHTML; + } + + } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + // IE6-8 fails to persist the checked state of a cloned checkbox + // or radio button. Worse, IE6-7 fail to give the cloned element + // a checked appearance if the defaultChecked value isn't also set + + dest.defaultChecked = dest.checked = src.checked; + + // IE6-7 get confused and end up setting the value of a cloned + // checkbox/radio button to an empty string instead of "on" + if ( dest.value !== src.value ) { + dest.value = src.value; + } + + // IE6-8 fails to return the selected option to the default selected + // state when cloning options + } else if ( nodeName === "option" ) { + dest.defaultSelected = dest.selected = src.defaultSelected; + + // IE6-8 fails to set the defaultValue to the correct value when + // cloning other types of input fields + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +jQuery.extend({ + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var destElements, node, clone, i, srcElements, + inPage = jQuery.contains( elem.ownerDocument, elem ); + + if ( support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { + clone = elem.cloneNode( true ); + + // IE<=8 does not properly clone detached, unknown element nodes + } else { + fragmentDiv.innerHTML = elem.outerHTML; + fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); + } + + if ( (!support.noCloneEvent || !support.noCloneChecked) && + (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { + + // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + // Fix all IE cloning issues + for ( i = 0; (node = srcElements[i]) != null; ++i ) { + // Ensure that the destination node is not null; Fixes #9587 + if ( destElements[i] ) { + fixCloneNodeIssues( node, destElements[i] ); + } + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0; (node = srcElements[i]) != null; i++ ) { + cloneCopyEvent( node, destElements[i] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + 
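+		// (Scripts that already ran once are tagged "globalEval" via
+		// setGlobalEval above, so re-inserting the clone does not execute
+		// them again -- illustrative: jQuery( "#tpl" ).clone().appendTo( "body" )
+		// must not re-run #tpl's inline scripts.)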
		destElements = getAll( clone, "script" );
+		if ( destElements.length > 0 ) {
+			setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
+		}
+
+		destElements = srcElements = node = null;
+
+		// Return the cloned set
+		return clone;
+	},
+
+	buildFragment: function( elems, context, scripts, selection ) {
+		var j, elem, contains,
+			tmp, tag, tbody, wrap,
+			l = elems.length,
+
+			// Ensure a safe fragment
+			safe = createSafeFragment( context ),
+
+			nodes = [],
+			i = 0;
+
+		for ( ; i < l; i++ ) {
+			elem = elems[ i ];
+
+			if ( elem || elem === 0 ) {
+
+				// Add nodes directly
+				if ( jQuery.type( elem ) === "object" ) {
+					jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
+
+				// Convert non-html into a text node
+				} else if ( !rhtml.test( elem ) ) {
+					nodes.push( context.createTextNode( elem ) );
+
+				// Convert html into DOM nodes
+				} else {
+					tmp = tmp || safe.appendChild( context.createElement("div") );
+
+					// Deserialize a standard representation
+					tag = (rtagName.exec( elem ) || [ "", "" ])[ 1 ].toLowerCase();
+					wrap = wrapMap[ tag ] || wrapMap._default;
+
+					tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1></$2>" ) + wrap[2];
+
+					// Descend through wrappers to the right content
+					j = wrap[0];
+					while ( j-- ) {
+						tmp = tmp.lastChild;
+					}
+
+					// Manually add leading whitespace removed by IE
+					if ( !support.leadingWhitespace && rleadingWhitespace.test( elem ) ) {
+						nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) );
+					}
+
+					// Remove IE's autoinserted <tbody> from table fragments
+					if ( !support.tbody ) {
+
+						// String was a <table>, *may* have spurious <tbody>
+						elem = tag === "table" && !rtbody.test( elem ) ?
+							tmp.firstChild :
+
+							// String was a bare <thead> or <tbody>
+							wrap[1] === "<table>
" && !rtbody.test( elem ) ? + tmp : + 0; + + j = elem && elem.childNodes.length; + while ( j-- ) { + if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { + elem.removeChild( tbody ); + } + } + } + + jQuery.merge( nodes, tmp.childNodes ); + + // Fix #12392 for WebKit and IE > 9 + tmp.textContent = ""; + + // Fix #12392 for oldIE + while ( tmp.firstChild ) { + tmp.removeChild( tmp.firstChild ); + } + + // Remember the top-level container for proper cleanup + tmp = safe.lastChild; + } + } + } + + // Fix #11356: Clear elements from fragment + if ( tmp ) { + safe.removeChild( tmp ); + } + + // Reset defaultChecked for any radios and checkboxes + // about to be appended to the DOM in IE 6/7 (#8060) + if ( !support.appendChecked ) { + jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); + } + + i = 0; + while ( (elem = nodes[ i++ ]) ) { + + // #4087 - If origin and destination elements are the same, and this is + // that element, do not do anything + if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( safe.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( (elem = tmp[ j++ ]) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + tmp = null; + + return safe; + }, + + cleanData: function( elems, /* internal */ acceptData ) { + var elem, type, id, data, + i = 0, + internalKey = jQuery.expando, + cache = jQuery.cache, + deleteExpando = support.deleteExpando, + special = jQuery.event.special; + + for ( ; (elem = elems[i]) != null; i++ ) { + if ( acceptData || jQuery.acceptData( elem ) ) { + + id = elem[ internalKey ]; + data = id && cache[ id ]; + + if ( data ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Remove cache only if it was not already removed by jQuery.event.remove + if ( cache[ id ] ) { + + delete cache[ id ]; + + // IE does not allow us to delete expando properties from nodes, + // nor does it have a removeAttribute function on Document nodes; + // we must handle all of these cases + if ( deleteExpando ) { + delete elem[ internalKey ]; + + } else if ( typeof elem.removeAttribute !== strundefined ) { + elem.removeAttribute( internalKey ); + + } else { + elem[ internalKey ] = null; + } + + deletedIds.push( id ); + } + } + } + } + } +}); + +jQuery.fn.extend({ + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); + }, null, value, arguments.length ); + }, + + append: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + }); + }, + + prepend: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + }); + }, + + before: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + }); + }, + + after: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + }); + }, + + remove: function( selector, keepData /* Internal Use Only */ ) { + var elem, + elems = selector ? jQuery.filter( selector, this ) : this, + i = 0; + + for ( ; (elem = elems[i]) != null; i++ ) { + + if ( !keepData && elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem ) ); + } + + if ( elem.parentNode ) { + if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { + setGlobalEval( getAll( elem, "script" ) ); + } + elem.parentNode.removeChild( elem ); + } + } + + return this; + }, + + empty: function() { + var elem, + i = 0; + + for ( ; (elem = this[i]) != null; i++ ) { + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + } + + // Remove any remaining nodes + while ( elem.firstChild ) { + elem.removeChild( elem.firstChild ); + } + + // If this is a select, ensure that it displays empty (#12336) + // Support: IE<9 + if ( elem.options && jQuery.nodeName( elem, "select" ) ) { + elem.options.length = 0; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map(function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + }); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined ) { + return elem.nodeType === 1 ? 
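+				/* rinlinejQuery strips the expando attributes oldIE serializes
+				   into innerHTML (e.g. jQuery1234="5"), so .html() round-trips
+				   cleanly. */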
+					elem.innerHTML.replace( rinlinejQuery, "" ) :
+					undefined;
+			}
+
+			// See if we can take a shortcut and just use innerHTML
+			if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
+				( support.htmlSerialize || !rnoshimcache.test( value ) ) &&
+				( support.leadingWhitespace || !rleadingWhitespace.test( value ) ) &&
+				!wrapMap[ (rtagName.exec( value ) || [ "", "" ])[ 1 ].toLowerCase() ] ) {
+
+				value = value.replace( rxhtmlTag, "<$1></$2>" );
+
+				try {
+					for (; i < l; i++ ) {
+						// Remove element nodes and prevent memory leaks
+						elem = this[i] || {};
+						if ( elem.nodeType === 1 ) {
+							jQuery.cleanData( getAll( elem, false ) );
+							elem.innerHTML = value;
+						}
+					}
+
+					elem = 0;
+
+				// If using innerHTML throws an exception, use the fallback method
+				} catch(e) {}
+			}
+
+			if ( elem ) {
+				this.empty().append( value );
+			}
+		}, null, value, arguments.length );
+	},
+
+	replaceWith: function() {
+		var arg = arguments[ 0 ];
+
+		// Make the changes, replacing each context element with the new content
+		this.domManip( arguments, function( elem ) {
+			arg = this.parentNode;
+
+			jQuery.cleanData( getAll( this ) );
+
+			if ( arg ) {
+				arg.replaceChild( elem, this );
+			}
+		});
+
+		// Force removal if there was no new content (e.g., from empty arguments)
+		return arg && (arg.length || arg.nodeType) ? this : this.remove();
+	},
+
+	detach: function( selector ) {
+		return this.remove( selector, true );
+	},
+
+	domManip: function( args, callback ) {
+
+		// Flatten any nested arrays
+		args = concat.apply( [], args );
+
+		var first, node, hasScripts,
+			scripts, doc, fragment,
+			i = 0,
+			l = this.length,
+			set = this,
+			iNoClone = l - 1,
+			value = args[0],
+			isFunction = jQuery.isFunction( value );
+
+		// We can't cloneNode fragments that contain checked, in WebKit
+		if ( isFunction ||
+				( l > 1 && typeof value === "string" &&
+					!support.checkClone && rchecked.test( value ) ) ) {
+			return this.each(function( index ) {
+				var self = set.eq( index );
+				if ( isFunction ) {
+					args[0] = value.call( this, index, self.html() );
+				}
+				self.domManip( args, callback );
+			});
+		}
+
+		if ( l ) {
+			fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, this );
+			first = fragment.firstChild;
+
+			if ( fragment.childNodes.length === 1 ) {
+				fragment = first;
+			}
+
+			if ( first ) {
+				scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
+				hasScripts = scripts.length;
+
+				// Use the original fragment for the last item instead of the first because it can end up
+				// being emptied incorrectly in certain situations (#8070).
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( this[i], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { + + if ( node.src ) { + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); + } + } + } + } + + // Fix #11809: Avoid leaking memory + fragment = first = null; + } + } + + return this; + } +}); + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + i = 0, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone(true); + jQuery( insert[i] )[ original ]( elems ); + + // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +}); + + +var iframe, + elemdisplay = {}; + +/** + * Retrieve the actual display of a element + * @param {String} name nodeName of the element + * @param {Object} doc Document object + */ +// Called only from within defaultDisplay +function actualDisplay( name, doc ) { + var style, + elem = jQuery( doc.createElement( name ) ).appendTo( doc.body ), + + // getDefaultComputedStyle might be reliably used only on attached element + display = window.getDefaultComputedStyle && ( style = window.getDefaultComputedStyle( elem[ 0 ] ) ) ? + + // Use of this method is a temporary fix (more like optmization) until something better comes along, + // since it was removed from specification and supported only in FF + style.display : jQuery.css( elem[ 0 ], "display" ); + + // We don't have any data stored on the element, + // so use "detach" method as fast way to get rid of the element + elem.detach(); + + return display; +} + +/** + * Try to determine the default display value of an element + * @param {String} nodeName + */ +function defaultDisplay( nodeName ) { + var doc = document, + display = elemdisplay[ nodeName ]; + + if ( !display ) { + display = actualDisplay( nodeName, doc ); + + // If the simple way fails, read from inside an iframe + if ( display === "none" || !display ) { + + // Use the already-created iframe if possible + iframe = (iframe || jQuery( "