|
9 | 9 | "DeleteInferenceScheduler": "<p>Deletes an inference scheduler that has been set up. Already processed output results are not affected. </p>", |
10 | 10 | "DeleteModel": "<p>Deletes an ML model currently available for Amazon Lookout for Equipment. This will prevent it from being used with an inference scheduler, even one that is already set up. </p>", |
11 | 11 | "DescribeDataIngestionJob": "<p>Provides information on a specific data ingestion job such as creation time, dataset ARN, status, and so on. </p>", |
12 | | - "DescribeDataset": "<p>Provides information on a specified dataset such as the schema location, status, and so on.</p>", |
| 12 | + "DescribeDataset": "<p>Provides a JSON description of the data that is in each time series dataset, including names, column names, and data types.</p>", |
13 | 13 | "DescribeInferenceScheduler": "<p> Specifies information about the inference scheduler being used, including name, model, status, and associated metadata </p>", |
14 | | - "DescribeModel": "<p>Provides overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on. </p>", |
| 14 | + "DescribeModel": "<p>Provides a JSON containing the overall information about a specific ML model, including model name and ARN, dataset, training and evaluation information, status, and so on. </p>", |
15 | 15 | "ListDataIngestionJobs": "<p>Provides a list of all data ingestion jobs, including dataset name and ARN, S3 location of the input data, status, and so on. </p>", |
16 | 16 | "ListDatasets": "<p>Lists all datasets currently available in your account, filtering on the dataset name. </p>", |
17 | 17 | "ListInferenceExecutions": "<p> Lists all inference executions that have been performed by the specified inference scheduler. </p>", |
|
98 | 98 | "DataDelayOffsetInMinutes": { |
99 | 99 | "base": null, |
100 | 100 | "refs": { |
101 | | - "CreateInferenceSchedulerRequest$DataDelayOffsetInMinutes": "<p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data. </p>", |
| 101 | + "CreateInferenceSchedulerRequest$DataDelayOffsetInMinutes": "<p>A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data. </p>", |
102 | 102 | "DescribeInferenceSchedulerResponse$DataDelayOffsetInMinutes": "<p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.</p>", |
103 | | - "InferenceSchedulerSummary$DataDelayOffsetInMinutes": "<p>> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if an offset delay time of five minutes was selected, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data. </p>", |
104 | | - "UpdateInferenceSchedulerRequest$DataDelayOffsetInMinutes": "<p>> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.</p>" |
| 103 | + "InferenceSchedulerSummary$DataDelayOffsetInMinutes": "<p>A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if an offset delay time of five minutes was selected, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data. </p>", |
| 104 | + "UpdateInferenceSchedulerRequest$DataDelayOffsetInMinutes": "<p> A period of time (in minutes) by which inference on the data is delayed after the data starts. For instance, if you select an offset delay time of five minutes, inference will not begin on the data until the first data measurement after the five minute mark. For example, if five minutes is selected, the inference scheduler will wake up at the configured frequency with the additional five minute delay time to check the customer S3 bucket. The customer can upload data at the same frequency and they don't need to stop and restart the scheduler when uploading new data.</p>" |
105 | 105 | } |
106 | 106 | }, |
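To make the delay semantics concrete, here is a rough sketch of supplying DataDelayOffsetInMinutes when creating a scheduler with boto3; all names, ARNs, and bucket values are placeholders:

```python
# Sketch: a scheduler that wakes up every 5 minutes (DataUploadFrequency) and
# waits an additional 5 minutes (DataDelayOffsetInMinutes) for late-arriving
# data before it reads the input S3 bucket. All names/ARNs are placeholders.
import boto3

lookout = boto3.client("lookoutequipment")

lookout.create_inference_scheduler(
    ModelName="my-model",
    InferenceSchedulerName="my-scheduler",
    DataUploadFrequency="PT5M",        # how often the scheduler wakes up
    DataDelayOffsetInMinutes=5,        # extra wait before reading the bucket
    DataInputConfiguration={
        "S3InputConfiguration": {"Bucket": "my-input-bucket", "Prefix": "input/"}
    },
    DataOutputConfiguration={
        "S3OutputConfiguration": {"Bucket": "my-output-bucket", "Prefix": "output/"}
    },
    RoleArn="arn:aws:iam::111122223333:role/LookoutEquipmentRole",
)
```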
107 | 107 | "DataIngestionJobSummaries": { |
|
297 | 297 | } |
298 | 298 | }, |
299 | 299 | "InferenceInputConfiguration": { |
300 | | - "base": "<p>> Specifies configuration information for the input data for the inference, including S3 location of input data.. </p>", |
| 300 | + "base": "<p>Specifies configuration information for the input data for the inference, including S3 location of input data.. </p>", |
301 | 301 | "refs": { |
302 | 302 | "CreateInferenceSchedulerRequest$DataInputConfiguration": "<p>Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>", |
303 | 303 | "DescribeInferenceSchedulerResponse$DataInputConfiguration": "<p> Specifies configuration information for the input data for the inference scheduler, including delimiter, format, and dataset location. </p>", |
|
306 | 306 | } |
307 | 307 | }, |
308 | 308 | "InferenceInputNameConfiguration": { |
309 | | - "base": "<p>>> Specifies configuration information for the input data for the inference, including timestamp format and delimiter. </p>", |
| 309 | + "base": "<p>Specifies configuration information for the input data for the inference, including timestamp format and delimiter. </p>", |
310 | 310 | "refs": { |
311 | | - "InferenceInputConfiguration$InferenceInputNameConfiguration": "<p>> Specifies configuration information for the input data for the inference, including timestamp format and delimiter. </p>" |
| 311 | + "InferenceInputConfiguration$InferenceInputNameConfiguration": "<p>Specifies configuration information for the input data for the inference, including timestamp format and delimiter. </p>" |
312 | 312 | } |
313 | 313 | }, |
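For reference, a sketch of how InferenceInputNameConfiguration nests inside InferenceInputConfiguration when passed to the scheduler APIs; the bucket, prefix, timestamp format, and delimiter values are assumed placeholders:

```python
# Sketch of an InferenceInputConfiguration value. The nested
# InferenceInputNameConfiguration tells the scheduler how input file names
# encode the timestamp and component. Concrete values are placeholders.
data_input_configuration = {
    "S3InputConfiguration": {
        "Bucket": "my-input-bucket",
        "Prefix": "input/",
    },
    "InputTimeZoneOffset": "+00:00",
    "InferenceInputNameConfiguration": {
        "TimestampFormat": "yyyyMMddHHmmss",  # timestamp format in file names
        "ComponentTimestampDelimiter": "_",   # separates component from timestamp
    },
}
```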
314 | 314 | "InferenceOutputConfiguration": { |
|
438 | 438 | "KmsKeyArn": { |
439 | 439 | "base": null, |
440 | 440 | "refs": { |
441 | | - "DescribeDatasetResponse$ServerSideKmsKeyId": "<p>Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt dataset data by Amazon Lookout for Equipment. </p>", |
442 | | - "DescribeInferenceSchedulerResponse$ServerSideKmsKeyId": "<p>Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>", |
443 | | - "DescribeModelResponse$ServerSideKmsKeyId": "<p>Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt model data by Amazon Lookout for Equipment. </p>" |
| 441 | + "DescribeDatasetResponse$ServerSideKmsKeyId": "<p>Provides the identifier of the KMS key used to encrypt dataset data by Amazon Lookout for Equipment. </p>", |
| 442 | + "DescribeInferenceSchedulerResponse$ServerSideKmsKeyId": "<p>Provides the identifier of the KMS key used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>", |
| 443 | + "DescribeModelResponse$ServerSideKmsKeyId": "<p>Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. </p>" |
444 | 444 | } |
445 | 445 | }, |
446 | 446 | "LabelsInputConfiguration": { |
|
587 | 587 | "NameOrArn": { |
588 | 588 | "base": null, |
589 | 589 | "refs": { |
590 | | - "CreateDatasetRequest$ServerSideKmsKeyId": "<p>Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt dataset data by Amazon Lookout for Equipment. </p>", |
591 | | - "CreateInferenceSchedulerRequest$ServerSideKmsKeyId": "<p>Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>", |
592 | | - "CreateModelRequest$ServerSideKmsKeyId": "<p>Provides the identifier of the AWS KMS customer master key (CMK) used to encrypt model data by Amazon Lookout for Equipment. </p>", |
| 590 | + "CreateDatasetRequest$ServerSideKmsKeyId": "<p>Provides the identifier of the KMS key used to encrypt dataset data by Amazon Lookout for Equipment. </p>", |
| 591 | + "CreateInferenceSchedulerRequest$ServerSideKmsKeyId": "<p>Provides the identifier of the KMS key used to encrypt inference scheduler data by Amazon Lookout for Equipment. </p>", |
| 592 | + "CreateModelRequest$ServerSideKmsKeyId": "<p>Provides the identifier of the KMS key used to encrypt model data by Amazon Lookout for Equipment. </p>", |
593 | 593 | "InferenceOutputConfiguration$KmsKeyId": "<p>The ID number for the AWS KMS key used to encrypt the inference output. </p>" |
594 | 594 | } |
595 | 595 | }, |
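The same ServerSideKmsKeyId value accepted by the Create* requests here is what the Describe* responses above hand back. A minimal sketch with boto3 (the key ID and dataset name are placeholders, and depending on the API version a DatasetSchema may also be required):

```python
# Sketch: supply a customer managed KMS key at creation time, then read the
# field back from DescribeDataset. The key ID and names are placeholders.
import boto3

lookout = boto3.client("lookoutequipment")

lookout.create_dataset(
    DatasetName="my-dataset",
    ServerSideKmsKeyId="1234abcd-12ab-34cd-56ef-1234567890ab",  # key ID, alias, or ARN
    # Depending on the API version, a DatasetSchema argument may also be required.
)

described = lookout.describe_dataset(DatasetName="my-dataset")
print(described["ServerSideKmsKeyId"])
```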
|
608 | 608 | "ListModelsResponse$NextToken": "<p> An opaque pagination token indicating where to continue the listing of ML models. </p>" |
609 | 609 | } |
610 | 610 | }, |
| 611 | + "OffCondition": { |
| 612 | + "base": null, |
| 613 | + "refs": { |
| 614 | + "CreateModelRequest$OffCondition": "<p>Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.</p>", |
| 615 | + "DescribeModelResponse$OffCondition": "<p>Indicates that the asset associated with this sensor has been shut off. As long as this condition is met, Lookout for Equipment will not use data from this asset for training, evaluation, or inference.</p>" |
| 616 | + } |
| 617 | + }, |
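The new OffCondition field is accepted at model creation and echoed back by DescribeModel. A sketch of passing it with boto3; the model, dataset, and condition expression are placeholders, and the exact condition syntax is defined by the service documentation:

```python
# Sketch: train a model with an off condition so that data points captured
# while the asset is shut off are excluded from training, evaluation, and
# inference. Names and the condition expression are placeholders.
import boto3

lookout = boto3.client("lookoutequipment")

lookout.create_model(
    ModelName="my-model",
    DatasetName="my-dataset",
    OffCondition="Pump\\RotationSpeed < 1.0",  # placeholder condition expression
)

print(lookout.describe_model(ModelName="my-model")["OffCondition"])
```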
611 | 618 | "ResourceNotFoundException": { |
612 | 619 | "base": "<p> The resource requested could not be found. Verify the resource ID and retry your request. </p>", |
613 | 620 | "refs": { |
|