Skip to content

Commit 8630b59

Browse files
Updated API models and rebuilt service gems.
1 parent 934a6ad commit 8630b59

File tree

37 files changed

+8264
-7818
lines changed

37 files changed

+8264
-7818
lines changed

apis/qconnect/2020-10-19/api-2.json

Lines changed: 1153 additions & 1120 deletions
Large diffs are not rendered by default.

apis/qconnect/2020-10-19/docs-2.json

Lines changed: 127 additions & 108 deletions
Large diffs are not rendered by default.
apis/qconnect/2020-10-19/waiters-2.json (new file — filename inferred from the surrounding qconnect entries and the waiters content below; confirm against the commit)

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1 + {
2 + "version": 2,
3 + "waiters": {
4 + }
5 + }

apis/sagemaker/2017-07-24/api-2.json

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -18596,7 +18596,8 @@
18596 18596   "ModelPackageDescription":{"shape":"EntityDescription"},
18597 18597   "CreationTime":{"shape":"CreationTime"},
18598 18598   "ModelPackageStatus":{"shape":"ModelPackageStatus"},
18599       - "ModelApprovalStatus":{"shape":"ModelApprovalStatus"}
      18599 + "ModelApprovalStatus":{"shape":"ModelApprovalStatus"},
      18600 + "ModelLifeCycle":{"shape":"ModelLifeCycle"}
18600 18601   }
18601 18602   },
18602 18603   "ModelPackageSummaryList":{
@@ -20803,7 +20804,8 @@
20803 20804   "enum":[
20804 20805   "al2-ami-sagemaker-inference-gpu-2",
20805 20806   "al2-ami-sagemaker-inference-gpu-2-1",
20806       - "al2-ami-sagemaker-inference-gpu-3-1"
      20807 + "al2-ami-sagemaker-inference-gpu-3-1",
      20808 + "al2-ami-sagemaker-inference-neuron-2"
20807 20809   ]
20808 20810   },
20809 20811   "ProductionVariantInstanceType":{

apis/sagemaker/2017-07-24/docs-2.json

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10095,7 +10095,7 @@
10095 10095   "MaxPendingTimeInSeconds": {
10096 10096   "base": "Maximum job scheduler pending time in seconds.",
10097 10097   "refs": {
10098       - "StoppingCondition$MaxPendingTimeInSeconds": "<p>The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped.</p>"
      10098 + "StoppingCondition$MaxPendingTimeInSeconds": "<p>The maximum length of time, in seconds, that a training or compilation job can be pending before it is stopped.</p> <note> <p>When working with training jobs that use capacity from <a href=\"https://docs.aws.amazon.com/sagemaker/latest/dg/reserve-capacity-with-training-plans.html\">training plans</a>, not all <code>Pending</code> job states count against the <code>MaxPendingTimeInSeconds</code> limit. The following scenarios do not increment the <code>MaxPendingTimeInSeconds</code> counter:</p> <ul> <li> <p>The plan is in a <code>Scheduled</code> state: Jobs queued (in <code>Pending</code> status) before a plan's start date (waiting for scheduled start time)</p> </li> <li> <p>Between capacity reservations: Jobs temporarily back to <code>Pending</code> status between two capacity reservation periods</p> </li> </ul> <p> <code>MaxPendingTimeInSeconds</code> only increments when jobs are actively waiting for capacity in an <code>Active</code> plan.</p> </note>"
10099 10099   }
10100 10100   },
10101 10101   "MaxPercentageOfInputDatasetLabeled": {
@@ -10781,6 +10781,7 @@
10781 10781   "CreateModelPackageInput$ModelLifeCycle": "<p> A structure describing the current state of the model in its life cycle. </p>",
10782 10782   "DescribeModelPackageOutput$ModelLifeCycle": "<p> A structure describing the current state of the model in its life cycle. </p>",
10783 10783   "ModelPackage$ModelLifeCycle": "<p> A structure describing the current state of the model in its life cycle. </p>",
      10784 + "ModelPackageSummary$ModelLifeCycle": null,
10784 10785   "UpdateModelPackageInput$ModelLifeCycle": "<p> A structure describing the current state of the model in its life cycle. </p>"
10785 10786   }
10786 10787   },
@@ -13286,7 +13287,7 @@
13286 13287   "ProductionVariantInferenceAmiVersion": {
13287 13288   "base": null,
13288 13289   "refs": {
13289       - "ProductionVariant$InferenceAmiVersion": "<p>Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.</p> <p>By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.</p> <p>The AMI version names, and their configurations, are the following:</p> <dl> <dt>al2-ami-sagemaker-inference-gpu-2</dt> <dd> <ul> <li> <p>Accelerator: GPU</p> </li> <li> <p>NVIDIA driver version: 535</p> </li> <li> <p>CUDA version: 12.2</p> </li> </ul> </dd> <dt>al2-ami-sagemaker-inference-gpu-2-1</dt> <dd> <ul> <li> <p>Accelerator: GPU</p> </li> <li> <p>NVIDIA driver version: 535</p> </li> <li> <p>CUDA version: 12.2</p> </li> <li> <p>NVIDIA Container Toolkit with disabled CUDA-compat mounting</p> </li> </ul> </dd> <dt>al2-ami-sagemaker-inference-gpu-3-1</dt> <dd> <ul> <li> <p>Accelerator: GPU</p> </li> <li> <p>NVIDIA driver version: 550</p> </li> <li> <p>CUDA version: 12.4</p> </li> <li> <p>NVIDIA Container Toolkit with disabled CUDA-compat mounting</p> </li> </ul> </dd> </dl>"
      13290 + "ProductionVariant$InferenceAmiVersion": "<p>Specifies an option from a collection of preconfigured Amazon Machine Image (AMI) images. Each image is configured by Amazon Web Services with a set of software and driver versions. Amazon Web Services optimizes these configurations for different machine learning workloads.</p> <p>By selecting an AMI version, you can ensure that your inference environment is compatible with specific software requirements, such as CUDA driver versions, Linux kernel versions, or Amazon Web Services Neuron driver versions.</p> <p>The AMI version names, and their configurations, are the following:</p> <dl> <dt>al2-ami-sagemaker-inference-gpu-2</dt> <dd> <ul> <li> <p>Accelerator: GPU</p> </li> <li> <p>NVIDIA driver version: 535</p> </li> <li> <p>CUDA version: 12.2</p> </li> </ul> </dd> <dt>al2-ami-sagemaker-inference-gpu-2-1</dt> <dd> <ul> <li> <p>Accelerator: GPU</p> </li> <li> <p>NVIDIA driver version: 535</p> </li> <li> <p>CUDA version: 12.2</p> </li> <li> <p>NVIDIA Container Toolkit with disabled CUDA-compat mounting</p> </li> </ul> </dd> <dt>al2-ami-sagemaker-inference-gpu-3-1</dt> <dd> <ul> <li> <p>Accelerator: GPU</p> </li> <li> <p>NVIDIA driver version: 550</p> </li> <li> <p>CUDA version: 12.4</p> </li> <li> <p>NVIDIA Container Toolkit with disabled CUDA-compat mounting</p> </li> </ul> </dd> <dt>al2-ami-sagemaker-inference-neuron-2</dt> <dd> <ul> <li> <p>Accelerator: Inferentia2 and Trainium</p> </li> <li> <p>Neuron driver version: 2.19</p> </li> </ul> </dd> </dl>"
13290 13291   }
13291 13292   },
13292 13293   "ProductionVariantInstanceType": {

apis/service-quotas/2019-06-24/api-2.json

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -791,6 +791,12 @@
791 791   ]
792 792   },
793 793   "QuotaContextScopeType":{"type":"string"},
    794 + "QuotaDescription":{
    795 +   "type":"string",
    796 +   "max":350,
    797 +   "min":0,
    798 +   "pattern":"^.{0,350}$"
    799 + },
794 800   "QuotaExceededException":{
795 801   "type":"structure",
796 802   "members":{
@@ -831,7 +837,8 @@
831 837   "ServiceCode":{"shape":"ServiceCode"},
832 838   "QuotaCode":{"shape":"QuotaCode"},
833 839   "DesiredValue":{"shape":"QuotaValue"},
834     - "ContextId":{"shape":"QuotaContextId"}
    840 + "ContextId":{"shape":"QuotaContextId"},
    841 + "SupportCaseAllowed":{"shape":"SupportCaseAllowed"}
835 842   }
836 843   },
837 844   "RequestServiceQuotaIncreaseResponse":{
@@ -927,7 +934,8 @@
927 934   "Period":{"shape":"QuotaPeriod"},
928 935   "ErrorReason":{"shape":"ErrorReason"},
929 936   "QuotaAppliedAtLevel":{"shape":"AppliedLevelEnum"},
930     - "QuotaContext":{"shape":"QuotaContextInfo"}
    937 + "QuotaContext":{"shape":"QuotaContextInfo"},
    938 + "Description":{"shape":"QuotaDescription"}
931 939   }
932 940   },
933 941   "ServiceQuotaIncreaseRequestInTemplate":{
@@ -971,6 +979,7 @@
971 979   "min":1,
972 980   "pattern":"(Sum|Maximum)"
973 981   },
    982 + "SupportCaseAllowed":{"type":"boolean"},
974 983   "Tag":{
975 984   "type":"structure",
976 985   "required":[

0 commit comments

Comments
 (0)