elasticinference.d.ts

import {Request} from '../lib/request';
import {Response} from '../lib/response';
import {AWSError} from '../lib/error';
import {Service} from '../lib/service';
import {ServiceConfigurationOptions} from '../lib/service';
import {ConfigBase as Config} from '../lib/config-base';
interface Blob {}
declare class ElasticInference extends Service {
  /**
   * Constructs a service object. This object has one method for each API operation.
   */
  constructor(options?: ElasticInference.Types.ClientConfiguration)
  config: Config & ElasticInference.Types.ClientConfiguration;
  /**
   * Describes the locations in which a given accelerator type or set of types is present in a given region. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  describeAcceleratorOfferings(params: ElasticInference.Types.DescribeAcceleratorOfferingsRequest, callback?: (err: AWSError, data: ElasticInference.Types.DescribeAcceleratorOfferingsResponse) => void): Request<ElasticInference.Types.DescribeAcceleratorOfferingsResponse, AWSError>;
  /**
   * Describes the locations in which a given accelerator type or set of types is present in a given region. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  describeAcceleratorOfferings(callback?: (err: AWSError, data: ElasticInference.Types.DescribeAcceleratorOfferingsResponse) => void): Request<ElasticInference.Types.DescribeAcceleratorOfferingsResponse, AWSError>;
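  /*
   * Illustrative usage sketch (not part of the generated declarations): calling
   * describeAcceleratorOfferings with the callback interface. Assumes the aws-sdk v2
   * package, configured credentials, and a region where EI was offered; the
   * accelerator type name 'eia2.medium' is just an example value.
   *
   *   import ElasticInference = require('aws-sdk/clients/elasticinference');
   *   const ei = new ElasticInference({region: 'us-west-2'});
   *   ei.describeAcceleratorOfferings(
   *     {locationType: 'availability-zone', acceleratorTypes: ['eia2.medium']},
   *     (err, data) => {
   *       if (err) console.error(err);
   *       else console.log(data.acceleratorTypeOfferings);
   *     }
   *   );
   */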
  /**
   * Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  describeAcceleratorTypes(params: ElasticInference.Types.DescribeAcceleratorTypesRequest, callback?: (err: AWSError, data: ElasticInference.Types.DescribeAcceleratorTypesResponse) => void): Request<ElasticInference.Types.DescribeAcceleratorTypesResponse, AWSError>;
  /**
   * Describes the accelerator types available in a given region, as well as their characteristics, such as memory and throughput. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  describeAcceleratorTypes(callback?: (err: AWSError, data: ElasticInference.Types.DescribeAcceleratorTypesResponse) => void): Request<ElasticInference.Types.DescribeAcceleratorTypesResponse, AWSError>;
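  /*
   * Illustrative usage sketch (not part of the generated declarations): listing the
   * available accelerator types through the promise interface returned by
   * Request.promise(), assuming the aws-sdk v2 package and configured credentials.
   *
   *   const ei = new ElasticInference({region: 'us-west-2'});
   *   ei.describeAcceleratorTypes().promise()
   *     .then(data => console.log(data.acceleratorTypes))
   *     .catch(err => console.error(err));
   */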
  /**
   * Describes information over a provided set of accelerators belonging to an account. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  describeAccelerators(params: ElasticInference.Types.DescribeAcceleratorsRequest, callback?: (err: AWSError, data: ElasticInference.Types.DescribeAcceleratorsResponse) => void): Request<ElasticInference.Types.DescribeAcceleratorsResponse, AWSError>;
  /**
   * Describes information over a provided set of accelerators belonging to an account. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  describeAccelerators(callback?: (err: AWSError, data: ElasticInference.Types.DescribeAcceleratorsResponse) => void): Request<ElasticInference.Types.DescribeAcceleratorsResponse, AWSError>;
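  /*
   * Illustrative usage sketch (not part of the generated declarations): describing the
   * accelerators attached to one EC2 instance via the instance-id filter. Assumes the
   * aws-sdk v2 package; the instance id is a placeholder.
   *
   *   const ei = new ElasticInference({region: 'us-west-2'});
   *   ei.describeAccelerators({
   *     filters: [{name: 'instance-id', values: ['i-0123456789abcdef0']}],
   *   }).promise()
   *     .then(data => console.log(data.acceleratorSet))
   *     .catch(err => console.error(err));
   */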
  /**
   * Returns all tags of an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  listTagsForResource(params: ElasticInference.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: ElasticInference.Types.ListTagsForResourceResult) => void): Request<ElasticInference.Types.ListTagsForResourceResult, AWSError>;
  /**
   * Returns all tags of an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  listTagsForResource(callback?: (err: AWSError, data: ElasticInference.Types.ListTagsForResourceResult) => void): Request<ElasticInference.Types.ListTagsForResourceResult, AWSError>;
  /**
   * Adds the specified tags to an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  tagResource(params: ElasticInference.Types.TagResourceRequest, callback?: (err: AWSError, data: ElasticInference.Types.TagResourceResult) => void): Request<ElasticInference.Types.TagResourceResult, AWSError>;
  /**
   * Adds the specified tags to an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  tagResource(callback?: (err: AWSError, data: ElasticInference.Types.TagResourceResult) => void): Request<ElasticInference.Types.TagResourceResult, AWSError>;
  /**
   * Removes the specified tags from an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  untagResource(params: ElasticInference.Types.UntagResourceRequest, callback?: (err: AWSError, data: ElasticInference.Types.UntagResourceResult) => void): Request<ElasticInference.Types.UntagResourceResult, AWSError>;
  /**
   * Removes the specified tags from an Elastic Inference Accelerator. February 15, 2023: Starting April 15, 2023, AWS will not onboard new customers to Amazon Elastic Inference (EI), and will help current customers migrate their workloads to options that offer better price and performance. After April 15, 2023, new customers will not be able to launch instances with Amazon EI accelerators in Amazon SageMaker, Amazon ECS, or Amazon EC2. However, customers who have used Amazon EI at least once during the past 30-day period are considered current customers and will be able to continue using the service.
   */
  untagResource(callback?: (err: AWSError, data: ElasticInference.Types.UntagResourceResult) => void): Request<ElasticInference.Types.UntagResourceResult, AWSError>;
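  /*
   * Illustrative usage sketch (not part of the generated declarations): a tagging
   * round trip, run inside an async function. Assumes the aws-sdk v2 package; the
   * accelerator ARN and tag values are placeholders.
   *
   *   const ei = new ElasticInference({region: 'us-west-2'});
   *   const resourceArn = 'arn:aws:elastic-inference:us-west-2:123456789012:elastic-inference-accelerator/eia-0123456789abcdef0';
   *   await ei.tagResource({resourceArn, tags: {team: 'ml-platform'}}).promise();
   *   const {tags} = await ei.listTagsForResource({resourceArn}).promise();
   *   console.log(tags);
   *   await ei.untagResource({resourceArn, tagKeys: ['team']}).promise();
   */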
}
declare namespace ElasticInference {
  export type AcceleratorHealthStatus = string;
  export type AcceleratorId = string;
  export type AcceleratorIdList = AcceleratorId[];
  export interface AcceleratorType {
    /**
     * The name of the Elastic Inference Accelerator type.
     */
    acceleratorTypeName?: AcceleratorTypeName;
    /**
     * The memory information of the Elastic Inference Accelerator type.
     */
    memoryInfo?: MemoryInfo;
    /**
     * The throughput information of the Elastic Inference Accelerator type.
     */
    throughputInfo?: ThroughputInfoList;
  }
  export type AcceleratorTypeList = AcceleratorType[];
  export type AcceleratorTypeName = string;
  export type AcceleratorTypeNameList = AcceleratorTypeName[];
  export interface AcceleratorTypeOffering {
    /**
     * The name of the Elastic Inference Accelerator type.
     */
    acceleratorType?: AcceleratorTypeName;
    /**
     * The location type for the offering. It can assume the following values: region: defines that the offering is at the regional level. availability-zone: defines that the offering is at the availability zone level. availability-zone-id: defines that the offering is at the availability zone level, defined by the availability zone id.
     */
    locationType?: LocationType;
    /**
     * The location for the offering. It will return either the region, availability zone or availability zone id for the offering depending on the locationType value.
     */
    location?: Location;
  }
  export type AcceleratorTypeOfferingList = AcceleratorTypeOffering[];
  export type AvailabilityZone = string;
  export interface DescribeAcceleratorOfferingsRequest {
    /**
     * The location type that you want to describe accelerator type offerings for. It can assume the following values: region: will return the accelerator type offering at the regional level. availability-zone: will return the accelerator type offering at the availability zone level. availability-zone-id: will return the accelerator type offering at the availability zone level returning the availability zone id.
     */
    locationType: LocationType;
    /**
     * The list of accelerator types to describe.
     */
    acceleratorTypes?: AcceleratorTypeNameList;
  }
  export interface DescribeAcceleratorOfferingsResponse {
    /**
     * The list of accelerator type offerings for a specific location.
     */
    acceleratorTypeOfferings?: AcceleratorTypeOfferingList;
  }
  export interface DescribeAcceleratorTypesRequest {
  }
  export interface DescribeAcceleratorTypesResponse {
    /**
     * The available accelerator types.
     */
    acceleratorTypes?: AcceleratorTypeList;
  }
  export interface DescribeAcceleratorsRequest {
    /**
     * The IDs of the accelerators to describe.
     */
    acceleratorIds?: AcceleratorIdList;
    /**
     * One or more filters. Filter names and values are case-sensitive. Valid filter names are: accelerator-types: can provide a list of accelerator type names to filter for. instance-id: can provide a list of EC2 instance ids to filter for.
     */
    filters?: FilterList;
    /**
     * The total number of items to return in the command's output. If the total number of items available is more than the value specified, a NextToken is provided in the command's output. To resume pagination, provide the NextToken value in the starting-token argument of a subsequent command. Do not use the NextToken response element directly outside of the AWS CLI.
     */
    maxResults?: MaxResults;
    /**
     * A token to specify where to start paginating. This is the NextToken from a previously truncated response.
     */
    nextToken?: NextToken;
  }
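  /*
   * Illustrative pagination sketch (not part of the generated declarations): walking
   * all DescribeAccelerators pages with maxResults and nextToken, run inside an async
   * function. Assumes the aws-sdk v2 package; the page size is an example value.
   *
   *   const ei = new ElasticInference({region: 'us-west-2'});
   *   let nextToken: string | undefined;
   *   do {
   *     const page = await ei.describeAccelerators({maxResults: 100, nextToken}).promise();
   *     (page.acceleratorSet ?? []).forEach(a => console.log(a.acceleratorId, a.acceleratorHealth?.status));
   *     nextToken = page.nextToken;
   *   } while (nextToken);
   */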
  export interface DescribeAcceleratorsResponse {
    /**
     * The details of the Elastic Inference Accelerators.
     */
    acceleratorSet?: ElasticInferenceAcceleratorSet;
    /**
     * A token to specify where to start paginating. This is the NextToken from a previously truncated response.
     */
    nextToken?: NextToken;
  }
  export interface ElasticInferenceAccelerator {
    /**
     * The health of the Elastic Inference Accelerator.
     */
    acceleratorHealth?: ElasticInferenceAcceleratorHealth;
    /**
     * The type of the Elastic Inference Accelerator.
     */
    acceleratorType?: AcceleratorTypeName;
    /**
     * The ID of the Elastic Inference Accelerator.
     */
    acceleratorId?: AcceleratorId;
    /**
     * The availability zone where the Elastic Inference Accelerator is present.
     */
    availabilityZone?: AvailabilityZone;
    /**
     * The ARN of the resource that the Elastic Inference Accelerator is attached to.
     */
    attachedResource?: ResourceArn;
  }
  export interface ElasticInferenceAcceleratorHealth {
    /**
     * The health status of the Elastic Inference Accelerator.
     */
    status?: AcceleratorHealthStatus;
  }
  export type ElasticInferenceAcceleratorSet = ElasticInferenceAccelerator[];
  export interface Filter {
    /**
     * The filter name for the Elastic Inference Accelerator list. It can assume the following values: accelerator-type: the type of Elastic Inference Accelerator to filter for. instance-id: an EC2 instance id to filter for.
     */
    name?: FilterName;
    /**
     * The values for the filter of the Elastic Inference Accelerator list.
     */
    values?: ValueStringList;
  }
  export type FilterList = Filter[];
  export type FilterName = string;
  export type Integer = number;
  export type Key = string;
  export interface KeyValuePair {
    /**
     * The throughput value of the Elastic Inference Accelerator type. It can assume the following values: TFLOPS16bit: the throughput expressed in 16bit TeraFLOPS. TFLOPS32bit: the throughput expressed in 32bit TeraFLOPS.
     */
    key?: Key;
    /**
     * The throughput value of the Elastic Inference Accelerator type.
     */
    value?: Value;
  }
  export interface ListTagsForResourceRequest {
    /**
     * The ARN of the Elastic Inference Accelerator to list the tags for.
     */
    resourceArn: ResourceARN;
  }
  export interface ListTagsForResourceResult {
    /**
     * The tags of the Elastic Inference Accelerator.
     */
    tags?: TagMap;
  }
  export type Location = string;
  export type LocationType = "region"|"availability-zone"|"availability-zone-id"|string;
  export type MaxResults = number;
  export interface MemoryInfo {
    /**
     * The size in mebibytes of the Elastic Inference Accelerator type.
     */
    sizeInMiB?: Integer;
  }
  export type NextToken = string;
  export type ResourceARN = string;
  export type ResourceArn = string;
  export type String = string;
  export type TagKey = string;
  export type TagKeyList = TagKey[];
  export type TagMap = {[key: string]: TagValue};
  export interface TagResourceRequest {
    /**
     * The ARN of the Elastic Inference Accelerator to tag.
     */
    resourceArn: ResourceARN;
    /**
     * The tags to add to the Elastic Inference Accelerator.
     */
    tags: TagMap;
  }
  export interface TagResourceResult {
  }
  export type TagValue = string;
  export type ThroughputInfoList = KeyValuePair[];
  export interface UntagResourceRequest {
    /**
     * The ARN of the Elastic Inference Accelerator to untag.
     */
    resourceArn: ResourceARN;
    /**
     * The list of tags to remove from the Elastic Inference Accelerator.
     */
    tagKeys: TagKeyList;
  }
  export interface UntagResourceResult {
  }
  export type Value = number;
  export type ValueStringList = String[];
  /**
   * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
   */
  export type apiVersion = "2017-07-25"|"latest"|string;
  export interface ClientApiVersions {
    /**
     * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
     */
    apiVersion?: apiVersion;
  }
  export type ClientConfiguration = ServiceConfigurationOptions & ClientApiVersions;
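  /*
   * Illustrative configuration sketch (not part of the generated declarations): pinning
   * the client to the API version declared above via ClientConfiguration; region and
   * credentials handling follow the usual aws-sdk v2 conventions.
   *
   *   const ei = new ElasticInference({apiVersion: '2017-07-25', region: 'us-west-2'});
   */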
  /**
   * Contains interfaces for use with the ElasticInference client.
   */
  export import Types = ElasticInference;
}
export = ElasticInference;