// Code generated by smithy-go-codegen DO NOT EDIT.

package lookoutequipment

import (
	"context"
	"fmt"
	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/lookoutequipment/types"
	"github.com/aws/smithy-go/middleware"
	smithyhttp "github.com/aws/smithy-go/transport/http"
)

// Creates a scheduled inference. Scheduling an inference is setting up a
// continuous real-time inference plan to analyze new measurement data. When
// setting up the schedule, you provide an S3 bucket location for the input data,
// assign it a delimiter between separate entries in the data, set an offset delay
// if desired, and set the frequency of inferencing. You must also provide an S3
// bucket location for the output data.
func (c *Client) CreateInferenceScheduler(ctx context.Context, params *CreateInferenceSchedulerInput, optFns ...func(*Options)) (*CreateInferenceSchedulerOutput, error) {
	if params == nil {
		params = &CreateInferenceSchedulerInput{}
	}

	result, metadata, err := c.invokeOperation(ctx, "CreateInferenceScheduler", params, optFns, c.addOperationCreateInferenceSchedulerMiddlewares)
	if err != nil {
		return nil, err
	}

	out := result.(*CreateInferenceSchedulerOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type CreateInferenceSchedulerInput struct {

	// A unique identifier for the request. If you do not set the client request
	// token, Amazon Lookout for Equipment generates one.
	//
	// This member is required.
	ClientToken *string

	// Specifies configuration information for the input data for the inference
	// scheduler, including delimiter, format, and dataset location.
	//
	// This member is required.
	DataInputConfiguration *types.InferenceInputConfiguration

	// Specifies configuration information for the output results for the inference
	// scheduler, including the S3 location for the output.
	//
	// This member is required.
	DataOutputConfiguration *types.InferenceOutputConfiguration

	// How often data is uploaded to the source Amazon S3 bucket for the input data.
	// The value chosen is the length of time between data uploads. For instance, if
	// you select 5 minutes, Amazon Lookout for Equipment will upload the real-time
	// data to the source bucket once every 5 minutes. This frequency also determines
	// how often Amazon Lookout for Equipment runs inference on your data. For more
	// information, see Understanding the inference process
	// (https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html).
	//
	// This member is required.
	DataUploadFrequency types.DataUploadFrequency

	// The name of the inference scheduler being created.
	//
	// This member is required.
	InferenceSchedulerName *string

	// The name of the previously trained ML model being used to create the inference
	// scheduler.
	//
	// This member is required.
	ModelName *string

	// The Amazon Resource Name (ARN) of a role with permission to access the data
	// source being used for the inference.
	//
	// This member is required.
	RoleArn *string

	// The interval (in minutes) of planned delay at the start of each inference
	// segment. For example, if inference is set to run every ten minutes, the delay
	// is set to five minutes and the time is 09:08. The inference scheduler will
	// wake up at the configured interval (which, without a delay configured, would
	// be 09:10) plus the additional five minute delay time (so 09:15) to check your
	// Amazon S3 bucket. The delay provides a buffer for you to upload data at the
	// same frequency, so that you don't have to stop and restart the scheduler when
	// uploading new data. For more information, see Understanding the inference
	// process
	// (https://docs.aws.amazon.com/lookout-for-equipment/latest/ug/understanding-inference-process.html).
	DataDelayOffsetInMinutes *int64

	// Provides the identifier of the KMS key used to encrypt inference scheduler
	// data by Amazon Lookout for Equipment.
	ServerSideKmsKeyId *string

	// Any tags associated with the inference scheduler.
	Tags []types.Tag

	noSmithyDocumentSerde
}

type CreateInferenceSchedulerOutput struct {

	// The Amazon Resource Name (ARN) of the inference scheduler being created.
	InferenceSchedulerArn *string

	// The name of the inference scheduler being created.
	InferenceSchedulerName *string

	// Indicates the status of the CreateInferenceScheduler operation.
	Status types.InferenceSchedulerStatus

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata

	noSmithyDocumentSerde
}

func (c *Client) addOperationCreateInferenceSchedulerMiddlewares(stack *middleware.Stack, options Options) (err error) {
	err = stack.Serialize.Add(&awsAwsjson10_serializeOpCreateInferenceScheduler{}, middleware.After)
	if err != nil {
		return err
	}
	err = stack.Deserialize.Add(&awsAwsjson10_deserializeOpCreateInferenceScheduler{}, middleware.After)
	if err != nil {
		return err
	}
	if err = addSetLoggerMiddleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
		return err
	}
	if err = addResolveEndpointMiddleware(stack, options); err != nil {
		return err
	}
	if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
		return err
	}
	if err = addRetryMiddlewares(stack, options); err != nil {
		return err
	}
	if err = addHTTPSignerV4Middleware(stack, options); err != nil {
		return err
	}
	if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
		return err
	}
	if err = addClientUserAgent(stack, options); err != nil {
		return err
	}
	if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
		return err
	}
	if err = addIdempotencyToken_opCreateInferenceSchedulerMiddleware(stack, options); err != nil {
		return err
	}
	if err = addOpCreateInferenceSchedulerValidationMiddleware(stack); err != nil {
		return err
	}
	if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateInferenceScheduler(options.Region), middleware.Before); err != nil {
		return err
	}
	if err = awsmiddleware.AddRecursionDetection(stack); err != nil {
		return err
	}
	if err = addRequestIDRetrieverMiddleware(stack); err != nil {
		return err
	}
	if err = addResponseErrorMiddleware(stack); err != nil {
		return err
	}
	if err = addRequestResponseLogging(stack, options); err != nil {
		return err
	}
	return nil
}

type idempotencyToken_initializeOpCreateInferenceScheduler struct {
	tokenProvider IdempotencyTokenProvider
}

func (*idempotencyToken_initializeOpCreateInferenceScheduler) ID() string {
	return "OperationIdempotencyTokenAutoFill"
}

func (m *idempotencyToken_initializeOpCreateInferenceScheduler) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
	out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
	if m.tokenProvider == nil {
		return next.HandleInitialize(ctx, in)
	}

	input, ok := in.Parameters.(*CreateInferenceSchedulerInput)
	if !ok {
		return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateInferenceSchedulerInput ")
	}

	if input.ClientToken == nil {
		t, err := m.tokenProvider.GetIdempotencyToken()
		if err != nil {
			return out, metadata, err
		}
		input.ClientToken = &t
	}
	return next.HandleInitialize(ctx, in)
}

func addIdempotencyToken_opCreateInferenceSchedulerMiddleware(stack *middleware.Stack, cfg Options) error {
	return stack.Initialize.Add(&idempotencyToken_initializeOpCreateInferenceScheduler{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}

func newServiceMetadataMiddleware_opCreateInferenceScheduler(region string) *awsmiddleware.RegisterServiceMetadata {
	return &awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "lookoutequipment",
		OperationName: "CreateInferenceScheduler",
	}
}
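
// Usage sketch (not part of the generated API): a minimal example of calling
// CreateInferenceScheduler from application code, shown here as a comment so it
// does not alter the generated package. The client construction helpers
// (config.LoadDefaultConfig, lookoutequipment.NewFromConfig, aws.String) are
// standard SDK calls; the nested S3 configuration field names
// (InferenceS3InputConfiguration, InferenceS3OutputConfiguration) and the
// DataUploadFrequencyPt5m enum value are assumptions drawn from the
// accompanying types package and should be checked against the installed SDK
// version. Bucket names, the role ARN, and the scheduler/model names are
// placeholders. ClientToken is omitted on purpose: the idempotency middleware
// above fills it in when the token provider is configured.
//
//	func createScheduler(ctx context.Context) error {
//		cfg, err := config.LoadDefaultConfig(ctx)
//		if err != nil {
//			return err
//		}
//		client := lookoutequipment.NewFromConfig(cfg)
//
//		out, err := client.CreateInferenceScheduler(ctx, &lookoutequipment.CreateInferenceSchedulerInput{
//			InferenceSchedulerName:   aws.String("example-scheduler"),
//			ModelName:                aws.String("example-model"),
//			RoleArn:                  aws.String("arn:aws:iam::123456789012:role/example-role"),
//			DataUploadFrequency:      types.DataUploadFrequencyPt5m, // new data arrives every 5 minutes
//			DataDelayOffsetInMinutes: aws.Int64(5),                  // optional buffer for late uploads
//			DataInputConfiguration: &types.InferenceInputConfiguration{
//				S3InputConfiguration: &types.InferenceS3InputConfiguration{
//					Bucket: aws.String("example-input-bucket"),
//					Prefix: aws.String("input/"),
//				},
//			},
//			DataOutputConfiguration: &types.InferenceOutputConfiguration{
//				S3OutputConfiguration: &types.InferenceS3OutputConfiguration{
//					Bucket: aws.String("example-output-bucket"),
//					Prefix: aws.String("output/"),
//				},
//			},
//		})
//		if err != nil {
//			return err
//		}
//		log.Printf("scheduler %s status: %s", aws.ToString(out.InferenceSchedulerName), out.Status)
//		return nil
//	}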