diff --git a/alicloud/provider.go b/alicloud/provider.go index abc84788e41a..7f374d4b4a12 100644 --- a/alicloud/provider.go +++ b/alicloud/provider.go @@ -915,6 +915,7 @@ func Provider() terraform.ResourceProvider { "alicloud_vpc_ipam_ipams": dataSourceAliCloudVpcIpamIpams(), }, ResourcesMap: map[string]*schema.Resource{ + "alicloud_sls_logtail_pipeline_config": resourceAliCloudSlsLogtailPipelineConfig(), "alicloud_simple_application_server_disk": resourceAliCloudSimpleApplicationServerDisk(), "alicloud_nlb_hd_monitor_region_config": resourceAliCloudNlbHdMonitorRegionConfig(), "alicloud_live_domain": resourceAliCloudLiveDomain(), diff --git a/alicloud/resource_alicloud_sls_logtail_pipeline_config.go b/alicloud/resource_alicloud_sls_logtail_pipeline_config.go new file mode 100644 index 000000000000..4dd9b76259bc --- /dev/null +++ b/alicloud/resource_alicloud_sls_logtail_pipeline_config.go @@ -0,0 +1,323 @@ +package alicloud + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/aliyun/terraform-provider-alicloud/alicloud/connectivity" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" +) + +func resourceAliCloudSlsLogtailPipelineConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceAliCloudSlsLogtailPipelineConfigCreate, + Read: resourceAliCloudSlsLogtailPipelineConfigRead, + Update: resourceAliCloudSlsLogtailPipelineConfigUpdate, + Delete: resourceAliCloudSlsLogtailPipelineConfigDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Update: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "aggregators": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + "config_name": { + Type: schema.TypeString, + Required: true, 
+ ForceNew: true, + }, + "flushers": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + "globals": { + Type: schema.TypeMap, + Optional: true, + }, + "inputs": { + Type: schema.TypeList, + Required: true, + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + "log_sample": { + Type: schema.TypeString, + Optional: true, + }, + "processors": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeMap}, + }, + "project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "task": { + Type: schema.TypeMap, + Optional: true, + }, + }, + } +} + +func resourceAliCloudSlsLogtailPipelineConfigCreate(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*connectivity.AliyunClient) + + action := fmt.Sprintf("/pipelineconfigs") + var request map[string]interface{} + var response map[string]interface{} + query := make(map[string]*string) + body := make(map[string]interface{}) + hostMap := make(map[string]*string) + var err error + request = make(map[string]interface{}) + hostMap["project"] = StringPointer(d.Get("project").(string)) + if v, ok := d.GetOk("config_name"); ok { + request["configName"] = v + } + + if v, ok := d.GetOk("task"); ok { + request["task"] = v + } + if v, ok := d.GetOk("inputs"); ok { + inputsMapsArray := convertToInterfaceArray(v) + + request["inputs"] = inputsMapsArray + } + + if v, ok := d.GetOk("globals"); ok { + request["global"] = v + } + if v, ok := d.GetOk("aggregators"); ok { + aggregatorsMapsArray := convertToInterfaceArray(v) + + request["aggregators"] = aggregatorsMapsArray + } + + if v, ok := d.GetOk("log_sample"); ok { + request["logSample"] = v + } + if v, ok := d.GetOk("flushers"); ok { + flushersMapsArray := convertToInterfaceArray(v) + + request["flushers"] = flushersMapsArray + } + + if v, ok := d.GetOk("processors"); ok { + processorsMapsArray := convertToInterfaceArray(v) + + request["processors"] = processorsMapsArray + } + + 
body = request + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { + response, err = client.Do("Sls", roaParam("POST", "2020-12-30", "CreateLogtailPipelineConfig", action), query, body, nil, hostMap, false) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + + if err != nil { + return WrapErrorf(err, DefaultErrorMsg, "alicloud_sls_logtail_pipeline_config", action, AlibabaCloudSdkGoERROR) + } + + d.SetId(fmt.Sprintf("%v:%v", *hostMap["project"], request["configName"])) + + return resourceAliCloudSlsLogtailPipelineConfigRead(d, meta) +} + +func resourceAliCloudSlsLogtailPipelineConfigRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*connectivity.AliyunClient) + slsServiceV2 := SlsServiceV2{client} + + objectRaw, err := slsServiceV2.DescribeSlsLogtailPipelineConfig(d.Id()) + if err != nil { + if !d.IsNewResource() && NotFoundError(err) { + log.Printf("[DEBUG] Resource alicloud_sls_logtail_pipeline_config DescribeSlsLogtailPipelineConfig Failed!!! 
%s", err)
+		d.SetId("")
+		return nil
+	}
+	return WrapError(err)
+	}
+
+	d.Set("globals", objectRaw["global"])
+	d.Set("log_sample", objectRaw["logSample"])
+	d.Set("task", objectRaw["task"])
+
+	aggregatorsRaw := objectRaw["aggregators"]
+	if err := d.Set("aggregators", aggregatorsRaw); err != nil {
+		return err
+	}
+	flushersRaw := objectRaw["flushers"]
+	if err := d.Set("flushers", flushersRaw); err != nil {
+		return err
+	}
+	inputsRaw := objectRaw["inputs"]
+	if err := d.Set("inputs", inputsRaw); err != nil {
+		return err
+	}
+	processorsRaw := objectRaw["processors"]
+	if err := d.Set("processors", processorsRaw); err != nil {
+		return err
+	}
+
+	parts := strings.Split(d.Id(), ":")
+	d.Set("project", parts[0])
+	d.Set("config_name", parts[1])
+
+	return nil
+}
+
+func resourceAliCloudSlsLogtailPipelineConfigUpdate(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*connectivity.AliyunClient)
+	var request map[string]interface{}
+	var response map[string]interface{}
+	var query map[string]*string
+	var body map[string]interface{}
+	update := false
+
+	var err error
+	parts := strings.Split(d.Id(), ":")
+	configName := parts[1]
+	action := fmt.Sprintf("/pipelineconfigs/%s", configName)
+	request = make(map[string]interface{})
+	query = make(map[string]*string)
+	body = make(map[string]interface{})
+	hostMap := make(map[string]*string)
+	hostMap["project"] = StringPointer(parts[0])
+
+	if d.HasChange("task") {
+		update = true
+	}
+	if v, ok := d.GetOk("task"); ok || d.HasChange("task") {
+		request["task"] = v
+	}
+	if d.HasChange("inputs") {
+		update = true
+	}
+	if v, ok := d.GetOk("inputs"); ok || d.HasChange("inputs") {
+		inputsMapsArray := convertToInterfaceArray(v)
+
+		request["inputs"] = inputsMapsArray
+	}
+
+	if d.HasChange("globals") {
+		update = true
+	}
+	if v, ok := d.GetOk("globals"); ok || d.HasChange("globals") {
+		request["global"] = v
+	}
+	request["configName"] = d.Get("config_name") // fix: this resource's schema declares "config_name"; "logstore_name" does not exist here and always returned nil
+	if d.HasChange("aggregators") {
update = true + } + if v, ok := d.GetOk("aggregators"); ok || d.HasChange("aggregators") { + aggregatorsMapsArray := convertToInterfaceArray(v) + + request["aggregators"] = aggregatorsMapsArray + } + + if d.HasChange("log_sample") { + update = true + } + if v, ok := d.GetOk("log_sample"); ok || d.HasChange("log_sample") { + request["logSample"] = v + } + if d.HasChange("flushers") { + update = true + } + if v, ok := d.GetOk("flushers"); ok || d.HasChange("flushers") { + flushersMapsArray := convertToInterfaceArray(v) + + request["flushers"] = flushersMapsArray + } + + if d.HasChange("processors") { + update = true + } + if v, ok := d.GetOk("processors"); ok || d.HasChange("processors") { + processorsMapsArray := convertToInterfaceArray(v) + + request["processors"] = processorsMapsArray + } + + body = request + if update { + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutUpdate), func() *resource.RetryError { + response, err = client.Do("Sls", roaParam("PUT", "2020-12-30", "UpdateLogtailPipelineConfig", action), query, body, nil, hostMap, false) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + if err != nil { + return WrapErrorf(err, DefaultErrorMsg, d.Id(), action, AlibabaCloudSdkGoERROR) + } + } + + return resourceAliCloudSlsLogtailPipelineConfigRead(d, meta) +} + +func resourceAliCloudSlsLogtailPipelineConfigDelete(d *schema.ResourceData, meta interface{}) error { + + client := meta.(*connectivity.AliyunClient) + parts := strings.Split(d.Id(), ":") + configName := parts[1] + action := fmt.Sprintf("/pipelineconfigs/%s", configName) + var request map[string]interface{} + var response map[string]interface{} + query := make(map[string]*string) + hostMap := make(map[string]*string) + var err error + request = make(map[string]interface{}) + hostMap["project"] = 
StringPointer(parts[0]) + + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(d.Timeout(schema.TimeoutDelete), func() *resource.RetryError { + response, err = client.Do("Sls", roaParam("DELETE", "2020-12-30", "DeleteLogtailPipelineConfig", action), query, nil, nil, hostMap, false) + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + + if err != nil { + if NotFoundError(err) { + return nil + } + return WrapErrorf(err, DefaultErrorMsg, d.Id(), action, AlibabaCloudSdkGoERROR) + } + + return nil +} diff --git a/alicloud/resource_alicloud_sls_logtail_pipeline_config_test.go b/alicloud/resource_alicloud_sls_logtail_pipeline_config_test.go new file mode 100644 index 000000000000..d5202e3b392c --- /dev/null +++ b/alicloud/resource_alicloud_sls_logtail_pipeline_config_test.go @@ -0,0 +1,79 @@ +// Package alicloud. This file is generated automatically. Please do not modify it manually, thank you! +package alicloud + +import ( + "fmt" + "testing" + + "github.com/aliyun/terraform-provider-alicloud/alicloud/connectivity" + "github.com/hashicorp/terraform-plugin-sdk/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" +) + +// Test Sls LogtailPipelineConfig. >>> Resource test cases, automatically generated. 
+// Case LogtailPipelineConfigTestPL 12633
+func TestAccAliCloudSlsLogtailPipelineConfig_basic12633(t *testing.T) {
+	var v map[string]interface{}
+	resourceId := "alicloud_sls_logtail_pipeline_config.default"
+	ra := resourceAttrInit(resourceId, AlicloudSlsLogtailPipelineConfigMap12633)
+	rc := resourceCheckInitWithDescribeMethod(resourceId, &v, func() interface{} {
+		return &SlsServiceV2{testAccProvider.Meta().(*connectivity.AliyunClient)}
+	}, "DescribeSlsLogtailPipelineConfig")
+	rac := resourceAttrCheckInit(rc, ra)
+	testAccCheck := rac.resourceAttrMapUpdateSet()
+	rand := acctest.RandIntRange(10000, 99999)
+	name := fmt.Sprintf("tfaccsls%d", rand)
+	testAccConfig := resourceTestAccConfigFunc(resourceId, name, AlicloudSlsLogtailPipelineConfigBasicDependence12633)
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			testAccPreCheck(t)
+		},
+		IDRefreshName: resourceId,
+		Providers:     testAccProviders,
+		CheckDestroy:  rac.checkResourceDestroy(),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccConfig(map[string]interface{}{
+					"project":     "terraform-logstore-test-578",
+					"config_name": "pl-auto-test",
+					"flushers":    []map[string]interface{}{},
+					"inputs":      []map[string]interface{}{},
+				}),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheck(map[string]string{
+						"project":     "terraform-logstore-test-578",
+						"config_name": "pl-auto-test",
+					}),
+				),
+			},
+			{
+				Config: testAccConfig(map[string]interface{}{
+					"inputs": []map[string]interface{}{},
+				}),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheck(map[string]string{}),
+				),
+			},
+			{
+				ResourceName:            resourceId,
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{}, // fix: "logstore_name" is not an attribute of this resource; nothing needs to be ignored on import
+			},
+		},
+	})
+}
+
+var AlicloudSlsLogtailPipelineConfigMap12633 = map[string]string{}
+
+func AlicloudSlsLogtailPipelineConfigBasicDependence12633(name string) string {
+	return fmt.Sprintf(`
+variable "name" {
+    default = "%s"
+}
+
+
+`, name)
+}
+
+// Test Sls LogtailPipelineConfig. 
<<< Resource test cases, automatically generated. diff --git a/alicloud/service_alicloud_sls_v2.go b/alicloud/service_alicloud_sls_v2.go index 9797dbd7a248..e8e9abed3e7b 100644 --- a/alicloud/service_alicloud_sls_v2.go +++ b/alicloud/service_alicloud_sls_v2.go @@ -974,3 +974,81 @@ func (s *SlsServiceV2) SlsScheduledSqlStateRefreshFuncWithApi(id string, field s } // DescribeSlsScheduledSql >>> Encapsulated. + +// DescribeSlsLogtailPipelineConfig <<< Encapsulated get interface for Sls LogtailPipelineConfig. + +func (s *SlsServiceV2) DescribeSlsLogtailPipelineConfig(id string) (object map[string]interface{}, err error) { + client := s.client + var request map[string]interface{} + var response map[string]interface{} + var query map[string]*string + parts := strings.Split(id, ":") + if len(parts) != 2 { + err = WrapError(fmt.Errorf("invalid Resource Id %s. Expected parts' length %d, got %d", id, 2, len(parts))) + return nil, err + } + configName := parts[1] + request = make(map[string]interface{}) + query = make(map[string]*string) + hostMap := make(map[string]*string) + hostMap["project"] = StringPointer(parts[0]) + + action := fmt.Sprintf("/pipelineconfigs/%s", configName) + + wait := incrementalWait(3*time.Second, 5*time.Second) + err = resource.Retry(1*time.Minute, func() *resource.RetryError { + response, err = client.Do("Sls", roaParam("GET", "2020-12-30", "GetLogtailPipelineConfig", action), query, nil, nil, hostMap, true) + + if err != nil { + if NeedRetry(err) { + wait() + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + addDebug(action, response, request) + if err != nil { + if IsExpectedErrors(err, []string{"ConfigNotExist"}) { + return object, WrapErrorf(NotFoundErr("LogtailPipelineConfig", id), NotFoundMsg, response) + } + return object, WrapErrorf(err, DefaultErrorMsg, id, action, AlibabaCloudSdkGoERROR) + } + + return response, nil +} + +func (s *SlsServiceV2) 
SlsLogtailPipelineConfigStateRefreshFunc(id string, field string, failStates []string) resource.StateRefreshFunc { + return s.SlsLogtailPipelineConfigStateRefreshFuncWithApi(id, field, failStates, s.DescribeSlsLogtailPipelineConfig) +} + +func (s *SlsServiceV2) SlsLogtailPipelineConfigStateRefreshFuncWithApi(id string, field string, failStates []string, call func(id string) (map[string]interface{}, error)) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + object, err := call(id) + if err != nil { + if NotFoundError(err) { + return object, "", nil + } + return nil, "", WrapError(err) + } + v, err := jsonpath.Get(field, object) + currentStatus := fmt.Sprint(v) + + if strings.HasPrefix(field, "#") { + v, _ := jsonpath.Get(strings.TrimPrefix(field, "#"), object) + if v != nil { + currentStatus = "#CHECKSET" + } + } + + for _, failState := range failStates { + if currentStatus == failState { + return object, currentStatus, WrapError(Error(FailedToReachTargetStatus, currentStatus)) + } + } + return object, currentStatus, nil + } +} + +// DescribeSlsLogtailPipelineConfig >>> Encapsulated. diff --git a/website/docs/r/sls_logtail_pipeline_config.html.markdown b/website/docs/r/sls_logtail_pipeline_config.html.markdown new file mode 100644 index 000000000000..9b7de26aaab7 --- /dev/null +++ b/website/docs/r/sls_logtail_pipeline_config.html.markdown @@ -0,0 +1,90 @@ +--- +subcategory: "Log Service (SLS)" +layout: "alicloud" +page_title: "Alicloud: alicloud_sls_logtail_pipeline_config" +description: |- + Provides a Alicloud Log Service (SLS) Logtail Pipeline Config resource. +--- + +# alicloud_sls_logtail_pipeline_config + +Provides a Log Service (SLS) Logtail Pipeline Config resource. + +Logtail Pipeline Collection Configuration. + +For information about Log Service (SLS) Logtail Pipeline Config and how to use it, see [What is Logtail Pipeline Config](https://next.api.alibabacloud.com/document/Sls/2020-12-30/CreateLogtailPipelineConfig). 
+
+-> **NOTE:** Available since v1.273.0.
+
+## Example Usage
+
+Basic Usage
+
+```terraform
+variable "name" {
+  default = "terraform-example"
+}
+
+provider "alicloud" {
+  region = "cn-hangzhou"
+}
+
+
+resource "alicloud_sls_logtail_pipeline_config" "default" {
+  project     = "terraform-logstore-example-578"
+  config_name = "pl-auto-example"
+  inputs {
+  }
+  flushers {
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+* `aggregators` - (Optional, List) The aggregator plugin configurations of the Logtail pipeline. See [`aggregators`](#aggregators) below.
+* `config_name` - (Required, ForceNew) The name of the Logtail pipeline configuration.
+* `flushers` - (Required, List) The flusher plugin configurations of the Logtail pipeline. See [`flushers`](#flushers) below.
+* `globals` - (Optional, Map) The global settings of the Logtail pipeline.
+* `inputs` - (Required, List) The input plugin configurations of the Logtail pipeline. See [`inputs`](#inputs) below.
+* `log_sample` - (Optional) A sample of the log to be collected by this configuration.
+* `processors` - (Optional, List) The processor plugin configurations of the Logtail pipeline. See [`processors`](#processors) below.
+* `project` - (Required, ForceNew) The name of the SLS project that the configuration belongs to.
+* `task` - (Optional, Map) The task settings of the Logtail pipeline.
+
+### `aggregators`
+
+The aggregators supports the following:
+
+### `flushers`
+
+The flushers supports the following:
+
+### `inputs`
+
+The inputs supports the following:
+
+### `processors`
+
+The processors supports the following:
+
+## Attributes Reference
+
+The following attributes are exported:
+* `id` - The ID of the resource supplied above. The value is formulated as `<project>:<config_name>`.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts) for certain actions:
+* `create` - (Defaults to 5 mins) Used when create the Logtail Pipeline Config.
+* `delete` - (Defaults to 5 mins) Used when delete the Logtail Pipeline Config.
+* `update` - (Defaults to 5 mins) Used when update the Logtail Pipeline Config.
+
+## Import
+
+Log Service (SLS) Logtail Pipeline Config can be imported using the id, e.g.
+
+```shell
+$ terraform import alicloud_sls_logtail_pipeline_config.example <project>:<config_name>
+```
\ No newline at end of file